From 2d1eda8c861738ecfe8f06c1b0b6b47f5ce03d16 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Sun, 20 Jan 2019 14:38:32 -0500 Subject: [PATCH 01/49] Start work on eager mode --- src/TensorFlow.jl | 1 + src/eager.jl | 50 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 src/eager.jl diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl index fac6ffdf..bbbb478e 100644 --- a/src/TensorFlow.jl +++ b/src/TensorFlow.jl @@ -198,6 +198,7 @@ include("meta.jl") include("constants.jl") include("tensorflow_protos.jl") include("core.jl") +include("eager.jl") include("run.jl") include("version.jl") include("ops.jl") diff --git a/src/eager.jl b/src/eager.jl new file mode 100644 index 00000000..1f4a3e45 --- /dev/null +++ b/src/eager.jl @@ -0,0 +1,50 @@ +mutable struct EagerContext + ptr::Ptr{Cvoid} + + function EagerContext() + options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ()) + status = Status() + context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) + check_status(status) + this = new(context) + finalizer(this) do self + @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) + end + @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options) + return this + end +end + + +mutable struct TensorHandle + ptr::Ptr{Cvoid} + + function TensorHandle(tensor) + status = Status() + ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status) + check_status(status) + this = new(ptr) + finalizer(this) do self + @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) + end + return this + end +end + +Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::TensorHandle) = h.ptr + + +function device_name(h::TensorHandle) + status = Status() + c_name = @tfcall(:TFE_TensorHandleDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + return unsafe_string(c_name) +end + +function resolve(h::TensorHandle) + status = Status() + ptr = @tfcall(:TFE_TensorHandleResolve, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + tensor = RawTensor(ptr) + return tensor +end From b23ff94c592fa02bfb130742ad2b1a26340bf81a Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 22 Jan 2019 16:18:55 -0500 Subject: [PATCH 02/49] Trying to get 'execute' working. 
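
The call sequence this commit is working toward, mirroring `test_eager`
below (all names come from the previous commit and this one; each call
wraps the TFE_* C function noted alongside):

    ctx = EagerContext()                   # TFE_NewContext
    h1 = TensorHandle(RawTensor([1, 2]))   # TFE_NewTensorHandle
    h2 = TensorHandle(RawTensor([3, 4]))
    op = EagerOp(ctx, "Add")               # TFE_NewOp
    add_input(op, h1)                      # TFE_OpAddInput
    add_input(op, h2)
    set_attr_type(op, "T", data_type(h1))  # TFE_OpSetAttrType
    res = execute(op)                      # TFE_Execute
    resolve(res)                           # TFE_TensorHandleResolve
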
--- src/eager.jl | 76 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index 1f4a3e45..a4d1943b 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -3,18 +3,20 @@ mutable struct EagerContext function EagerContext() options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ()) + @tfcall(:TFE_ContextOptionsSetAsync, Cvoid, (Ptr{Cvoid}, Cuchar), options, 0) status = Status() context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) check_status(status) this = new(context) - finalizer(this) do self - @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) - end + # finalizer(this) do self + # @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) + # end @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options) return this end end +Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::EagerContext) = c.ptr mutable struct TensorHandle ptr::Ptr{Cvoid} @@ -24,15 +26,24 @@ mutable struct TensorHandle ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status) check_status(status) this = new(ptr) - finalizer(this) do self - @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) - end + # finalizer(this) do self + # @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) + # end return this end + + function TensorHandle() + return new() + end end Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::TensorHandle) = h.ptr +function async_wait(ctx::EagerContext) + status = Status() + @tfcall(:TFE_ContextAsyncWait, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}), ctx, status) + check_status(status) +end function device_name(h::TensorHandle) status = Status() @@ -41,6 +52,14 @@ function device_name(h::TensorHandle) return unsafe_string(c_name) end +function data_type(h::TensorHandle) + return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) +end + +function set_attr_type(op, attr_name, value) + @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, attr_name, value) +end + function resolve(h::TensorHandle) status = Status() ptr = @tfcall(:TFE_TensorHandleResolve, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) @@ -48,3 +67,48 @@ function resolve(h::TensorHandle) tensor = RawTensor(ptr) return tensor end + +mutable struct EagerOp + ptr::Ptr{Cvoid} + + function EagerOp(ctx::EagerContext, op_name) + status = Status() + ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) + check_status(status) + this = new(ptr) + # finalizer(this) do self + # @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) + # end + return this + end +end + +Base.unsafe_convert(::Type{Ptr{Cvoid}}, op::EagerOp) = op.ptr + +function add_input(op::EagerOp, h::TensorHandle) + status = Status() + @tfcall(:TFE_OpAddInput, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), op, h, status) + check_status(status) + return +end + +function execute(op::EagerOp) + handle = TensorHandle() + num_ret = Cint(1) + status = Status() + @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, Ref(handle.ptr), Ref(num_ret), status) + check_status(status) + return handle +end + +function test_eager(ctx) + h1 = TensorHandle(RawTensor([1,2])) + h2 = TensorHandle(RawTensor([3,4])) + op = EagerOp(ctx, "Add") + add_input(op, h1) + add_input(op, h2) + dtype = data_type(h1) + set_attr_type(op, "T", dtype) + res = execute(op) + return res +end From 4196715d0d3007054be106a931ccfd95e2419c7a Mon Sep 17 00:00:00 2001 From: Jon 
Malmaud Date: Wed, 30 Jan 2019 14:13:02 -0500 Subject: [PATCH 03/49] Test working --- src/eager.jl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index a4d1943b..6417be45 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -94,14 +94,17 @@ end function execute(op::EagerOp) handle = TensorHandle() + ptrs = [Ptr{Cvoid}(0)] num_ret = Cint(1) status = Status() - @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, Ref(handle.ptr), Ref(num_ret), status) + @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, ptrs, Ref(num_ret), status) + handle.ptr = ptrs[1] check_status(status) return handle end -function test_eager(ctx) +function test_eager() + ctx = EagerContext() h1 = TensorHandle(RawTensor([1,2])) h2 = TensorHandle(RawTensor([3,4])) op = EagerOp(ctx, "Add") @@ -110,5 +113,5 @@ function test_eager(ctx) dtype = data_type(h1) set_attr_type(op, "T", dtype) res = execute(op) - return res + return resolve(res) end From e8e0dc1fc0a1ed8afc403f86ef47429fd4745385 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 30 Jan 2019 14:36:59 -0500 Subject: [PATCH 04/49] Attribute setters --- src/core.jl | 6 ++++ src/eager.jl | 96 ++++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 87 insertions(+), 15 deletions(-) diff --git a/src/core.jl b/src/core.jl index b122e176..930c1395 100644 --- a/src/core.jl +++ b/src/core.jl @@ -521,6 +521,10 @@ mutable struct DeviceList end this end + + function DeviceList(ptr, count) + new(ptr, count) + end end struct DeviceInfo @@ -663,6 +667,8 @@ RawTensor(data::AbstractArray) = RawTensor(collect(data)) RawTensor(t::RawTensor) = t +Base.unsafe_convert(::Type{Ptr{Cvoid}}, t::RawTensor) = t.ptr + function varint_encode(b::IO, n::Integer) while n ≥ 2^7 write(b, UInt8(0b10000000 | (n & 0b1111111))) diff --git a/src/eager.jl b/src/eager.jl index 6417be45..df6df4e7 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -8,9 +8,9 @@ mutable struct EagerContext context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) check_status(status) this = new(context) - # finalizer(this) do self - # @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) - # end + finalizer(this) do self + @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) + end @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options) return this end @@ -18,6 +18,15 @@ end Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::EagerContext) = c.ptr +function DeviceList(ctx::EagerContext) + status = Status() + ptr = @tfcall(:TFE_ContextListDevices, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), ctx, status) + check_status(status) + count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},), ptr) + this = new(ptr, count) + return this +end + mutable struct TensorHandle ptr::Ptr{Cvoid} @@ -26,9 +35,9 @@ mutable struct TensorHandle ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status) check_status(status) this = new(ptr) - # finalizer(this) do self - # @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) - # end + finalizer(this) do self + @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) + end return this end @@ -53,11 +62,7 @@ function device_name(h::TensorHandle) end function data_type(h::TensorHandle) - return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) -end - -function set_attr_type(op, attr_name, value) - @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, 
attr_name, value) + return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) |> tf_to_jl_type end function resolve(h::TensorHandle) @@ -76,9 +81,9 @@ mutable struct EagerOp ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) check_status(status) this = new(ptr) - # finalizer(this) do self - # @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) - # end + finalizer(this) do self + @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) + end return this end end @@ -111,7 +116,68 @@ function test_eager() add_input(op, h1) add_input(op, h2) dtype = data_type(h1) - set_attr_type(op, "T", dtype) + op["T"] = dtype res = execute(op) return resolve(res) end + +function setindex!(op::EagerOp, tensor::RawTensor, attr_name) + status = Status() + @tfcall(:TFE_OpSetAttrTensor, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Ptr{Cvoid}), op, attr_name, tensor, status) + check_status(status) +end + +function setindex!(op::EagerOp, dtype::DataType, attr_name) + @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, attr_name, dtype|>jl_to_df_type) +end + +function setindex!(op::EagerOp, value::Integer, attr_name) + value = Int64(value) + @tfcall(:TFE_OpSetAttrInt, Cvoid, (Ptr{Cvoid}, Cstring, Int64), op, attr_name, value) +end + +function setindex!(op::EagerOp, value::Bool, attr_name) + @tfcall(:TFE_OpSetAttrBool, Cvoid, (Ptr{Cvoid}, Cstring, Cuchar), op, attr_name, value) +end + +function setindex!(op::EagerOp, value::AbstractFloat, attr_name) + value = Float32(value) + @tfcall(:TFE_OpSetAttrFloat, Cvoid, (Ptr{Cvoid}, Cstring, Cfloat), op, attr_name, value) +end + +function setindex!(op::EagerOp, value::AbstractString, attr_name) + value = String(value) + @tfcall(:TFE_OpSetAttrString, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Cint), op, attr_name, Vector{UInt8}(value), sizeof(value)) +end + +function setindex!(op::EagerOp, value::Vector, attr_name) + set_attr_list(op, attr_name, value) +end + +function set_attr_list(op::EagerOp, attr_name, list::Vector{<:Integer}) + list = Int64[Int64(x) for x in list] + @tfcall(:TFE_OpSetAttrIntList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Int64}, Cint), op, attr_name, list, length(list)) +end + +function set_attr_list(op::EagerOp, attr_name, list::Vector{<:AbstractFloat}) + list = Float32[Float32(x) for x in list] + @tfcall(:TFE_OpSetAttrFloatList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Float32}, Cint), op, attr_name, list, length(list)) +end + +function set_attr_list(op::EagerOp, attr_name, list::Vector{<:DataType}) + list = map(jl_to_df_type, list) + @tfcall(:TFE_OpSetAttrTypeList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Cint), op, attr_name, list, length(list)) +end + +function set_attr_shape_list(op::EagerOp, attr_name, list::Vector) + dims = Vector{Int64}[] + for shape in list + push!(dims, Int64[shape...]) + end + @tfcall(:TFE_OpSetAttrShapeList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Ptr{Int64}}, Ptr{Cint}, Cint), + op, + attr_name, + dims, + Cint[length(x) for x in dims], + length(dims)) +end From 104097ab21947611a2ba7898e4d2f4a2d39a4f35 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 30 Jan 2019 14:59:05 -0500 Subject: [PATCH 05/49] Variable output --- src/eager.jl | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index df6df4e7..b171a47d 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -75,17 +75,18 @@ end mutable struct EagerOp ptr::Ptr{Cvoid} + op_name::String +end - function EagerOp(ctx::EagerContext, op_name) - status = 
Status() - ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) - check_status(status) - this = new(ptr) - finalizer(this) do self - @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) - end - return this +function EagerOp(ctx::EagerContext, op_name) + status = Status() + ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) + check_status(status) + this = EagerOp(ptr, String(op_name)) + finalizer(this) do self + @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) end + return this end Base.unsafe_convert(::Type{Ptr{Cvoid}}, op::EagerOp) = op.ptr @@ -98,14 +99,18 @@ function add_input(op::EagerOp, h::TensorHandle) end function execute(op::EagerOp) - handle = TensorHandle() - ptrs = [Ptr{Cvoid}(0)] - num_ret = Cint(1) + op_desc = get_op_def(op.op_name) + n_outputs = length(op_desc.output_arg) + handles = [TensorHandle() for _ in 1:n_outputs] + ptrs = [Ptr{Cvoid}(0) for _ in 1:n_outputs] + num_ret = Cint(n_outputs) status = Status() @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, ptrs, Ref(num_ret), status) - handle.ptr = ptrs[1] check_status(status) - return handle + for i in 1:n_outputs + handles[i].ptr = ptrs[i] + end + return handles end function test_eager() @@ -118,7 +123,7 @@ function test_eager() dtype = data_type(h1) op["T"] = dtype res = execute(op) - return resolve(res) + return resolve(res[1]) end function setindex!(op::EagerOp, tensor::RawTensor, attr_name) From de7bafc4adc0026e0577cf4aa588f00c8efc2bb3 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 19 Feb 2019 22:39:01 -0500 Subject: [PATCH 06/49] Tape AD --- src/tape.jl | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 src/tape.jl diff --git a/src/tape.jl b/src/tape.jl new file mode 100644 index 00000000..ccd095bd --- /dev/null +++ b/src/tape.jl @@ -0,0 +1,101 @@ +using MacroTools + +mutable struct Tensor + x +end + +mutable struct TapeNode + op + args +end + +import Base: *, log, sin + +mutable struct Tape + nodes::Dict{Tensor, TapeNode} +end + +Tape() = Tape(Dict{Tensor, TapeNode}()) + +tape = nothing + +function set_tape(new_tape=nothing) + if new_tape === nothing + new_tape = Tape() + end + global tape = new_tape + return tape +end + +function add_node(t, node) + tape.nodes[t] = node +end + +function *(t1::Tensor, t2::Tensor) + t3 = Tensor(t1.x*t2.x) + node = TapeNode(*, [t1, t2]) + add_node(t3, node) + return t3 +end + +function *(t1::Tensor, t2::AbstractFloat) + return Tensor(t1.x*t2) +end + +grad_fns = Dict() + +macro back_for(target, fn) + def = splitdef(fn) + quote + $(esc(fn)) + grad_fns[$target] = $(def[:name]) + end +end + + +@back_for(*, function mul_backwards(args) + return [args[2], args[1]] +end) + + +@back_for(log, function log_backwards(args) + return [Tensor(1/args[1].x)] +end) + +function Base.sin(t::Tensor) +end + +@back_for(sin, function sin_backwards(args) + return [Tensor(cos(args[1].x))] +end) + +function log(t::Tensor) + res = Tensor(log(t.x)) + node = TapeNode(log, [t]) + add_node(res, node) + return res +end + +function grad(tape::Tape, tensor, out_grad, grads) + if !haskey(tape.nodes, tensor) + return + end + + node = tape.nodes[tensor] + back_op = grad_fns[node.op] + arg_grads = back_op(node.args) + + + for (i, arg) in enumerate(node.args) + grads[arg] = arg_grads[i] + grad(tape, arg, grads[arg].x, grads) + end + + return +end + +function grad(tape, tensor, out_grad=1.0) + grads = Dict() + grad(tape, tensor, out_grad, 
grads) + return grads +end From 19161f700defee4db02b4bd3377800deddd3b5df Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 19 Feb 2019 22:42:05 -0500 Subject: [PATCH 07/49] fix tape --- src/tape.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tape.jl b/src/tape.jl index ccd095bd..f164db18 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -87,7 +87,7 @@ function grad(tape::Tape, tensor, out_grad, grads) for (i, arg) in enumerate(node.args) - grads[arg] = arg_grads[i] + grads[arg] = arg_grads[i]*out_grad grad(tape, arg, grads[arg].x, grads) end From c16ff7a17e363fc5605ede95c65ee7b36b2e3cfd Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 09:20:21 -0500 Subject: [PATCH 08/49] Start generating eager ops --- src/generate_ops.jl | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 0020a48f..5a75be1f 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -7,6 +7,7 @@ using MacroTools struct OpFunc expr::Expr + eager_expr::Expr docstring::String name::Symbol end @@ -77,6 +78,7 @@ function to_function(op::tensorflow.OpDef) jl_name = opname_to_jlname(op.name) inputs = [] input_block = quote end + eager_input_block = quote end convert_block = quote end type_sets = Dict{String, Vector{Symbol}}() for (i, input) in enumerate(op.input_arg) @@ -137,6 +139,11 @@ function to_function(op::tensorflow.OpDef) tf.add_input(desc, $(inputs[input_idx])) end) end + for (input_idx, input) in enumerate(op.input_arg) + push!(eager_input_block.args, quote + tf.add_input(op, $(inputs[input_idx])) + end) + end kwargs = Expr(:parameters) push!(kwargs.args, Expr(:kw, :name, nothing)) attr_block = quote end @@ -198,6 +205,12 @@ function to_function(op::tensorflow.OpDef) out end end + eager_output_block = if scalar_output + quote + execute(op) + end + # else + end expr = quote @tf.op function $(jl_name)($(inputs...)) local desc @@ -210,6 +223,22 @@ function to_function(op::tensorflow.OpDef) $output_block end end + eager_inputs = [] + push!(eager_inputs, inputs[1]) + for i in 2:length(inputs) + push!(eager_inputs, :($(inputs[i])::TensorHandle) + ) + end + eager_expr = quote + function $(jl_name)($(eager_inputs...)) + op = EagerOp(ctx, $(op.name)) + #$convert_block + $eager_input_block + $attr_block + end + op["T"] = data_type($(inputs[2])) + $eager_output_block + end posargs_str = join((arg.name for arg in op.input_arg), ", ") kwargs_str = [] for arg in op.attr @@ -234,7 +263,7 @@ function to_function(op::tensorflow.OpDef) escape_string(op.summary) ) #TODO Workout how to get descriptions for docstrings expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) - OpFunc(expr, doc_str, jl_name) + OpFunc(expr, eager_expr, doc_str, jl_name) end """ @@ -247,10 +276,9 @@ The function is returned with a triple-quoted docstring. 
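Concretely, the result is a `"""..."""` docstring block immediately followed
by the rendered `function` definition.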
""" function stringify_func(opfunc::OpFunc) s = string(opfunc.expr) - docstring = replace(opfunc.docstring, "\$", "") + docstring = replace(opfunc.docstring, "\$"=>"") doc_line = "\"\"\"\n$(docstring)\n\"\"\"" - lines = [] - "$doc_line\n$s" + "$doc_line\n$s\n$doc_line\n$(string(opfunc.eager_expr))\n" end stringify_func(op::tensorflow.OpDef) = stringify_func(to_function(op)) From c8ba0e3eb27076f8366b3429135ee7e8b95c4dc2 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 09:48:06 -0500 Subject: [PATCH 09/49] Test working again --- src/eager.jl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index b171a47d..144087da 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -2,6 +2,11 @@ mutable struct EagerContext ptr::Ptr{Cvoid} function EagerContext() + # For some reason, this has to be called before :TFE_Execute or else tf + # crashes. Maybe something about TF_GetAllOpList is causing the tf + # library to enter a bad state. + get_all_op_list() + options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ()) @tfcall(:TFE_ContextOptionsSetAsync, Cvoid, (Ptr{Cvoid}, Cuchar), options, 0) status = Status() @@ -103,9 +108,9 @@ function execute(op::EagerOp) n_outputs = length(op_desc.output_arg) handles = [TensorHandle() for _ in 1:n_outputs] ptrs = [Ptr{Cvoid}(0) for _ in 1:n_outputs] - num_ret = Cint(n_outputs) + num_ret = Ref{Cint}(n_outputs) status = Status() - @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, ptrs, Ref(num_ret), status) + @tfcall(:TFE_Execute, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cint}, Ptr{Cvoid}), op, ptrs, num_ret, status) check_status(status) for i in 1:n_outputs handles[i].ptr = ptrs[i] From b55ecd6079a9938606869151dedf8744816f7766 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 16:52:09 -0500 Subject: [PATCH 10/49] Improvements to importing --- src/core.jl | 5 +- src/eager.jl | 35 +- src/generate_ops.jl | 93 +- src/ops.jl | 10 +- src/ops/imported_ops.jl | 48209 ++++++++++++++++++++++++++++++++++++-- src/ops/math.jl | 87 +- src/ops/sequences.jl | 32 +- src/show.jl | 21 +- 8 files changed, 45832 insertions(+), 2660 deletions(-) diff --git a/src/core.jl b/src/core.jl index 930c1395..79a7b14f 100644 --- a/src/core.jl +++ b/src/core.jl @@ -1174,7 +1174,7 @@ function load_proto(value::tensorflow.AttrValue) load_proto(value.list) elseif has_field(value, :_type) type_ = value._type - proto_type_map[type_] + get(proto_type_map, type_, Float32) # wrong end end @@ -1233,6 +1233,8 @@ function Tensor(op::Operation, value_index::Int) Tensor{get_output_type(base_tensor)}(op, value_index) end +# Tensor constructors + Tensor(op::Operation) = Tensor(op, 1) Tensor(value) = convert(Tensor, value) @@ -1248,6 +1250,7 @@ Base.convert(::Type{Tensor{Any}}, value::Tensor{R}) where {R} = value Base.convert(::Type{Tensor{T}}, value) where {T} = convert(Tensor{T}, constant(value)) + function operation_output_type(port::Port) @tfcall(:TF_OperationOutputType, TF_DataType, (Port,), port) end diff --git a/src/eager.jl b/src/eager.jl index 144087da..ec7ec25d 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -21,6 +21,9 @@ mutable struct EagerContext end end +eager_ctx = nothing #EagerContext() +eager_mode = true + Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::EagerContext) = c.ptr function DeviceList(ctx::EagerContext) @@ -32,10 +35,10 @@ function DeviceList(ctx::EagerContext) return this end -mutable struct TensorHandle +mutable struct TensorHandle <: AbstractTensor{Any} ptr::Ptr{Cvoid} - function 
TensorHandle(tensor) + function TensorHandle(tensor::RawTensor) status = Status() ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status) check_status(status) @@ -51,6 +54,7 @@ mutable struct TensorHandle end end +EagerTensor(value) = TensorHandle(RawTensor(value)) Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::TensorHandle) = h.ptr function async_wait(ctx::EagerContext) @@ -70,6 +74,8 @@ function data_type(h::TensorHandle) return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) |> tf_to_jl_type end +Base.eltype(h::TensorHandle) = data_type(h) + function resolve(h::TensorHandle) status = Status() ptr = @tfcall(:TFE_TensorHandleResolve, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) @@ -78,6 +84,10 @@ function resolve(h::TensorHandle) return tensor end +function Base.Array(h::TensorHandle) + convert(Array, resolve(h)) +end + mutable struct EagerOp ptr::Ptr{Cvoid} op_name::String @@ -94,6 +104,22 @@ function EagerOp(ctx::EagerContext, op_name) return this end +function EagerOp(op_name) + global eager_ctx + if eager_ctx === nothing + eager_ctx = EagerContext() + end + ctx = eager_ctx + status = Status() + ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) + check_status(status) + this = EagerOp(ptr, String(op_name)) + finalizer(this) do self + @tfcall(:TFE_DeleteOp, Cvoid, (Ptr{Cvoid},), self) + end + return this +end + Base.unsafe_convert(::Type{Ptr{Cvoid}}, op::EagerOp) = op.ptr function add_input(op::EagerOp, h::TensorHandle) @@ -128,7 +154,8 @@ function test_eager() dtype = data_type(h1) op["T"] = dtype res = execute(op) - return resolve(res[1]) + return res[1] + # return resolve(res[1]) end function setindex!(op::EagerOp, tensor::RawTensor, attr_name) @@ -138,7 +165,7 @@ function setindex!(op::EagerOp, tensor::RawTensor, attr_name) end function setindex!(op::EagerOp, dtype::DataType, attr_name) - @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, attr_name, dtype|>jl_to_df_type) + @tfcall(:TFE_OpSetAttrType, Cvoid, (Ptr{Cvoid}, Cstring, TF_DataType), op, attr_name, dtype |> jl_to_df_type) end function setindex!(op::EagerOp, value::Integer, attr_name) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 5a75be1f..4312473c 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -4,6 +4,7 @@ ###### using MacroTools +using Dates struct OpFunc expr::Expr @@ -19,7 +20,7 @@ If `string` is not allowed as a Julia variable identifier, suffix it with a `_`. Otherwise, return it unchanged. 
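For example, `keyword_escape("type")` returns `"type_"`, as does an operator
name such as `"+"`; any other string is returned as-is.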
""" function keyword_escape(s) - keywords = ["const", "type"] + keywords = ["const", "type", "while", "for", "if"] if (s ∈ keywords) || Base.isoperator(Symbol(s)) s = string(s, "_") end @@ -42,9 +43,9 @@ function opname_to_jlname(name) if idx == length(name) word_end = true else - next_char = name[idx+1] - if idx < length(name)-1 - next_next_char = name[idx+2] + next_char = name[idx + 1] + if idx < length(name) - 1 + next_next_char = name[idx + 2] if isuppercase(cur_char) && isuppercase(next_char) && islowercase(next_next_char) word_end = true end @@ -78,9 +79,8 @@ function to_function(op::tensorflow.OpDef) jl_name = opname_to_jlname(op.name) inputs = [] input_block = quote end - eager_input_block = quote end convert_block = quote end - type_sets = Dict{String, Vector{Symbol}}() + type_sets = Dict{String,Vector{Symbol}}() for (i, input) in enumerate(op.input_arg) sym = Symbol("$(input.name)_") push!(inputs, sym) @@ -91,7 +91,7 @@ function to_function(op::tensorflow.OpDef) end for (input_idx, input) in enumerate(op.input_arg) sym = inputs[input_idx] - convert_target = tf.Tensor{Any} + convert_target = (tf.Tensor{Any}) # Heuristic for when 1-based conversion is necessary # Generally, you can tell by the name of the type attribute. @@ -116,14 +116,14 @@ function to_function(op::tensorflow.OpDef) end end if input._type > 0 && haskey(proto_type_map, input._type) - convert_target = tf.Tensor{proto_type_map[input._type]} + convert_target = (tf.Tensor{(proto_type_map[input._type])}) end convert_expr = if isempty(input.number_attr) && isempty(input.type_list_attr) # Scalar input - :($sym=convert($(convert_target), $sym)) - else # Array argument + :($sym = convert($(convert_target), $sym)) + else # Array argument # :($sym=convert.($(convert_target), $sym)) - :($sym=[convert($(convert_target), x) for x in $sym]) - end + :($sym = [convert($(convert_target), x) for x in $sym]) + end push!(convert_block.args, quote $convert_expr $diff_expr @@ -139,11 +139,8 @@ function to_function(op::tensorflow.OpDef) tf.add_input(desc, $(inputs[input_idx])) end) end - for (input_idx, input) in enumerate(op.input_arg) - push!(eager_input_block.args, quote - tf.add_input(op, $(inputs[input_idx])) - end) - end + eager_input_block = input_block + kwargs = Expr(:parameters) push!(kwargs.args, Expr(:kw, :name, nothing)) attr_block = quote end @@ -156,10 +153,10 @@ function to_function(op::tensorflow.OpDef) m = match(r"list(\(.*\))|(.*)", attr._type) t = m[1] !== nothing ? 
m[1] : m[2] - t_map = Dict("int"=>:(Base.Int), - "bool"=>:(Base.Bool), - "tensor"=>:(TensorFlow.RawTensor), - "string"=>:(Base.String)) + t_map = Dict("int" => :(Base.Int), + "bool" => :(Base.Bool), + "tensor" => :(TensorFlow.RawTensor), + "string" => :(Base.String)) t_target = get(t_map, t, :(Base.identity)) if m[1] === nothing source = :($(t_target)($name)) @@ -181,6 +178,18 @@ function to_function(op::tensorflow.OpDef) end end) end + t_block = [] + for (i, input_arg) in enumerate(op.input_arg) + if has_field(input_arg, :type_attr) + type_attr = input_arg.type_attr + if length(type_attr) > 0 + code = quote + desc[$type_attr] = tf.data_type($(inputs[i])) + end + push!(t_block, code) + end + end + end pushfirst!(inputs, kwargs) scalar_output = true if length(op.output_arg) > 1 @@ -207,9 +216,12 @@ function to_function(op::tensorflow.OpDef) end eager_output_block = if scalar_output quote - execute(op) + tf.execute(desc)[1] + end + else + quote + tf.execute(desc) end - # else end expr = quote @tf.op function $(jl_name)($(inputs...)) @@ -226,18 +238,19 @@ function to_function(op::tensorflow.OpDef) eager_inputs = [] push!(eager_inputs, inputs[1]) for i in 2:length(inputs) - push!(eager_inputs, :($(inputs[i])::TensorHandle) - ) + push!(eager_inputs, :($(inputs[i])::tf.TensorHandle)) end + eager_expr = quote function $(jl_name)($(eager_inputs...)) - op = EagerOp(ctx, $(op.name)) - #$convert_block + desc = tf.EagerOp($(op.name)) + # $convert_block $eager_input_block $attr_block + $(t_block...) + $eager_output_block end - op["T"] = data_type($(inputs[2])) - $eager_output_block + end posargs_str = join((arg.name for arg in op.input_arg), ", ") kwargs_str = [] @@ -250,6 +263,9 @@ function to_function(op::tensorflow.OpDef) catch err default = "?" end + if default === nothing # Not sure why this is happening. It's happening for dropout + default = "?" + end push!(kwargs_str, "$(arg.name)=$default") end if isempty(kwargs_str) @@ -260,9 +276,9 @@ function to_function(op::tensorflow.OpDef) sig = "$jl_name($(posargs_str)$(kwargs_str))" doc_str = string(" ", sig, "\n\n", - escape_string(op.summary) - ) #TODO Workout how to get descriptions for docstrings + escape_string(op.summary)) #TODO Workout how to get descriptions for docstrings expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) + eager_expr = unblock(MacroTools.flatten(MacroTools.striplines(eager_expr))) OpFunc(expr, eager_expr, doc_str, jl_name) end @@ -275,10 +291,17 @@ parsed by Julia's parser. The function is returned with a triple-quoted docstring. """ function stringify_func(opfunc::OpFunc) - s = string(opfunc.expr) - docstring = replace(opfunc.docstring, "\$"=>"") + expr = quote + $(opfunc.expr) + $(opfunc.eager_expr) + end + expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) + + s = string(expr) + docstring = replace(opfunc.docstring, "\$" => "") doc_line = "\"\"\"\n$(docstring)\n\"\"\"" - "$doc_line\n$s\n$doc_line\n$(string(opfunc.eager_expr))\n" + # "$doc_line\n$s\n$doc_line\n$(string(opfunc.eager_expr))\n" + "$doc_line\n$s\n" end stringify_func(op::tensorflow.OpDef) = stringify_func(to_function(op)) @@ -358,7 +381,7 @@ Returns a reference to a Julia function corresponding to the operation. 
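For example, `import_op("Add")` defines a wrapper named `add` in the
`TensorFlow.Ops` module (unless one is already defined) and returns it.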
function import_op(name) jl_name = opname_to_jlname(name) mod = TensorFlow.Ops - if jl_name ∉ names(mod, all=true) + if jl_name ∉ names(mod, all = true) ops = Dict(get_all_op_list()) op = ops[name] op_desc = to_function(op) diff --git a/src/ops.jl b/src/ops.jl index bcf630e8..ba05d225 100644 --- a/src/ops.jl +++ b/src/ops.jl @@ -23,7 +23,11 @@ function tf_promote(args...) if isa(arg, AbstractArray) push!(new_args, arg) else - push!(new_args, convert(Tensor{big_type}, arg)) + if eager_mode + push!(new_args, Ops.cast(arg, DstT = big_type)) # TODO implement promotion + else + push!(new_args, convert(Tensor{big_type}, arg)) + end end end (new_args...,) @@ -115,7 +119,7 @@ end capitalize(s::Symbol) = capitalize(string(s)) -function get_name(name="node") +function get_name(name = "node") graph = get_def_graph() name_idx = graph.name_idx if name == "" @@ -158,7 +162,7 @@ Returns: A `Tensor` that may be used as a handle for feeding a value, but not evaluated directly. """ -@op function placeholder(dtype; name=nothing, shape=nothing) +@op function placeholder(dtype; name = nothing, shape = nothing) local node with_op_name(name, "placeholder") do graph = get_def_graph() diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 14b582ef..ed247580 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,3478 +1,46571 @@ -# Autogenerated on 2018-08-22T19:25:49.359 +# Autogenerated on 2019-02-20T16:49:23.66 module Ops import TensorFlow -import SpecialFunctions const tf = TensorFlow """ - equal(x, y) + reduce_join(inputs, reduction_indices; keep_dims=false, separator=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + local desc + tf.with_op_name(name, "ReduceJoin") do + desc = tf.NodeDescription("ReduceJoin") + inputs_ = convert(Tensor{String}, inputs_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + tf.add_input(desc, inputs_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + tf.Tensor(tf.Operation(desc)) + end + function reduce_join(inputs_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing, separator=nothing) + desc = tf.EagerOp("ReduceJoin") + tf.add_input(desc, inputs_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + if separator !== nothing + desc["separator"] = Base.String(separator) + end + (tf.execute(desc))[1] + end +end """ -tf.@op function equal(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Equal") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Equal") - tf.Tensor(tf.Operation(desc)) + reduce_dataset(input_dataset, initial_state, other_arguments; use_inter_op_parallelism=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + local desc + tf.with_op_name(name, "ReduceDataset") do + desc = 
tf.NodeDescription("ReduceDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + tf.Tensor(tf.Operation(desc)) + end + function reduce_dataset(input_dataset_::tf.TensorHandle, initial_state_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + desc = tf.EagerOp("ReduceDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + (tf.execute(desc))[1] end +end + """ - not_equal(x, y) + tensor_list_from_tensor(tensor, element_shape) """ -tf.@op function not_equal(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("NotEqual") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "NotEqual") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListFromTensor") do + desc = tf.NodeDescription("TensorListFromTensor") + tensor_ = convert(Tensor{Any}, tensor_) + element_shape_ = convert(Tensor{Any}, element_shape_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_from_tensor(tensor_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListFromTensor") + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = 
Base.identity(shape_type) + end + desc["element_dtype"] = tf.data_type(tensor_) + desc["shape_type"] = tf.data_type(element_shape_) + (tf.execute(desc))[1] end +end + """ - less_equal(x, y) + extract_jpeg_shape(contents; output_type=Int32) """ -tf.@op function less_equal(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LessEqual") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "LessEqual") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ExtractJpegShape") do + desc = tf.NodeDescription("ExtractJpegShape") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function extract_jpeg_shape(contents_::tf.TensorHandle; name=nothing, output_type=nothing) + desc = tf.EagerOp("ExtractJpegShape") + tf.add_input(desc, contents_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + (tf.execute(desc))[1] end +end + """ - greater(x, y) + svd(input; compute_uv=true, full_matrices=false) """ -tf.@op function greater(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Greater") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Greater") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "Svd") do + desc = tf.NodeDescription("Svd") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function svd(input_::tf.TensorHandle; name=nothing, compute_uv=nothing, full_matrices=nothing) + desc = tf.EagerOp("Svd") + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) end +end + """ - greater_equal(x, y) + iterator_get_next_sync(iterator) """ -tf.@op function greater_equal(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("GreaterEqual") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "GreaterEqual") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextSync") do + desc = 
tf.NodeDescription("IteratorGetNextSync") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_get_next_sync(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNextSync") + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - less(x, y) + ref_enter(data; is_constant=false, parallel_iterations=10) """ -tf.@op function less(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Less") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Less") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + local desc + tf.with_op_name(name, "RefEnter") do + desc = tf.NodeDescription("RefEnter") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ref_enter(data_::tf.TensorHandle; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + desc = tf.EagerOp("RefEnter") + tf.add_input(desc, data_) + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] end +end + """ - no_op() + erf(x) """ -tf.@op function no_op(; name=nothing) - local desc - tf.with_op_name((()->desc = tf.NodeDescription("NoOp")), name, "NoOp") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function erf(x_; name=nothing) + local desc + tf.with_op_name(name, "Erf") do + desc = tf.NodeDescription("Erf") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function erf(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Erf") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - count_up_to(ref) + lookup_table_export_v2(table_handle) """ -tf.@op function count_up_to(ref_; name=nothing, limit=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("CountUpTo") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if limit !== nothing - desc["limit"] = Base.Int(limit) - end - end), name, "CountUpTo") - tf.Tensor(tf.Operation(desc)) +begin 
+ #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExportV2") do + desc = tf.NodeDescription("LookupTableExportV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + tf.add_input(desc, table_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function lookup_table_export_v2(table_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableExportV2") + tf.add_input(desc, table_handle_) + tf.execute(desc) end +end + """ - decode_gif(contents) + round(x) """ -tf.@op function decode_gif(contents_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DecodeGif") - contents_ = convert(TensorFlow.Tensor{String}, contents_) - tf.add_input(desc, contents_) - end), name, "DecodeGif") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function round(x_; name=nothing) + local desc + tf.with_op_name(name, "Round") do + desc = tf.NodeDescription("Round") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function round(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Round") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - decode_jpeg(contents; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=nothing, dct_method=) + outfeed_dequeue(; device_ordinal=-1) """ -tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DecodeJpeg") - contents_ = convert(TensorFlow.Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - end), name, "DecodeJpeg") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeue") do + desc = tf.NodeDescription("OutfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + desc = tf.EagerOp("OutfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + 
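        # tf.execute returns a vector with one handle per op output;
        # OutfeedDequeue has a single output, so unwrap the first element.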
(tf.execute(desc))[1] end +end + """ - encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=) + tensor_forest_tree_is_initialized_op(tree_handle) """ -tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("EncodeJpeg") - image_ = convert(TensorFlow.Tensor{UInt8}, image_) - tf.add_input(desc, image_) - if format !== nothing - desc["format"] = Base.String(format) - end - if quality !== nothing - desc["quality"] = Base.Int(quality) - end - if progressive !== nothing - desc["progressive"] = Base.Bool(progressive) - end - if optimize_size !== nothing - desc["optimize_size"] = Base.Bool(optimize_size) - end - if chroma_downsampling !== nothing - desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) - end - if density_unit !== nothing - desc["density_unit"] = Base.String(density_unit) - end - if x_density !== nothing - desc["x_density"] = Base.Int(x_density) - end - if y_density !== nothing - desc["y_density"] = Base.Int(y_density) - end - if xmp_metadata !== nothing - desc["xmp_metadata"] = Base.String(xmp_metadata) - end - end), name, "EncodeJpeg") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do + desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_is_initialized_op(tree_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorForestTreeIsInitializedOp") + tf.add_input(desc, tree_handle_) + (tf.execute(desc))[1] end +end + """ - encode_png(image; compression=-1) + merge(inputs) """ -tf.@op function encode_png(image_; name=nothing, compression=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("EncodePng") - image_ = convert(TensorFlow.Tensor{UInt8}, image_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - if compression !== nothing - desc["compression"] = Base.Int(compression) - end - end), name, "EncodePng") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Merge") do + desc = tf.NodeDescription("Merge") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function merge(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("Merge") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(inputs_) + tf.execute(desc) end +end + """ - resize_area(images, size; align_corners=false) + histogram_fixed_width(values, value_range, nbins; dtype=Int32) """ -tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) - 
local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ResizeArea") - images_ = convert(TensorFlow.Tensor{Any}, images_) - size_ = convert(TensorFlow.Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - end), name, "ResizeArea") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HistogramFixedWidth") do + desc = tf.NodeDescription("HistogramFixedWidth") + values_ = convert(Tensor{Any}, values_) + value_range_ = convert(Tensor{Any}, value_range_) + nbins_ = convert(Tensor{Int32}, nbins_) + (values_, value_range_) = tf.tf_promote(values_, value_range_) + tf.add_input(desc, values_) + tf.add_input(desc, value_range_) + tf.add_input(desc, nbins_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function histogram_fixed_width(values_::tf.TensorHandle, value_range_::tf.TensorHandle, nbins_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("HistogramFixedWidth") + tf.add_input(desc, values_) + tf.add_input(desc, value_range_) + tf.add_input(desc, nbins_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(values_) + desc["T"] = tf.data_type(value_range_) + (tf.execute(desc))[1] end +end + """ - resize_bicubic(images, size; align_corners=false) + asin(x) """ -tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ResizeBicubic") - images_ = convert(TensorFlow.Tensor{Any}, images_) - size_ = convert(TensorFlow.Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - end), name, "ResizeBicubic") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function asin(x_; name=nothing) + local desc + tf.with_op_name(name, "Asin") do + desc = tf.NodeDescription("Asin") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function asin(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Asin") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - resize_bilinear(images, size; align_corners=false) + any(input, reduction_indices; keep_dims=false) """ -tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ResizeBilinear") - images_ = convert(TensorFlow.Tensor{Any}, images_) - size_ = convert(TensorFlow.Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - end), name, "ResizeBilinear") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Any") do + desc = tf.NodeDescription("Any") + 
input_ = convert(Tensor{Bool}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function any(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Any") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] end +end + """ - resize_nearest_neighbor(images, size; align_corners=false) + rsqrt_grad(y, dy) """ -tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ResizeNearestNeighbor") - images_ = convert(TensorFlow.Tensor{Any}, images_) - size_ = convert(TensorFlow.Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - end), name, "ResizeNearestNeighbor") - tf.Tensor(tf.Operation(desc)) - end - -""" - extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true) - - -""" -tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ExtractGlimpse") - input_ = convert(TensorFlow.Tensor{Float32}, input_) - size_ = convert(TensorFlow.Tensor{Int32}, size_) - offsets_ = convert(TensorFlow.Tensor{Float32}, offsets_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, offsets_) - if centered !== nothing - desc["centered"] = Base.Bool(centered) - end - if normalized !== nothing - desc["normalized"] = Base.Bool(normalized) - end - if uniform_noise !== nothing - desc["uniform_noise"] = Base.Bool(uniform_noise) - end - end), name, "ExtractGlimpse") - tf.Tensor(tf.Operation(desc)) - end - -""" - crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=nothing) - - -""" -tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("CropAndResize") - image_ = convert(TensorFlow.Tensor{Any}, image_) - boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_) - box_ind_ = convert(TensorFlow.Tensor{Int32}, box_ind_) - crop_size_ = convert(TensorFlow.Tensor{Int32}, crop_size_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, crop_size_) - if method !== nothing - desc["method"] = Base.String(method) - end - if extrapolation_value !== nothing - desc["extrapolation_value"] = Base.identity(extrapolation_value) - end - end), name, "CropAndResize") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "RsqrtGrad") do + desc = tf.NodeDescription("RsqrtGrad") + y_ = convert(Tensor{Any}, 
y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function rsqrt_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RsqrtGrad") + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + (tf.execute(desc))[1] end +end + """ - adjust_hue(images, delta) + tensor_array_scatter(handle, indices, value, flow_in) """ -tf.@op function adjust_hue(images_, delta_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AdjustHue") - images_ = convert(TensorFlow.Tensor{Float32}, images_) - delta_ = convert(TensorFlow.Tensor{Float32}, delta_) - tf.add_input(desc, images_) - tf.add_input(desc, delta_) - end), name, "AdjustHue") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatter") do + desc = tf.NodeDescription("TensorArrayScatter") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_scatter(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayScatter") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] end +end + """ - adjust_saturation(images, scale) + dynamic_partition(data, partitions) """ -tf.@op function adjust_saturation(images_, scale_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AdjustSaturation") - images_ = convert(TensorFlow.Tensor{Float32}, images_) - scale_ = convert(TensorFlow.Tensor{Float32}, scale_) - tf.add_input(desc, images_) - tf.add_input(desc, scale_) - end), name, "AdjustSaturation") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) + local desc + tf.with_op_name(name, "DynamicPartition") do + desc = tf.NodeDescription("DynamicPartition") + data_ = convert(Tensor{Any}, data_) + partitions_ = convert(Tensor{Int32}, partitions_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, partitions_) + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_partitions + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function dynamic_partition(data_::tf.TensorHandle, partitions_::tf.TensorHandle; name=nothing, num_partitions=nothing) + desc = tf.EagerOp("DynamicPartition") + tf.add_input(desc, data_) + tf.add_input(desc, partitions_) + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + desc["T"] = tf.data_type(data_) + tf.execute(desc) end +end + """ - draw_bounding_boxes(images, boxes) + 
experimental_private_thread_pool_dataset(input_dataset, num_threads) """ -tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DrawBoundingBoxes") - images_ = convert(TensorFlow.Tensor{Float32}, images_) - boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, boxes_) - end), name, "DrawBoundingBoxes") - tf.Tensor(tf.Operation(desc)) - end - -""" - non_max_suppression(boxes, scores, max_output_size; iou_threshold=nothing) - - -""" -tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("NonMaxSuppression") - boxes_ = convert(TensorFlow.Tensor{Float32}, boxes_) - scores_ = convert(TensorFlow.Tensor{Float32}, scores_) - max_output_size_ = convert(TensorFlow.Tensor{Int32}, max_output_size_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - if iou_threshold !== nothing - desc["iou_threshold"] = Base.identity(iou_threshold) - end - end), name, "NonMaxSuppression") - tf.Tensor(tf.Operation(desc)) - end - -""" - sample_distorted_bounding_box(image_size, bounding_boxes; seed=0, seed2=0, min_object_covered=nothing, aspect_ratio_range=Int64[], area_range=Int64[], max_attempts=100, use_image_if_no_bounding_boxes=false) - - -""" -tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SampleDistortedBoundingBox") - image_size_ = convert(TensorFlow.Tensor{Any}, image_size_) - bounding_boxes_ = convert(TensorFlow.Tensor{Float32}, bounding_boxes_) - (image_size_,) = tf.tf_promote(image_size_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if min_object_covered !== nothing - desc["min_object_covered"] = Base.identity(min_object_covered) - end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) - end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - end), name, "SampleDistortedBoundingBox") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_threads_ = convert(Tensor{Int64}, num_threads_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_threads_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if 
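#= Optional attributes follow a single pattern throughout: keyword arguments
   default to nothing, and only explicitly supplied values are written onto the
   description, so TensorFlow's registered defaults apply otherwise. The guard,
   shown with one of this op's own attributes:

       if output_types !== nothing
           desc["output_types"] = map(Base.identity, output_types)
       end
=#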
output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_private_thread_pool_dataset(input_dataset_::tf.TensorHandle, num_threads_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_threads_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - logical_and(x, y) + reader_serialize_state(reader_handle) """ -tf.@op function logical_and(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LogicalAnd") - x_ = convert(TensorFlow.Tensor{Bool}, x_) - y_ = convert(TensorFlow.Tensor{Bool}, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "LogicalAnd") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeState") do + desc = tf.NodeDescription("ReaderSerializeState") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_serialize_state(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderSerializeState") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] end +end + """ - logical_not(x) + right_shift(x, y) """ -tf.@op function logical_not(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LogicalNot") - x_ = convert(TensorFlow.Tensor{Bool}, x_) - tf.add_input(desc, x_) - end), name, "LogicalNot") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function right_shift(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "RightShift") do + desc = tf.NodeDescription("RightShift") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function right_shift(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RightShift") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] end +end + """ - logical_or(x, y) + avg_pool3d(input; data_format=NDHWC) """ -tf.@op function logical_or(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LogicalOr") - x_ = convert(TensorFlow.Tensor{Bool}, x_) - y_ = convert(TensorFlow.Tensor{Bool}, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "LogicalOr") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3D") do + desc = tf.NodeDescription("AvgPool3D") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] 
= map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function avg_pool3d(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool3D") + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - add_n(inputs) + encode_png(image; compression=-1) """ -tf.@op function add_n(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AddN") - inputs_ = [convert(TensorFlow.Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end), name, "AddN") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_png(image_; name=nothing, compression=nothing) + local desc + tf.with_op_name(name, "EncodePng") do + desc = tf.NodeDescription("EncodePng") + image_ = convert(Tensor{UInt8}, image_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + if compression !== nothing + desc["compression"] = Base.Int(compression) + end + end + tf.Tensor(tf.Operation(desc)) + end + function encode_png(image_::tf.TensorHandle; name=nothing, compression=nothing) + desc = tf.EagerOp("EncodePng") + tf.add_input(desc, image_) + if compression !== nothing + desc["compression"] = Base.Int(compression) + end + desc["T"] = tf.data_type(image_) + (tf.execute(desc))[1] end - -""" - arg_min(input, dimension; output_type=Int64) +end """ -tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ArgMin") - input_ = convert(TensorFlow.Tensor{Any}, input_) - dimension_ = convert(TensorFlow.Tensor{Int32}, dimension_) - dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end - end), name, "ArgMin") - tf.Tensor(tf.Operation(desc)) + debug_identity(input; device_name=, tensor_name=, debug_urls=Int64[], gated_grpc=false) + +Debug Identity Op. 
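Forwards the input tensor unchanged; the device_name, tensor_name, and
debug_urls attributes only control how and where debug copies of the tensor
are reported.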
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugIdentity") do + desc = tf.NodeDescription("DebugIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + tf.Tensor(tf.Operation(desc)) + end + function debug_identity(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + desc = tf.EagerOp("DebugIdentity") + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - arg_max(input, dimension; output_type=Int64) + imag(input) """ -tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ArgMax") - input_ = convert(TensorFlow.Tensor{Any}, input_) - dimension_ = convert(TensorFlow.Tensor{Int32}, dimension_) - dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end - end), name, "ArgMax") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function imag(input_; name=nothing) + local desc + tf.with_op_name(name, "Imag") do + desc = tf.NodeDescription("Imag") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function imag(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Imag") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end - -""" - add(x, y) +end """ -tf.@op function add(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Add") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Add") - tf.Tensor(tf.Operation(desc)) + resource_sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do + desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") + var_ = convert(Tensor{Any}, 
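#= Ops taking index tensors subtract 1 during graph construction so callers can
   use 1-based Julia indices while the kernel receives the 0-based values
   TensorFlow expects, e.g. for the indices input below:

       indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)

   The eager methods, as generated here, apply no such shift, so TensorHandle
   callers pass 0-based indices directly.
=#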
var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyFtrlV2") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(l2_shrinkage_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] end - -""" - sub(x, y) +end """ -tf.@op function sub(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Sub") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Sub") - tf.Tensor(tf.Operation(desc)) + stage_clear(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageClear") do + desc = tf.NodeDescription("StageClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StageClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = 
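#= tf.execute always returns a Vector of output handles: single-output ops index
   it with [1], while multi-output ops return the Vector as-is. A hypothetical
   helper making the single-output case explicit:

       function single_output(desc::tf.EagerOp)
           outs = tf.execute(desc)
           length(outs) == 1 || error("expected 1 output, got $(length(outs))")
           return outs[1]
       end
=#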
Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] end +end + """ - mat_mul(a, b; transpose_a=false, transpose_b=false) + sign(x) """ -tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatMul") - a_ = convert(TensorFlow.Tensor{Any}, a_) - b_ = convert(TensorFlow.Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - end), name, "MatMul") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sign(x_; name=nothing) + local desc + tf.with_op_name(name, "Sign") do + desc = tf.NodeDescription("Sign") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function sign(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sign") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - mul(x, y) + population_count(x) """ -tf.@op function mul(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Mul") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Mul") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function population_count(x_; name=nothing) + local desc + tf.with_op_name(name, "PopulationCount") do + desc = tf.NodeDescription("PopulationCount") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function population_count(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("PopulationCount") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - pow(x, y) + neg(x) """ -tf.@op function pow(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Pow") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Pow") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function neg(x_; name=nothing) + local desc + tf.with_op_name(name, "Neg") do + desc = tf.NodeDescription("Neg") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function neg(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Neg") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - matrix_solve(matrix, rhs; adjoint=false) + anonymous_iterator() """ -tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatrixSolve") - matrix_ = convert(TensorFlow.Tensor{Any}, matrix_) - rhs_ = 
convert(TensorFlow.Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - end), name, "MatrixSolve") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "AnonymousIterator") do + desc = tf.NodeDescription("AnonymousIterator") + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("AnonymousIterator") + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false) + sparse_reduce_sum(input_indices, input_values, input_shape, reduction_axes; keep_dims=false) """ -tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatrixTriangularSolve") - matrix_ = convert(TensorFlow.Tensor{Any}, matrix_) - rhs_ = convert(TensorFlow.Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - end), name, "MatrixTriangularSolve") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSum") do + desc = tf.NodeDescription("SparseReduceSum") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_reduce_sum(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceSum") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_values_) + (tf.execute(desc))[1] end - -""" - matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true) +end """ -tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - local desc - tf.with_op_name((()->begin - desc = 
tf.NodeDescription("MatrixSolveLs") - matrix_ = convert(TensorFlow.Tensor{Any}, matrix_) - rhs_ = convert(TensorFlow.Tensor{Any}, rhs_) - l2_regularizer_ = convert(TensorFlow.Tensor{Float64}, l2_regularizer_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) - end - end), name, "MatrixSolveLs") - tf.Tensor(tf.Operation(desc)) + filter_dataset(input_dataset, other_arguments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterDataset") do + desc = tf.NodeDescription("FilterDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function filter_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FilterDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - cholesky(input) + string_length(input; unit=BYTE) """ -tf.@op function cholesky(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Cholesky") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Cholesky") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_length(input_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "StringLength") do + desc = tf.NodeDescription("StringLength") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_length(input_::tf.TensorHandle; name=nothing, unit=nothing) + desc = tf.EagerOp("StringLength") + tf.add_input(desc, input_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + (tf.execute(desc))[1] end +end + """ - neg(x) + conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) """ -tf.@op function neg(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Neg") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Neg") - 
tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3D") do + desc = tf.NodeDescription("Conv3D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv3d(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3D") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + (tf.execute(desc))[1] end - -""" - square(x) +end """ -tf.@op function square(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Square") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Square") - tf.Tensor(tf.Operation(desc)) + retrieve_tpu_embedding_adagrad_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) end +end + """ - shape(input; out_type=Int32) + optional_has_value(optional) """ -tf.@op function shape(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Shape") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = 
tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end), name, "Shape") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_has_value(optional_; name=nothing) + local desc + tf.with_op_name(name, "OptionalHasValue") do + desc = tf.NodeDescription("OptionalHasValue") + optional_ = convert(Tensor{Any}, optional_) + tf.add_input(desc, optional_) + end + tf.Tensor(tf.Operation(desc)) + end + function optional_has_value(optional_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("OptionalHasValue") + tf.add_input(desc, optional_) + (tf.execute(desc))[1] end - -""" - unsorted_segment_sum(data, segment_ids, num_segments) +end """ -tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("UnsortedSegmentSum") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(TensorFlow.Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end), name, "UnsortedSegmentSum") - tf.Tensor(tf.Operation(desc)) + apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ApplyAdam") do + desc = tf.NodeDescription("ApplyAdam") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_adam(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ApplyAdam") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + 
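#= All ten ApplyAdam inputs share the type attr "T", so the eager method assigns
   desc["T"] once per input and the final assignment wins. That is only sound
   when every handle has the same dtype; the graph path guarantees it via
   tf.tf_promote, while eager callers must supply matching handles, e.g.
   (hypothetical check):

       @assert tf.data_type(m_) == tf.data_type(var_) == tf.data_type(grad_)
=#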
tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(v_) + desc["T"] = tf.data_type(beta1_power_) + desc["T"] = tf.data_type(beta2_power_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(beta1_) + desc["T"] = tf.data_type(beta2_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] end - -""" - unsorted_segment_max(data, segment_ids, num_segments) +end """ -tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("UnsortedSegmentMax") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(TensorFlow.Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end), name, "UnsortedSegmentMax") - tf.Tensor(tf.Operation(desc)) + cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsToCanonical") do + desc = tf.NodeDescription("CudnnRNNParamsToCanonical") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + params_ = convert(Tensor{Any}, params_) + (params_,) = tf.tf_promote(params_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + tf.add_input(desc, params_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnn_params_to_canonical(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNParamsToCanonical") + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + 
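#= Multi-output ops differ between the two paths: the graph method materializes
   each output explicitly,

       out = tf.Tensor[]
       op = tf.Operation(desc)
       for out_idx = 1:2
           push!(out, tf.Tensor(op, out_idx))
       end
       out

   while the eager method simply returns the Vector produced by tf.execute(desc)
   without indexing [1].
=#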
tf.add_input(desc, input_size_) + tf.add_input(desc, params_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(params_) + tf.execute(desc) end +end + """ - segment_sum(data, segment_ids) + irfft3d(input, fft_length) """ -tf.@op function segment_sum(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SegmentSum") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end), name, "SegmentSum") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft3d(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT3D") do + desc = tf.NodeDescription("IRFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function irfft3d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IRFFT3D") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] end +end + """ - segment_max(data, segment_ids) + angle(input) """ -tf.@op function segment_max(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SegmentMax") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end), name, "SegmentMax") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function angle(input_; name=nothing) + local desc + tf.with_op_name(name, "Angle") do + desc = tf.NodeDescription("Angle") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function angle(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Angle") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - segment_mean(data, segment_ids) + tensor_forest_tree_resource_handle_op(; container=, shared_name=) """ -tf.@op function segment_mean(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SegmentMean") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = 
tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end), name, "SegmentMean") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do + desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TensorForestTreeResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] end - -""" - segment_min(data, segment_ids) +end """ -tf.@op function segment_min(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SegmentMin") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end), name, "SegmentMin") - tf.Tensor(tf.Operation(desc)) + learned_unigram_candidate_sampler(true_classes; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LearnedUnigramCandidateSampler") do + desc = tf.NodeDescription("LearnedUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function learned_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("LearnedUnigramCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) end +end -""" - segment_prod(data, segment_ids) +""" + _arg() +A graph node which represents an 
argument to a function. """ -tf.@op function segment_prod(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SegmentProd") - data_ = convert(TensorFlow.Tensor{Any}, data_) - segment_ids_ = convert(TensorFlow.Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end), name, "SegmentProd") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _arg(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Arg") do + desc = tf.NodeDescription("_Arg") + if index !== nothing + desc["index"] = Base.Int(index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _arg(; name=nothing, index=nothing) + desc = tf.EagerOp("_Arg") + if index !== nothing + desc["index"] = Base.Int(index) + end + (tf.execute(desc))[1] end +end + """ - relu(features) + matrix_square_root(input) """ -tf.@op function relu(features_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Relu") - features_ = convert(TensorFlow.Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end), name, "Relu") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_square_root(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSquareRoot") do + desc = tf.NodeDescription("MatrixSquareRoot") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_square_root(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixSquareRoot") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - relu6(features) + sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense) """ -tf.@op function relu6(features_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Relu6") - features_ = convert(TensorFlow.Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end), name, "Relu6") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseMul") do + desc = tf.NodeDescription("SparseDenseCwiseMul") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_dense_cwise_mul(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseMul") + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + desc["T"] = tf.data_type(sp_values_) + desc["T"] = tf.data_type(dense_) + 
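#= Sparse ops pin their structural inputs to fixed dtypes - indices and shapes
   are Int64 tensors - and only the value tensors participate in tf.tf_promote,
   so "T" is inferred from sp_values/dense alone. Building the structural
   inputs (hypothetical literals):

       sp_indices = tf.constant(Int64[0 0; 1 1])
       sp_shape   = tf.constant(Int64[2, 2])
=#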
(tf.execute(desc))[1] end - -""" - elu(features) +end """ -tf.@op function elu(features_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Elu") - features_ = convert(TensorFlow.Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end), name, "Elu") - tf.Tensor(tf.Operation(desc)) + tensor_array_concat_v3(handle, flow_in; element_shape_except0=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV3") do + desc = tf.NodeDescription("TensorArrayConcatV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_concat_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcatV3") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + tf.execute(desc) end +end + """ - softplus(features) + unicode_script(input) """ -tf.@op function softplus(features_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Softplus") - features_ = convert(TensorFlow.Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end), name, "Softplus") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_script(input_; name=nothing) + local desc + tf.with_op_name(name, "UnicodeScript") do + desc = tf.NodeDescription("UnicodeScript") + input_ = convert(Tensor{Int32}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function unicode_script(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnicodeScript") + tf.add_input(desc, input_) + (tf.execute(desc))[1] end +end + """ - softsign(features) + batch_cholesky_grad(l, grad) """ -tf.@op function softsign(features_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Softsign") - features_ = convert(TensorFlow.Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end), name, "Softsign") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholeskyGrad") do + desc = tf.NodeDescription("BatchCholeskyGrad") + l_ = convert(Tensor{Any}, l_) + grad_ = convert(Tensor{Any}, grad_) + (l_, grad_) = tf.tf_promote(l_, grad_) + tf.add_input(desc, l_) + tf.add_input(desc, grad_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_cholesky_grad(l_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchCholeskyGrad") + tf.add_input(desc, 
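#= End-to-end eager sketch for a two-input op such as BatchCholeskyGrad,
   assuming eager mode is active and using hypothetical handles built from
   RawTensors (see src/eager.jl):

       l = tf.TensorHandle(tf.RawTensor([4.0 0.0; 0.0 9.0]))
       g = tf.TensorHandle(tf.RawTensor([1.0 0.0; 0.0 1.0]))
       out = batch_cholesky_grad(l, g)  # dispatches to the eager method
       tf.resolve(out)                  # copy the result back to a RawTensor
=#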
l_) + tf.add_input(desc, grad_) + desc["T"] = tf.data_type(l_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] end +end + """ - softmax(logits) + mean(input, reduction_indices; keep_dims=false) """ -tf.@op function softmax(logits_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Softmax") - logits_ = convert(TensorFlow.Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - end), name, "Softmax") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Mean") do + desc = tf.NodeDescription("Mean") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mean(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Mean") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] end +end + """ - sigmoid(x) + batch_fft(input) """ -tf.@op function sigmoid(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Sigmoid") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Sigmoid") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT") do + desc = tf.NodeDescription("BatchFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_fft(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchFFT") + tf.add_input(desc, input_) + (tf.execute(desc))[1] end +end + """ - conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + sin(x) """ -tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Conv3D") - input_ = convert(TensorFlow.Tensor{Any}, input_) - filter_ = convert(TensorFlow.Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end), name, "Conv3D") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sin(x_; name=nothing) + local desc + tf.with_op_name(name, "Sin") do + desc = tf.NodeDescription("Sin") + x_ = 
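#= Graph-mode methods accept plain Julia values: convert(Tensor{Any}, x) lifts
   constants into the graph and tf.tf_promote then narrows the element type.
   Hypothetical session usage:

       y = sin(tf.constant([1.5]))
       run(tf.Session(), y)  # ≈ [0.997]
=#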
convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function sin(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sin") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - max_pool(input; data_format=NHWC) + boosted_trees_ensemble_resource_handle_op(; container=, shared_name=) """ -tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MaxPool") - input_ = convert(TensorFlow.Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end), name, "MaxPool") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] end - -""" - max_pool3d(input; data_format=NDHWC) +end """ -tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MaxPool3D") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end), name, "MaxPool3D") - tf.Tensor(tf.Operation(desc)) + quantized_max_pool(input, min_input, max_input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "QuantizedMaxPool") do + desc = tf.NodeDescription("QuantizedMaxPool") + input_ = convert(Tensor{Any}, input_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== 
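# Note the asymmetry in how the "T" attr is supplied: the graph-mode methods lean
# on `tf.tf_promote` and graph construction to infer element types, while the
# eager methods must read the dtype straight off the concrete input handles,
# since nothing infers it at TFE op-building time:
#
#     desc["T"] = tf.data_type(x_)   # eager only; the graph method never sets "T"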
nothing + desc["padding"] = Base.String(padding) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_max_pool(input_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("QuantizedMaxPool") + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) end - -""" - avg_pool(value; data_format=NHWC) +end """ -tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AvgPool") - value_ = convert(TensorFlow.Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end), name, "AvgPool") - tf.Tensor(tf.Operation(desc)) + ordered_map_stage(key, indices, values; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapStage") do + desc = tf.NodeDescription("OrderedMapStage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_stage(key_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapStage") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] 
= Base.String(shared_name) + end + (tf.execute(desc))[1] end - -""" - avg_pool3d(input; data_format=NDHWC) +end """ -tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AvgPool3D") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end), name, "AvgPool3D") - tf.Tensor(tf.Operation(desc)) + partitioned_call(args; config=, config_proto=, executor_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "PartitionedCall") do + desc = tf.NodeDescription("PartitionedCall") + args_ = [convert(Tensor{Any}, x) for x = args_] + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function partitioned_call(args_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + desc = tf.EagerOp("PartitionedCall") + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + (tf.execute(desc))[1] end - -""" - log_softmax(logits) +end """ -tf.@op function log_softmax(logits_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LogSoftmax") - logits_ = convert(TensorFlow.Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - end), name, "LogSoftmax") - tf.Tensor(tf.Operation(desc)) + sparse_apply_adagrad(var, accum, lr, grad, indices; use_locking=false, update_slots=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdagrad") do + desc = tf.NodeDescription("SparseApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, lr_, grad_) = tf.tf_promote(var_, 
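# Indexing caution: the graph-mode method above shifts `indices_` down by one
# (Julia callers pass 1-based indices, the kernel wants 0-based), but the eager
# method below forwards the handles untouched, so as generated an eager caller
# has to supply 0-based indices themselves. A sketch with hypothetical variables:
#
#     sparse_apply_adagrad(var, accum, lr, grad, [1, 2])          # graph: rows 1 and 2
#     sparse_apply_adagrad(var_h, accum_h, lr_h, grad_h, idx_h)   # eager: idx_h must
#                                                                 # already be 0-based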
accum_, lr_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("SparseApplyAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] end - -""" - dilation2d(input, filter) +end """ -tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Dilation2D") - input_ = convert(TensorFlow.Tensor{Any}, input_) - filter_ = convert(TensorFlow.Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end), name, "Dilation2D") - tf.Tensor(tf.Operation(desc)) - end - -""" - conv2d(input, filter; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) - - -""" -tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Conv2D") - input_ = convert(TensorFlow.Tensor{Any}, input_) - filter_ = convert(TensorFlow.Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end), name, "Conv2D") - tf.Tensor(tf.Operation(desc)) + decode_proto_v2(bytes; descriptor_source=local://, message_format=binary, sanitize=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + local desc + tf.with_op_name(name, "DecodeProtoV2") do + desc = tf.NodeDescription("DecodeProtoV2") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if message_type !== nothing + desc["message_type"] = 
Base.String(message_type) + end + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + if message_format !== nothing + desc["message_format"] = Base.String(message_format) + end + if sanitize !== nothing + desc["sanitize"] = Base.Bool(sanitize) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function decode_proto_v2(bytes_::tf.TensorHandle; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + desc = tf.EagerOp("DecodeProtoV2") + tf.add_input(desc, bytes_) + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + if message_format !== nothing + desc["message_format"] = Base.String(message_format) + end + if sanitize !== nothing + desc["sanitize"] = Base.Bool(sanitize) + end + tf.execute(desc) end +end + """ - random_uniform(shape; seed=0, seed2=0) + betainc(a, b, x) """ -tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("RandomUniform") - shape_ = convert(TensorFlow.Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end), name, "RandomUniform") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function betainc(a_, b_, x_; name=nothing) + local desc + tf.with_op_name(name, "Betainc") do + desc = tf.NodeDescription("Betainc") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + x_ = convert(Tensor{Any}, x_) + (a_, b_, x_) = tf.tf_promote(a_, b_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function betainc(a_::tf.TensorHandle, b_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Betainc") + tf.add_input(desc, a_) + tf.add_input(desc, b_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(b_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - random_standard_normal(shape; seed=0, seed2=0) + guarantee_const(input) """ -tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("RandomStandardNormal") - shape_ = convert(TensorFlow.Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end), name, "RandomStandardNormal") - tf.Tensor(tf.Operation(desc)) +begin + #= 
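# Multi-output ops follow a third convention, visible in decode_proto_v2 just
# above: the graph method collects `tf.Tensor(op, out_idx)` for each output
# index, while the eager method returns the whole vector from `tf.execute(desc)`
# rather than indexing `[1]`. Either way the result destructures the same; a
# sketch with a hypothetical handle and elided attrs:
#
#     sizes, values = decode_proto_v2(bytes_h; message_type=..., field_names=..., output_types=...)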
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function guarantee_const(input_; name=nothing) + local desc + tf.with_op_name(name, "GuaranteeConst") do + desc = tf.NodeDescription("GuaranteeConst") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function guarantee_const(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GuaranteeConst") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - random_shuffle(value; seed=0, seed2=0) + decode_bmp(contents; channels=0) """ -tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("RandomShuffle") - value_ = convert(TensorFlow.Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end), name, "RandomShuffle") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing) + local desc + tf.with_op_name(name, "DecodeBmp") do + desc = tf.NodeDescription("DecodeBmp") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_bmp(contents_::tf.TensorHandle; name=nothing, channels=nothing) + desc = tf.EagerOp("DecodeBmp") + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + (tf.execute(desc))[1] end +end + """ - strided_slice(input, begin, end, strides; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0) + boosted_trees_bucketize(float_values, bucket_boundaries) """ -tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("StridedSlice") - input_ = convert(TensorFlow.Tensor{Any}, input_) - begin_ = convert(TensorFlow.Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(TensorFlow.Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(TensorFlow.Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - (input_,) = tf.tf_promote(input_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - 
desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end), name, "StridedSlice") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesBucketize") do + desc = tf.NodeDescription("BoostedTreesBucketize") + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + tf.add_input(desc, float_values_) + tf.add_input(desc, bucket_boundaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_bucketize(float_values_::tf.TensorHandle, bucket_boundaries_::tf.TensorHandle; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesBucketize") + tf.add_input(desc, float_values_) + tf.add_input(desc, bucket_boundaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + tf.execute(desc) end +end + """ - expand_dims(input, dim) + shutdown_distributed_tpu() """ -tf.@op function expand_dims(input_, dim_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ExpandDims") - input_ = convert(TensorFlow.Tensor{Any}, input_) - dim_ = convert(TensorFlow.Tensor{Int32}, dim_) - dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) - (input_,) = tf.tf_promote(input_) - (dim_,) = tf.tf_promote(dim_) - tf.add_input(desc, input_) - tf.add_input(desc, dim_) - end), name, "ExpandDims") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shutdown_distributed_tpu(; name=nothing) + local desc + tf.with_op_name(name, "ShutdownDistributedTPU") do + desc + tf.NodeDescription("ShutdownDistributedTPU") + end + tf.Tensor(tf.Operation(desc)) + end + function shutdown_distributed_tpu(; name=nothing) + desc = tf.EagerOp("ShutdownDistributedTPU") + (tf.execute(desc))[1] end +end + """ - tile(input, multiples) + experimental_stats_aggregator_summary(iterator) """ -tf.@op function tile(input_, multiples_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Tile") - input_ = convert(TensorFlow.Tensor{Any}, input_) - multiples_ = convert(TensorFlow.Tensor{Int32}, multiples_) - (input_,) = tf.tf_promote(input_) - (multiples_,) = tf.tf_promote(multiples_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) - end), name, "Tile") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_stats_aggregator_summary(iterator_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") + tf.add_input(desc, iterator_) + (tf.execute(desc))[1] 
end +end + """ - pad(input, paddings) + timestamp() """ -tf.@op function pad(input_, paddings_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Pad") - input_ = convert(TensorFlow.Tensor{Any}, input_) - paddings_ = convert(TensorFlow.Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - end), name, "Pad") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function timestamp(; name=nothing) + local desc + tf.with_op_name(name, "Timestamp") do + desc + tf.NodeDescription("Timestamp") + end + tf.Tensor(tf.Operation(desc)) + end + function timestamp(; name=nothing) + desc = tf.EagerOp("Timestamp") + (tf.execute(desc))[1] end +end + """ - gather(params, indices; validate_indices=true) + matrix_exponential(input) """ -tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Gather") - params_ = convert(TensorFlow.Tensor{Any}, params_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - end), name, "Gather") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_exponential(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixExponential") do + desc = tf.NodeDescription("MatrixExponential") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_exponential(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixExponential") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - gather_nd(params, indices) + size(input; out_type=Int32) """ -tf.@op function gather_nd(params_, indices_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("GatherNd") - params_ = convert(TensorFlow.Tensor{Any}, params_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - end), name, "GatherNd") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function size(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Size") do + desc = tf.NodeDescription("Size") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function size(input_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("Size") + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - scatter_nd(indices, updates, shape) + add_n(inputs) """ -tf.@op function 
scatter_nd(indices_, updates_, shape_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterNd") - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - shape_ = convert(TensorFlow.Tensor{Any}, shape_) - (updates_,) = tf.tf_promote(updates_) - (indices_, shape_) = tf.tf_promote(indices_, shape_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - tf.add_input(desc, shape_) - end), name, "ScatterNd") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_n(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "AddN") do + desc = tf.NodeDescription("AddN") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function add_n(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("AddN") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(inputs_) + (tf.execute(desc))[1] end +end + """ - dynamic_partition(data, partitions) + sparse_segment_sum(data, indices, segment_ids) """ -tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DynamicPartition") - data_ = convert(TensorFlow.Tensor{Any}, data_) - partitions_ = convert(TensorFlow.Tensor{Int32}, partitions_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, partitions_) - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - end), name, "DynamicPartition") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_partitions - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSum") do + desc = tf.NodeDescription("SparseSegmentSum") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) end - out + function sparse_segment_sum(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentSum") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + (tf.execute(desc))[1] end +end + """ - dynamic_stitch(indices, data) + batch_dataset(input_dataset, batch_size) """ -tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DynamicStitch") - indices_ = [convert(TensorFlow.Tensor{Int32}, x) for x = indices_] - data_ = [convert(TensorFlow.Tensor{Any}, x) for x = data_] - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - 
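# List-valued inputs are converted elementwise before being added, as in add_n
# above (`[convert(Tensor{Any}, x) for x = inputs_]`). Note, though, that the
# eager signatures still annotate such arguments as a single ::tf.TensorHandle,
# which a vector of handles will not match; the annotation looks like a
# placeholder at this stage of the port. Graph-mode sketch with hypothetical
# tensors:
#
#     total = add_n([a, b, c])   # inputs are promoted to a common dtype first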
desc["N"] = Base.Int(N) - end - end), name, "DynamicStitch") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDataset") do + desc = tf.NodeDescription("BatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("BatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end - -""" - pack(values; axis=0) +end """ -tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Pack") - values_ = [convert(TensorFlow.Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end), name, "Pack") - tf.Tensor(tf.Operation(desc)) + record_input(; file_random_seed=301, file_shuffle_shift_ratio=?, file_buffer_size=10000, file_parallelism=16, batch_size=32, compression_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "RecordInput") do + desc = tf.NodeDescription("RecordInput") + if file_pattern !== nothing + desc["file_pattern"] = Base.String(file_pattern) + end + if file_random_seed !== nothing + desc["file_random_seed"] = Base.Int(file_random_seed) + end + if file_shuffle_shift_ratio !== nothing + desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) + end + if file_buffer_size !== nothing + desc["file_buffer_size"] = Base.Int(file_buffer_size) + end + if file_parallelism !== nothing + desc["file_parallelism"] = Base.Int(file_parallelism) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + desc = tf.EagerOp("RecordInput") + if file_pattern !== nothing + desc["file_pattern"] = Base.String(file_pattern) + end + if file_random_seed !== nothing + desc["file_random_seed"] = Base.Int(file_random_seed) + end + if 
file_shuffle_shift_ratio !== nothing + desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) + end + if file_buffer_size !== nothing + desc["file_buffer_size"] = Base.Int(file_buffer_size) + end + if file_parallelism !== nothing + desc["file_parallelism"] = Base.Int(file_parallelism) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + (tf.execute(desc))[1] end +end + """ - concat_v2(values, axis) + queue_dequeue_up_to_v2(handle, n; timeout_ms=-1) """ -tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ConcatV2") - values_ = [convert(TensorFlow.Tensor{Any}, x) for x = values_] - axis_ = convert(TensorFlow.Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (values_,) = tf.tf_promote(values_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, values_) - tf.add_input(desc, axis_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end), name, "ConcatV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpToV2") do + desc = tf.NodeDescription("QueueDequeueUpToV2") + handle_ = convert(Tensor{Any}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue_up_to_v2(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueUpToV2") + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] end +end + """ - self_adjoint_eig_v2(input; compute_v=true) + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = 
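# The kwarg-to-attr coercions follow one fixed mapping in both modes: string
# attrs go through Base.String, integer attrs through Base.Int, boolean attrs
# through Base.Bool, while float attrs (file_shuffle_shift_ratio above), dtype
# attrs, and function attrs pass through Base.identity, and list attrs are
# mapped elementwise:
#
#     desc["component_types"] = map(Base.identity, component_types)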
tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end """ -tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SelfAdjointEigV2") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) - end - end), name, "SelfAdjointEigV2") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters, ms, mom, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) end - out + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] end +end + """ - is_finite(x) + serialize_tensor(tensor) """ -tf.@op function is_finite(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("IsFinite") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "IsFinite") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_tensor(tensor_; name=nothing) + local desc + tf.with_op_name(name, "SerializeTensor") do + desc = 
tf.NodeDescription("SerializeTensor") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + end + tf.Tensor(tf.Operation(desc)) + end + function serialize_tensor(tensor_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SerializeTensor") + tf.add_input(desc, tensor_) + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] end +end + """ - is_nan(x) + mul(x, y) """ -tf.@op function is_nan(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("IsNan") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "IsNan") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mul(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mul") do + desc = tf.NodeDescription("Mul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function mul(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Mul") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] end +end + """ - is_inf(x) + softmax_cross_entropy_with_logits(features, labels) """ -tf.@op function is_inf(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("IsInf") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "IsInf") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + local desc + tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do + desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") + features_ = convert(Tensor{Any}, features_) + labels_ = convert(Tensor{Any}, labels_) + (features_, labels_) = tf.tf_promote(features_, labels_) + tf.add_input(desc, features_) + tf.add_input(desc, labels_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function softmax_cross_entropy_with_logits(features_::tf.TensorHandle, labels_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") + tf.add_input(desc, features_) + tf.add_input(desc, labels_) + desc["T"] = tf.data_type(features_) + desc["T"] = tf.data_type(labels_) + tf.execute(desc) end +end + """ - lrn(input; depth_radius=5, bias=nothing, alpha=nothing, beta=nothing) + resource_scatter_div(resource, indices, updates) """ -tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LRN") - input_ = convert(TensorFlow.Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) - end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - end), name, "LRN") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_div(resource_, indices_, 
updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterDiv") do + desc = tf.NodeDescription("ResourceScatterDiv") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_div(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterDiv") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] end +end + """ - assign(ref, value; validate_shape=true, use_locking=true) + fixed_length_record_dataset_v2(filenames, header_bytes, record_bytes, footer_bytes, buffer_size, compression_type) """ -tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Assign") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - value_ = convert(TensorFlow.Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if validate_shape !== nothing - desc["validate_shape"] = Base.Bool(validate_shape) - end - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "Assign") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDatasetV2") do + desc = tf.NodeDescription("FixedLengthRecordDatasetV2") + filenames_ = convert(Tensor{String}, filenames_) + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + compression_type_ = convert(Tensor{String}, compression_type_) + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, compression_type_) + end + tf.Tensor(tf.Operation(desc)) + end + function fixed_length_record_dataset_v2(filenames_::tf.TensorHandle, header_bytes_::tf.TensorHandle, record_bytes_::tf.TensorHandle, footer_bytes_::tf.TensorHandle, buffer_size_::tf.TensorHandle, compression_type_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FixedLengthRecordDatasetV2") + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, compression_type_) + (tf.execute(desc))[1] end +end + """ - assign_add(ref, value; use_locking=false) + skip_dataset(input_dataset, count) """ -tf.@op function assign_add(ref_, value_; name=nothing, 
use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AssignAdd") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - value_ = convert(TensorFlow.Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "AssignAdd") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "SkipDataset") do + desc = tf.NodeDescription("SkipDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function skip_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("SkipDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - assign_sub(ref, value; use_locking=false) + cosh(x) """ -tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AssignSub") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - value_ = convert(TensorFlow.Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "AssignSub") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cosh(x_; name=nothing) + local desc + tf.with_op_name(name, "Cosh") do + desc = tf.NodeDescription("Cosh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function cosh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Cosh") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end - -""" - scatter_update(ref, indices, updates; use_locking=true) +end """ -tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterUpdate") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "ScatterUpdate") - tf.Tensor(tf.Operation(desc)) + fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, 
is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormV2") do + desc = tf.NodeDescription("FusedBatchNormV2") + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + offset_ = convert(Tensor{Any}, offset_) + mean_ = convert(Tensor{Any}, mean_) + variance_ = convert(Tensor{Any}, variance_) + (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if U !== nothing + desc["U"] = Base.identity(U) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fused_batch_norm_v2(x_::tf.TensorHandle, scale_::tf.TensorHandle, offset_::tf.TensorHandle, mean_::tf.TensorHandle, variance_::tf.TensorHandle; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormV2") + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if U !== nothing + desc["U"] = Base.identity(U) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(x_) + desc["U"] = tf.data_type(scale_) + desc["U"] = tf.data_type(offset_) + desc["U"] = tf.data_type(mean_) + desc["U"] = tf.data_type(variance_) + tf.execute(desc) end +end + """ - scatter_sub(ref, indices, updates; use_locking=false) + tensor_array_split(handle, value, lengths, flow_in) """ -tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterSub") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "ScatterSub") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplit") do + desc = tf.NodeDescription("TensorArraySplit") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) 
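# The eager method for fused_batch_norm_v2 above assigns desc["U"] four times in
# a row, once per statistics input; only the last assignment can stick, so the
# call is only well-typed when scale, offset, mean and variance already share a
# dtype. The eager path performs no promotion, so a mixed-precision caller has
# to cast before building handles; a sketch with a hypothetical Float32 cast:
#
#     scale_h = tf.TensorHandle(tf.RawTensor(Float32.(scale)))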
+ tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_split(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySplit") + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] end +end + """ - scatter_add(ref, indices, updates; use_locking=false) + ctc_loss(inputs, labels_indices, labels_values, sequence_length; preprocess_collapse_repeated=false, ctc_merge_repeated=true, ignore_longer_outputs_than_inputs=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + local desc + tf.with_op_name(name, "CTCLoss") do + desc = tf.NodeDescription("CTCLoss") + inputs_ = convert(Tensor{Float32}, inputs_) + labels_indices_ = convert(Tensor{Int64}, labels_indices_) + labels_values_ = convert(Tensor{Int32}, labels_values_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, labels_indices_) + tf.add_input(desc, labels_values_) + tf.add_input(desc, sequence_length_) + if preprocess_collapse_repeated !== nothing + desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) + end + if ctc_merge_repeated !== nothing + desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + end + if ignore_longer_outputs_than_inputs !== nothing + desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ctc_loss(inputs_::tf.TensorHandle, labels_indices_::tf.TensorHandle, labels_values_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + desc = tf.EagerOp("CTCLoss") + tf.add_input(desc, inputs_) + tf.add_input(desc, labels_indices_) + tf.add_input(desc, labels_values_) + tf.add_input(desc, sequence_length_) + if preprocess_collapse_repeated !== nothing + desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) + end + if ctc_merge_repeated !== nothing + desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + end + if ignore_longer_outputs_than_inputs !== nothing + desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) + end + tf.execute(desc) + end +end """ -tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterAdd") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "ScatterAdd") - 
tf.Tensor(tf.Operation(desc)) + quantized_reshape(tensor, shape, input_min, input_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedReshape") do + desc = tf.NodeDescription("QuantizedReshape") + tensor_ = convert(Tensor{Any}, tensor_) + shape_ = convert(Tensor{Int32}, shape_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (tensor_,) = tf.tf_promote(tensor_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_reshape(tensor_::tf.TensorHandle, shape_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QuantizedReshape") + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + desc["T"] = tf.data_type(tensor_) + desc["Tshape"] = tf.data_type(shape_) + tf.execute(desc) end +end + """ - scatter_mul(ref, indices, updates; use_locking=false) + floor_div(x, y) """ -tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterMul") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "ScatterMul") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor_div(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "FloorDiv") do + desc = tf.NodeDescription("FloorDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function floor_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FloorDiv") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] end +end + + +""" + tensor_array_v2(size; element_shape=?, dynamic_size=false, clear_after_read=true, tensor_array_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV2") do + desc = tf.NodeDescription("TensorArrayV2") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if 
clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_v2(size_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + desc = tf.EagerOp("TensorArrayV2") + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + (tf.execute(desc))[1] + end +end + + +""" + barrier_close(handle; cancel_pending_enqueues=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "BarrierClose") do + desc = tf.NodeDescription("BarrierClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + tf.Tensor(tf.Operation(desc)) + end + function barrier_close(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("BarrierClose") + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + (tf.execute(desc))[1] + end +end + + +""" + read_variable_op(resource) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ReadVariableOp") do + desc = tf.NodeDescription("ReadVariableOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function read_variable_op(resource_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ReadVariableOp") + tf.add_input(desc, resource_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_mul(x, y, min_x, max_x, min_y, max_y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedMul") do + desc = tf.NodeDescription("QuantizedMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + min_x_ = convert(Tensor{Float32}, min_x_) + max_x_ = convert(Tensor{Float32}, max_x_) + min_y_ = convert(Tensor{Float32}, min_y_) + max_y_ = convert(Tensor{Float32}, max_y_) + (x_,) = tf.tf_promote(x_) + (y_,) = tf.tf_promote(y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function 
quantized_mul(x_::tf.TensorHandle, y_::tf.TensorHandle, min_x_::tf.TensorHandle, max_x_::tf.TensorHandle, min_y_::tf.TensorHandle, max_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QuantizedMul") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) + desc["T1"] = tf.data_type(x_) + desc["T2"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + selu(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function selu(features_; name=nothing) + local desc + tf.with_op_name(name, "Selu") do + desc = tf.NodeDescription("Selu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function selu(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Selu") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackpropV3") do + desc = tf.NodeDescription("CudnnRNNBackpropV3") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + sequence_lengths_ = convert(Tensor{Int32}, sequence_lengths_) + output_ = convert(Tensor{Any}, output_) + output_h_ = convert(Tensor{Any}, output_h_) + output_c_ = convert(Tensor{Any}, output_c_) + output_backprop_ = convert(Tensor{Any}, output_backprop_) + output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + reserve_space_ = convert(Tensor{Any}, reserve_space_) + host_reserved_ = convert(Tensor{Any}, host_reserved_) + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, sequence_lengths_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + tf.add_input(desc, host_reserved_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== 
nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnn_backprop_v3(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, sequence_lengths_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle, host_reserved_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNBackpropV3") + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, sequence_lengths_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + tf.add_input(desc, host_reserved_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + desc["T"] = tf.data_type(output_) + desc["T"] = tf.data_type(output_h_) + desc["T"] = tf.data_type(output_c_) + desc["T"] = tf.data_type(output_backprop_) + desc["T"] = tf.data_type(output_h_backprop_) + desc["T"] = tf.data_type(output_c_backprop_) + desc["T"] = tf.data_type(reserve_space_) + tf.execute(desc) + end +end + + +""" + lookup_table_insert(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsert") do + desc = tf.NodeDescription("LookupTableInsert") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_insert(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableInsert") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + complex_abs(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function complex_abs(x_; name=nothing) + local desc + tf.with_op_name(name, "ComplexAbs") do + desc = tf.NodeDescription("ComplexAbs") 
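+        # The eager wrappers read attribute types off the runtime handles rather
+        # than declaring them up front; lookup_table_insert above sets "Tin" and
+        # "Tout" from tf.data_type(keys_)/tf.data_type(values_). A hedged sketch
+        # (the table handle is hypothetical, constant(...) assumed to yield
+        # TensorHandles):
+        #
+        #   lookup_table_insert(table, constant(["a", "b"]), constant([1, 2]))
+        #   # records Tin = String, Tout = Int64 on the EagerOp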
+ x_ = convert(Tensor{Complex{Float32}}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function complex_abs(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ComplexAbs") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + tridiagonal_solve(diagonals, rhs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tridiagonal_solve(diagonals_, rhs_; name=nothing) + local desc + tf.with_op_name(name, "TridiagonalSolve") do + desc = tf.NodeDescription("TridiagonalSolve") + diagonals_ = convert(Tensor{Any}, diagonals_) + rhs_ = convert(Tensor{Any}, rhs_) + (diagonals_, rhs_) = tf.tf_promote(diagonals_, rhs_) + tf.add_input(desc, diagonals_) + tf.add_input(desc, rhs_) + end + tf.Tensor(tf.Operation(desc)) + end + function tridiagonal_solve(diagonals_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TridiagonalSolve") + tf.add_input(desc, diagonals_) + tf.add_input(desc, rhs_) + desc["T"] = tf.data_type(diagonals_) + desc["T"] = tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_import(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_import(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImport") do + desc = tf.NodeDescription("LookupTableImport") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_import(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableImport") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + abs(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function abs(x_; name=nothing) + local desc + tf.with_op_name(name, "Abs") do + desc = tf.NodeDescription("Abs") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function abs(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Abs") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdam") do + desc = tf.NodeDescription("ResourceApplyAdam") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = 
convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_adam(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyAdam") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(beta1_power_) + desc["T"] = tf.data_type(beta2_power_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(beta1_) + desc["T"] = tf.data_type(beta2_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + write_histogram_summary(writer, step, tag, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "WriteHistogramSummary") do + desc = tf.NodeDescription("WriteHistogramSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + values_ = convert(Tensor{Float32}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function write_histogram_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WriteHistogramSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, values_) + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_indexed_dataset_materialize(dataset, materialized) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(Tensor{Any}, dataset_) + materialized_ = convert(Tensor{Any}, materialized_) + tf.add_input(desc, dataset_) + tf.add_input(desc, materialized_) + 
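+        # Note on resource_apply_adam above: the graph builder promotes and
+        # types only the numeric inputs (beta1_power_ through grad_); var_/m_/v_
+        # are resource handles with no element type of their own, so "T" is
+        # never inferred from them. A hedged eager sketch, all handles
+        # hypothetical:
+        #
+        #   resource_apply_adam(var, m, v, b1_power, b2_power, lr, b1, b2, eps, grad;
+        #                       use_locking=false, use_nesterov=false)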
end + tf.Tensor(tf.Operation(desc)) + end + function experimental_indexed_dataset_materialize(dataset_::tf.TensorHandle, materialized_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") + tf.add_input(desc, dataset_) + tf.add_input(desc, materialized_) + (tf.execute(desc))[1] + end +end + + +""" + _host_send(tensor; client_terminated=false) + +Sends the named tensor from send_device to recv_device. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostSend") do + desc = tf.NodeDescription("_HostSend") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _host_send(tensor_::tf.TensorHandle; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_HostSend") + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + greater(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function greater(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Greater") do + desc = tf.NodeDescription("Greater") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function greater(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Greater") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + nccl_broadcast(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "NcclBroadcast") do + desc = tf.NodeDescription("NcclBroadcast") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function nccl_broadcast(input_::tf.TensorHandle; name=nothing, shape=nothing) + desc = tf.EagerOp("NcclBroadcast") + tf.add_input(desc, input_) + if 
shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_push_back_batch(input_handles, tensor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPushBackBatch") do + desc = tf.NodeDescription("TensorListPushBackBatch") + input_handles_ = convert(Tensor{Any}, input_handles_) + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, input_handles_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_push_back_batch(input_handles_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPushBackBatch") + tf.add_input(desc, input_handles_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + desc["element_dtype"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + resource_scatter_min(resource, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMin") do + desc = tf.NodeDescription("ResourceScatterMin") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_min(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterMin") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + slice(input, begin, size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing) + local desc + tf.with_op_name(name, "Slice") do + desc = tf.NodeDescription("Slice") + input_ = convert(Tensor{Any}, input_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + size_ = convert(Tensor{Any}, size_) + (input_,) = tf.tf_promote(input_) + (begin_, size_) = tf.tf_promote(begin_, size_) + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, size_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function slice(input_::tf.TensorHandle, begin_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, Index=nothing) + desc = tf.EagerOp("Slice") + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, size_) + 
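+        # The graph builder above shifts begin_ down by one to map Julia's
+        # 1-based indices onto TensorFlow's 0-based offsets; this eager method
+        # forwards the handle unchanged, so a 0-based begin is assumed here.
+        # Hypothetical sketch:
+        #
+        #   slice(x, constant(Int32[0, 1]), constant(Int32[2, 2]))  # rows 1:2, cols 2:3 in Julia terms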
if Index !== nothing + desc["Index"] = Base.identity(Index) + end + desc["T"] = tf.data_type(input_) + desc["Index"] = tf.data_type(begin_) + desc["Index"] = tf.data_type(size_) + (tf.execute(desc))[1] + end +end + + +""" + unicode_decode(input; errors=replace, replacement_char=65533, replace_control_characters=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeDecode") do + desc = tf.NodeDescription("UnicodeDecode") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unicode_decode(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeDecode") + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + tf.execute(desc) + end +end + + +""" + take_dataset(input_dataset, count) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TakeDataset") do + desc = tf.NodeDescription("TakeDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function take_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TakeDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_make_stats_summary(node_ids, gradients, hessians, bucketized_features_list) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do + desc 
= tf.NodeDescription("BoostedTreesMakeStatsSummary") + node_ids_ = convert(Tensor{Int32}, node_ids_) + gradients_ = convert(Tensor{Float32}, gradients_) + hessians_ = convert(Tensor{Float32}, hessians_) + bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_] + tf.add_input(desc, node_ids_) + tf.add_input(desc, gradients_) + tf.add_input(desc, hessians_) + tf.add_input(desc, bucketized_features_list_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_make_stats_summary(node_ids_::tf.TensorHandle, gradients_::tf.TensorHandle, hessians_::tf.TensorHandle, bucketized_features_list_::tf.TensorHandle; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesMakeStatsSummary") + tf.add_input(desc, node_ids_) + tf.add_input(desc, gradients_) + tf.add_input(desc, hessians_) + tf.add_input(desc, bucketized_features_list_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + (tf.execute(desc))[1] + end +end + + +""" + all_candidate_sampler(true_classes; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "AllCandidateSampler") do + desc = tf.NodeDescription("AllCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function all_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("AllCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) + end +end + + +""" + conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, 
"Conv2DBackpropInput") do + desc = tf.NodeDescription("Conv2DBackpropInput") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv2d_backprop_input(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2DBackpropInput") + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + dataset_to_single_element(dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "DatasetToSingleElement") do + desc = tf.NodeDescription("DatasetToSingleElement") + dataset_ = convert(Tensor{Any}, dataset_) + tf.add_input(desc, dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function dataset_to_single_element(dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("DatasetToSingleElement") + tf.add_input(desc, dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + cache_dataset(input_dataset, filename) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "CacheDataset") do + desc = tf.NodeDescription("CacheDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + filename_ = convert(Tensor{String}, 
filename_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cache_dataset(input_dataset_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("CacheDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max; num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") + gradients_ = convert(Tensor{Float32}, gradients_) + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fake_quant_with_min_max_vars_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + tf.execute(desc) + end +end + + +""" + fused_resize_and_pad_conv2d(input, size, paddings, filter; resize_align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedResizeAndPadConv2D") do + desc = tf.NodeDescription("FusedResizeAndPadConv2D") + input_ = convert(Tensor{Any}, input_) + size_ = convert(Tensor{Int32}, size_) + paddings_ = convert(Tensor{Int32}, paddings_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if resize_align_corners !== nothing + desc["resize_align_corners"] = Base.Bool(resize_align_corners) + end + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end 
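+        # A minimal sketch for cache_dataset above; the dataset handle, cache
+        # path, and type/shape metadata are all hypothetical:
+        #
+        #   cached = cache_dataset(ds, constant("/tmp/ds_cache");
+        #                          output_types=[Int64], output_shapes=[[]])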
+ tf.Tensor(tf.Operation(desc)) + end + function fused_resize_and_pad_conv2d(input_::tf.TensorHandle, size_::tf.TensorHandle, paddings_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("FusedResizeAndPadConv2D") + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if resize_align_corners !== nothing + desc["resize_align_corners"] = Base.Bool(resize_align_corners) + end + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + (tf.execute(desc))[1] + end +end + + +""" + batch(in_tensors; max_enqueued_batches=10, allowed_batch_sizes=Int64[], container=, shared_name=, batching_queue=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + local desc + tf.with_op_name(name, "Batch") do + desc = tf.NodeDescription("Batch") + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + tf.add_input(desc, in_tensors_) + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if grad_timeout_micros !== nothing + desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function batch(in_tensors_::tf.TensorHandle; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + desc = tf.EagerOp("Batch") + tf.add_input(desc, in_tensors_) + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if grad_timeout_micros !== nothing + 
desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if T !== nothing + desc["T"] = map(Base.identity, T) + end + tf.execute(desc) + end +end + + +""" + collective_bcast_recv() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastRecv") do + desc = tf.NodeDescription("CollectiveBcastRecv") + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + desc = tf.EagerOp("CollectiveBcastRecv") + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + (tf.execute(desc))[1] + end +end + + +""" + batch_to_space_nd(input, block_shape, crops) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) + local desc + tf.with_op_name(name, "BatchToSpaceND") do + desc = tf.NodeDescription("BatchToSpaceND") + input_ = convert(Tensor{Any}, input_) + block_shape_ = convert(Tensor{Int32}, block_shape_) + crops_ = convert(Tensor{Int32}, crops_) + (crops_,) = tf.tf_promote(crops_) + (input_,) = tf.tf_promote(input_) + (block_shape_,) = tf.tf_promote(block_shape_) + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, crops_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_to_space_nd(input_::tf.TensorHandle, block_shape_::tf.TensorHandle, crops_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchToSpaceND") + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, crops_) + desc["T"] = tf.data_type(input_) + desc["Tblock_shape"] = tf.data_type(block_shape_) + desc["Tcrops"] = tf.data_type(crops_) + (tf.execute(desc))[1] + end +end + + +""" + loop_cond(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function loop_cond(input_; name=nothing) + local desc + tf.with_op_name(name, "LoopCond") do + desc = tf.NodeDescription("LoopCond") + input_ = convert(Tensor{Bool}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function loop_cond(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LoopCond") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + depth_to_space(input; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + 
tf.with_op_name(name, "DepthToSpace") do + desc = tf.NodeDescription("DepthToSpace") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function depth_to_space(input_::tf.TensorHandle; name=nothing, block_size=nothing, data_format=nothing) + desc = tf.EagerOp("DepthToSpace") + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + destroy_temporary_variable(ref) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "DestroyTemporaryVariable") do + desc = tf.NodeDescription("DestroyTemporaryVariable") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function destroy_temporary_variable(ref_::tf.TensorHandle; name=nothing, var_name=nothing) + desc = tf.EagerOp("DestroyTemporaryVariable") + tf.add_input(desc, ref_) + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + desc["T"] = tf.data_type(ref_) + (tf.execute(desc))[1] + end +end + + +""" + cudnn_rnn(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNN") do + desc = tf.NodeDescription("CudnnRNN") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnn(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + desc = tf.EagerOp("CudnnRNN") + tf.add_input(desc, input_) + 
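+        # The four desc["T"] = tf.data_type(...) writes below are the
+        # generator's uniform per-input pattern: the last write wins, and the
+        # wrapper assumes the handles already share one element type (the graph
+        # path enforces this via tf.tf_promote), so each write stores the same
+        # value.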
tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + tf.execute(desc) + end +end + + +""" + ref_identity(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_identity(input_; name=nothing) + local desc + tf.with_op_name(name, "RefIdentity") do + desc = tf.NodeDescription("RefIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function ref_identity(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RefIdentity") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool3d_grad(orig_input, orig_output, grad; data_format=NDHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGrad") do + desc = tf.NodeDescription("MaxPool3DGrad") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + (grad_,) = tf.tf_promote(grad_) + (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool3d_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3DGrad") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["TInput"] = tf.data_type(orig_input_) + desc["TInput"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters, momenta, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + padding_fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueueV2") do + desc = tf.NodeDescription("PaddingFIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PaddingFIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + conv3d_backprop_input(input, filter, out_backprop; dilations=[1, 1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; 
name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInput") do + desc = tf.NodeDescription("Conv3DBackpropInput") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv3d_backprop_input(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropInput") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + ref_exit(data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_exit(data_; name=nothing) + local desc + tf.with_op_name(name, "RefExit") do + desc = tf.NodeDescription("RefExit") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + end + tf.Tensor(tf.Operation(desc)) + end + function ref_exit(data_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RefExit") + tf.add_input(desc, data_) + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + map_clear(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapClear") do + desc = tf.NodeDescription("MapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + encode_wav(audio, sample_rate) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing) + local desc + tf.with_op_name(name, "EncodeWav") do + desc = tf.NodeDescription("EncodeWav") + audio_ = convert(Tensor{Float32}, audio_) + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + tf.add_input(desc, audio_) + tf.add_input(desc, sample_rate_) + end + tf.Tensor(tf.Operation(desc)) + end + function encode_wav(audio_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("EncodeWav") + tf.add_input(desc, audio_) + tf.add_input(desc, sample_rate_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_summary_v2(tag, tensor, serialized_summary_metadata) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) + local desc + tf.with_op_name(name, "TensorSummaryV2") do + desc = tf.NodeDescription("TensorSummaryV2") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Any}, tensor_) + serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, serialized_summary_metadata_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_summary_v2(tag_::tf.TensorHandle, tensor_::tf.TensorHandle, serialized_summary_metadata_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorSummaryV2") + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, serialized_summary_metadata_) + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + queue_dequeue_up_to(handle, n; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpTo") do + desc = tf.NodeDescription("QueueDequeueUpTo") + handle_ = convert(Tensor{String}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue_up_to(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueUpTo") + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + matrix_band_part(input, num_lower, num_upper) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "MatrixBandPart") do + desc = tf.NodeDescription("MatrixBandPart") + input_ = convert(Tensor{Any}, input_) + num_lower_ = convert(Tensor{Int64}, num_lower_) + num_upper_ = convert(Tensor{Int64}, num_upper_) + (input_,) = tf.tf_promote(input_) + (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_) + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) + end 
+ tf.Tensor(tf.Operation(desc)) + end + function matrix_band_part(input_::tf.TensorHandle, num_lower_::tf.TensorHandle, num_upper_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixBandPart") + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) + desc["T"] = tf.data_type(input_) + desc["Tindex"] = tf.data_type(num_lower_) + desc["Tindex"] = tf.data_type(num_upper_) + (tf.execute(desc))[1] + end +end + + +""" + copy(input; tensor_name=, debug_ops_spec=Int64[]) + +Copy Op. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "Copy") do + desc = tf.NodeDescription("Copy") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + tf.Tensor(tf.Operation(desc)) + end + function copy(input_::tf.TensorHandle; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + desc = tf.EagerOp("Copy") + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + shape_n(input; out_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "ShapeN") do + desc = tf.NodeDescription("ShapeN") + input_ = [convert(Tensor{Any}, x) for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function shape_n(input_::tf.TensorHandle; name=nothing, N=nothing, out_type=nothing) + desc = tf.EagerOp("ShapeN") + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + experimental_parse_example_dataset(input_dataset, num_parallel_calls, dense_defaults; sloppy=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ExperimentalParseExampleDataset") do + desc = tf.NodeDescription("ExperimentalParseExampleDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, dense_defaults_) + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + if 
dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_parse_example_dataset(input_dataset_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + desc = tf.EagerOp("ExperimentalParseExampleDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, dense_defaults_) + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + (tf.execute(desc))[1] + end +end + + +""" + concat(concat_dim, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Concat") do + desc = tf.NodeDescription("Concat") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function concat(concat_dim_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("Concat") + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + data_format_dim_map(x; src_format=NHWC, dst_format=NCHW) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatDimMap") do + desc = tf.NodeDescription("DataFormatDimMap") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function data_format_dim_map(x_::tf.TensorHandle; name=nothing, src_format=nothing, dst_format=nothing) + desc = 
tf.EagerOp("DataFormatDimMap") + tf.add_input(desc, x_) + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + identity_reader(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReader") do + desc = tf.NodeDescription("IdentityReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("IdentityReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + softplus(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softplus(features_; name=nothing) + local desc + tf.with_op_name(name, "Softplus") do + desc = tf.NodeDescription("Softplus") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function softplus(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Softplus") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = 
Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + parse_single_sequence_example(serialized, feature_list_dense_missing_assumed_empty, context_sparse_keys, context_dense_keys, feature_list_sparse_keys, feature_list_dense_keys, context_dense_defaults, debug_name; Ncontext_sparse=0, Ncontext_dense=0, Nfeature_list_sparse=0, Nfeature_list_dense=0, context_sparse_types=Int64[], Tcontext_dense=Int64[], feature_list_dense_types=Int64[], context_dense_shapes=Int64[], feature_list_sparse_types=Int64[], feature_list_dense_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleSequenceExample") do + desc = tf.NodeDescription("ParseSingleSequenceExample") + serialized_ = convert(Tensor{String}, serialized_) + feature_list_dense_missing_assumed_empty_ = convert(Tensor{String}, feature_list_dense_missing_assumed_empty_) + context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_] + context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_] + feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_] + feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_] + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + debug_name_ = convert(Tensor{String}, debug_name_) + tf.add_input(desc, serialized_) + tf.add_input(desc, feature_list_dense_missing_assumed_empty_) + tf.add_input(desc, context_sparse_keys_) + tf.add_input(desc, context_dense_keys_) + tf.add_input(desc, feature_list_sparse_keys_) + tf.add_input(desc, feature_list_dense_keys_) + tf.add_input(desc, context_dense_defaults_) + tf.add_input(desc, debug_name_) + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + 
desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:8 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function parse_single_sequence_example(serialized_::tf.TensorHandle, feature_list_dense_missing_assumed_empty_::tf.TensorHandle, context_sparse_keys_::tf.TensorHandle, context_dense_keys_::tf.TensorHandle, feature_list_sparse_keys_::tf.TensorHandle, feature_list_dense_keys_::tf.TensorHandle, context_dense_defaults_::tf.TensorHandle, debug_name_::tf.TensorHandle; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + desc = tf.EagerOp("ParseSingleSequenceExample") + tf.add_input(desc, serialized_) + tf.add_input(desc, feature_list_dense_missing_assumed_empty_) + tf.add_input(desc, context_sparse_keys_) + tf.add_input(desc, context_dense_keys_) + tf.add_input(desc, feature_list_sparse_keys_) + tf.add_input(desc, feature_list_dense_keys_) + tf.add_input(desc, context_dense_defaults_) + tf.add_input(desc, debug_name_) + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + tf.execute(desc) + end +end + + +""" + matrix_diag(diagonal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_diag(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiag") do + desc = tf.NodeDescription("MatrixDiag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_diag(diagonal_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixDiag") + tf.add_input(desc, diagonal_) + desc["T"] = tf.data_type(diagonal_) + (tf.execute(desc))[1] + end +end + + +""" + fact() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fact(; name=nothing) + local desc + tf.with_op_name(name, "Fact") do + desc + tf.NodeDescription("Fact") + end + tf.Tensor(tf.Operation(desc)) + end + function fact(; name=nothing) + desc = tf.EagerOp("Fact") + (tf.execute(desc))[1] + end +end + + +""" + shard_dataset(input_dataset, num_shards, index) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShardDataset") do + desc = tf.NodeDescription("ShardDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_shards_ = convert(Tensor{Int64}, num_shards_) + index_ = convert(Tensor{Int64}, index_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_shards_) + tf.add_input(desc, index_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function shard_dataset(input_dataset_::tf.TensorHandle, num_shards_::tf.TensorHandle, index_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ShardDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_shards_) + tf.add_input(desc, index_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + max_pool_grad_grad(orig_input, orig_output, grad; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGrad") do + desc = tf.NodeDescription("MaxPoolGradGrad") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + grad_ = convert(Tensor{Any}, grad_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradGrad") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + resize_bilinear_grad(grads, original_image; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinearGrad") do + desc = 
tf.NodeDescription("ResizeBilinearGrad") + grads_ = convert(Tensor{Float32}, grads_) + original_image_ = convert(Tensor{Any}, original_image_) + (original_image_,) = tf.tf_promote(original_image_) + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_bilinear_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBilinearGrad") + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(original_image_) + (tf.execute(desc))[1] + end +end + + +""" + batch_to_space(input, crops) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "BatchToSpace") do + desc = tf.NodeDescription("BatchToSpace") + input_ = convert(Tensor{Any}, input_) + crops_ = convert(Tensor{Int32}, crops_) + crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1) + (input_,) = tf.tf_promote(input_) + (crops_,) = tf.tf_promote(crops_) + tf.add_input(desc, input_) + tf.add_input(desc, crops_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_to_space(input_::tf.TensorHandle, crops_::tf.TensorHandle; name=nothing, block_size=nothing) + desc = tf.EagerOp("BatchToSpace") + tf.add_input(desc, input_) + tf.add_input(desc, crops_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(crops_) + (tf.execute(desc))[1] + end +end + + +""" + optional_from_value(components) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing) + local desc + tf.with_op_name(name, "OptionalFromValue") do + desc = tf.NodeDescription("OptionalFromValue") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + tf.Tensor(tf.Operation(desc)) + end + function optional_from_value(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing) + desc = tf.EagerOp("OptionalFromValue") + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + (tf.execute(desc))[1] + end +end + + +""" + xlogy(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function xlogy(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xlogy") do + desc = tf.NodeDescription("Xlogy") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function xlogy(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Xlogy") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + cross(a, b) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op 
function cross(a_, b_; name=nothing) + local desc + tf.with_op_name(name, "Cross") do + desc = tf.NodeDescription("Cross") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + (a_, b_) = tf.tf_promote(a_, b_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + end + tf.Tensor(tf.Operation(desc)) + end + function cross(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Cross") + tf.add_input(desc, a_) + tf.add_input(desc, b_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(b_) + (tf.execute(desc))[1] + end +end + + +""" + bitwise_and(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_and(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseAnd") do + desc = tf.NodeDescription("BitwiseAnd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function bitwise_and(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BitwiseAnd") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + broadcast_to(input, shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_to(input_, shape_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastTo") do + desc = tf.NodeDescription("BroadcastTo") + input_ = convert(Tensor{Any}, input_) + shape_ = convert(Tensor{Int32}, shape_) + (input_,) = tf.tf_promote(input_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, input_) + tf.add_input(desc, shape_) + end + tf.Tensor(tf.Operation(desc)) + end + function broadcast_to(input_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BroadcastTo") + tf.add_input(desc, input_) + tf.add_input(desc, shape_) + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + elu_grad(gradients, outputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "EluGrad") do + desc = tf.NodeDescription("EluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + outputs_ = convert(Tensor{Any}, outputs_) + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) + end + tf.Tensor(tf.Operation(desc)) + end + function elu_grad(gradients_::tf.TensorHandle, outputs_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("EluGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(outputs_) + (tf.execute(desc))[1] + end +end + + +""" + cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, 
seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackprop") do + desc = tf.NodeDescription("CudnnRNNBackprop") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + output_ = convert(Tensor{Any}, output_) + output_h_ = convert(Tensor{Any}, output_h_) + output_c_ = convert(Tensor{Any}, output_c_) + output_backprop_ = convert(Tensor{Any}, output_backprop_) + output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + reserve_space_ = convert(Tensor{Any}, reserve_space_) + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnn_backprop(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNBackprop") + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + desc["T"] = tf.data_type(output_) + desc["T"] = tf.data_type(output_h_) + desc["T"] = tf.data_type(output_c_) + desc["T"] = tf.data_type(output_backprop_) + desc["T"] = 
tf.data_type(output_h_backprop_) + desc["T"] = tf.data_type(output_c_backprop_) + desc["T"] = tf.data_type(reserve_space_) + tf.execute(desc) + end +end + + +""" + string_to_hash_bucket_fast(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketFast") do + desc = tf.NodeDescription("StringToHashBucketFast") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_to_hash_bucket_fast(input_::tf.TensorHandle; name=nothing, num_buckets=nothing) + desc = tf.EagerOp("StringToHashBucketFast") + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + (tf.execute(desc))[1] + end +end + + +""" + mutable_hash_table(; container=, shared_name=, use_node_name_sharing=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTable") do + desc = tf.NodeDescription("MutableHashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("MutableHashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + relu(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu") do + desc = tf.NodeDescription("Relu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function relu(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Relu") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + nth_element(input, n; reverse=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "NthElement") do + desc = tf.NodeDescription("NthElement") + input_ = convert(Tensor{Any}, input_) + n_ = convert(Tensor{Int32}, n_) + (input_,) 
= tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, n_) + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + tf.Tensor(tf.Operation(desc)) + end + function nth_element(input_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, reverse=nothing) + desc = tf.EagerOp("NthElement") + tf.add_input(desc, input_) + tf.add_input(desc, n_) + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + softsign(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softsign(features_; name=nothing) + local desc + tf.with_op_name(name, "Softsign") do + desc = tf.NodeDescription("Softsign") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function softsign(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Softsign") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + mutable_dense_hash_table(empty_key; container=, shared_name=, use_node_name_sharing=false, value_shape=?, initial_num_buckets=131072, max_load_factor=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTable") do + desc = tf.NodeDescription("MutableDenseHashTable") + empty_key_ = convert(Tensor{Any}, empty_key_) + (empty_key_,) = tf.tf_promote(empty_key_) + tf.add_input(desc, empty_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_dense_hash_table(empty_key_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + desc = tf.EagerOp("MutableDenseHashTable") + tf.add_input(desc, empty_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets 
!== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + desc["key_dtype"] = tf.data_type(empty_key_) + (tf.execute(desc))[1] + end +end + + +""" + _shutdown_distributed_tpu() + +An op that shuts down a running distributed TPU system. The Op returns +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _shutdown_distributed_tpu(; name=nothing) + local desc + tf.with_op_name(name, "_ShutdownDistributedTPU") do + desc + tf.NodeDescription("_ShutdownDistributedTPU") + end + tf.Tensor(tf.Operation(desc)) + end + function _shutdown_distributed_tpu(; name=nothing) + desc = tf.EagerOp("_ShutdownDistributedTPU") + (tf.execute(desc))[1] + end +end + + +""" + polygamma(a, x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function polygamma(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Polygamma") do + desc = tf.NodeDescription("Polygamma") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function polygamma(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Polygamma") + tf.add_input(desc, a_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + nccl_reduce(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) + local desc + tf.with_op_name(name, "NcclReduce") do + desc = tf.NodeDescription("NcclReduce") + input_ = [convert(Tensor{Any}, x) for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + tf.Tensor(tf.Operation(desc)) + end + function nccl_reduce(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing) + desc = tf.EagerOp("NcclReduce") + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + arg_max(input, dimension; output_type=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMax") do + desc = tf.NodeDescription("ArgMax") + input_ = convert(Tensor{Any}, input_) + dimension_ = convert(Tensor{Int32}, dimension_) + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + (input_,) = tf.tf_promote(input_) + (dimension_,) = tf.tf_promote(dimension_) + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function arg_max(input_::tf.TensorHandle, dimension_::tf.TensorHandle; name=nothing, output_type=nothing) + desc = tf.EagerOp("ArgMax") + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + 
end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(dimension_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_set_diag(input, diagonal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSetDiag") do + desc = tf.NodeDescription("MatrixSetDiag") + input_ = convert(Tensor{Any}, input_) + diagonal_ = convert(Tensor{Any}, diagonal_) + (input_, diagonal_) = tf.tf_promote(input_, diagonal_) + tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_set_diag(input_::tf.TensorHandle, diagonal_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixSetDiag") + tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(diagonal_) + (tf.execute(desc))[1] + end +end + + +""" + space_to_batch_nd(input, block_shape, paddings) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "SpaceToBatchND") do + desc = tf.NodeDescription("SpaceToBatchND") + input_ = convert(Tensor{Any}, input_) + block_shape_ = convert(Tensor{Int32}, block_shape_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + (block_shape_,) = tf.tf_promote(block_shape_) + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, paddings_) + end + tf.Tensor(tf.Operation(desc)) + end + function space_to_batch_nd(input_::tf.TensorHandle, block_shape_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SpaceToBatchND") + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, paddings_) + desc["T"] = tf.data_type(input_) + desc["Tblock_shape"] = tf.data_type(block_shape_) + desc["Tpaddings"] = tf.data_type(paddings_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_reshape(input_indices, input_shape, new_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReshape") do + desc = tf.NodeDescription("SparseReshape") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + new_shape_ = convert(Tensor{Int64}, new_shape_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, new_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_reshape(input_indices_::tf.TensorHandle, input_shape_::tf.TensorHandle, new_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseReshape") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, new_shape_) + tf.execute(desc) + end +end + + +""" + optimize_dataset(input_dataset, optimizations) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptimizeDataset") do + desc = tf.NodeDescription("OptimizeDataset") + input_dataset_ 
= convert(Tensor{Any}, input_dataset_) + optimizations_ = convert(Tensor{String}, optimizations_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, optimizations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function optimize_dataset(input_dataset_::tf.TensorHandle, optimizations_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("OptimizeDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, optimizations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + concat_v2(values, axis) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatV2") do + desc = tf.NodeDescription("ConcatV2") + values_ = [convert(Tensor{Any}, x) for x = values_] + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (values_,) = tf.tf_promote(values_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, values_) + tf.add_input(desc, axis_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function concat_v2(values_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("ConcatV2") + tf.add_input(desc, values_) + tf.add_input(desc, axis_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(values_) + desc["Tidx"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdadelta") do + desc = tf.NodeDescription("ResourceSparseApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = 
tf.EagerOp("ResourceSparseApplyAdadelta") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + tile(input, multiples) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tile(input_, multiples_; name=nothing) + local desc + tf.with_op_name(name, "Tile") do + desc = tf.NodeDescription("Tile") + input_ = convert(Tensor{Any}, input_) + multiples_ = convert(Tensor{Int32}, multiples_) + (input_,) = tf.tf_promote(input_) + (multiples_,) = tf.tf_promote(multiples_) + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) + end + tf.Tensor(tf.Operation(desc)) + end + function tile(input_::tf.TensorHandle, multiples_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Tile") + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) + desc["T"] = tf.data_type(input_) + desc["Tmultiples"] = tf.data_type(multiples_) + (tf.execute(desc))[1] + end +end + + +""" + mutex_v2(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MutexV2") do + desc = tf.NodeDescription("MutexV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MutexV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + serialize_many_sparse(sparse_indices, sparse_values, sparse_shape; out_type=String) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "SerializeManySparse") do + desc = tf.NodeDescription("SerializeManySparse") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function serialize_many_sparse(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("SerializeManySparse") + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(sparse_values_) + (tf.execute(desc))[1] + end +end 
+ + +""" + tpu_embedding_activations(embedding_variable, sliced_activations) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + local desc + tf.with_op_name(name, "TPUEmbeddingActivations") do + desc = tf.NodeDescription("TPUEmbeddingActivations") + embedding_variable_ = convert(Tensor{Float32}, embedding_variable_) + sliced_activations_ = convert(Tensor{Float32}, sliced_activations_) + tf.add_input(desc, embedding_variable_) + tf.add_input(desc, sliced_activations_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if lookup_id !== nothing + desc["lookup_id"] = Base.Int(lookup_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_embedding_activations(embedding_variable_::tf.TensorHandle, sliced_activations_::tf.TensorHandle; name=nothing, table_id=nothing, lookup_id=nothing) + desc = tf.EagerOp("TPUEmbeddingActivations") + tf.add_input(desc, embedding_variable_) + tf.add_input(desc, sliced_activations_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if lookup_id !== nothing + desc["lookup_id"] = Base.Int(lookup_id) + end + (tf.execute(desc))[1] + end +end + + +""" + batch_matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolveLs") do + desc = tf.NodeDescription("BatchMatrixSolveLs") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_solve_ls(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle, l2_regularizer_::tf.TensorHandle; name=nothing, fast=nothing) + desc = tf.EagerOp("BatchMatrixSolveLs") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + not_equal(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function not_equal(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "NotEqual") do + desc = tf.NodeDescription("NotEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function not_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NotEqual") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + lgamma(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lgamma(x_; name=nothing) + local desc + tf.with_op_name(name, "Lgamma") do + desc = tf.NodeDescription("Lgamma") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + 
tf.Tensor(tf.Operation(desc)) + end + function lgamma(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Lgamma") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + local desc + tf.with_op_name(name, "TPUReplicateMetadata") do + desc = tf.NodeDescription("TPUReplicateMetadata") + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if computation_shape !== nothing + desc["computation_shape"] = map(Base.identity, computation_shape) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + if padding_map !== nothing + desc["padding_map"] = map(Base.identity, padding_map) + end + if step_marker_location !== nothing + desc["step_marker_location"] = Base.String(step_marker_location) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + desc = tf.EagerOp("TPUReplicateMetadata") + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if computation_shape !== nothing + desc["computation_shape"] = map(Base.identity, computation_shape) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + if padding_map !== nothing + desc["padding_map"] = map(Base.identity, padding_map) + end + if step_marker_location !== nothing + desc["step_marker_location"] = Base.String(step_marker_location) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_thread_pool_handle(; max_intra_op_parallelism=1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolHandle") do + desc = tf.NodeDescription("ExperimentalThreadPoolHandle") + if 
num_threads !== nothing + desc["num_threads"] = Base.Int(num_threads) + end + if max_intra_op_parallelism !== nothing + desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("ExperimentalThreadPoolHandle") + if num_threads !== nothing + desc["num_threads"] = Base.Int(num_threads) + end + if max_intra_op_parallelism !== nothing + desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + self_adjoint_eig(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function self_adjoint_eig(input_; name=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEig") do + desc = tf.NodeDescription("SelfAdjointEig") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function self_adjoint_eig(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SelfAdjointEig") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + tf.add_input(desc, quantile_stream_resource_handle_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_::tf.TensorHandle; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + tf.add_input(desc, quantile_stream_resource_handle_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + tf.execute(desc) + end +end + + +""" + sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, dense) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseDiv") do + desc = tf.NodeDescription("SparseDenseCwiseDiv") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + 
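+# the indices and shape inputs keep fixed integer dtypes (Int64); the value and
+# dense operands below are promoted to a shared element type T.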
sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_dense_cwise_div(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseDiv") + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + desc["T"] = tf.data_type(sp_values_) + desc["T"] = tf.data_type(dense_) + (tf.execute(desc))[1] + end +end + + +""" + acos(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function acos(x_; name=nothing) + local desc + tf.with_op_name(name, "Acos") do + desc = tf.NodeDescription("Acos") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function acos(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Acos") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + all(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "All") do + desc = tf.NodeDescription("All") + input_ = convert(Tensor{Bool}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function all(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("All") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + compare_and_bitpack(input, threshold) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing) + local desc + tf.with_op_name(name, "CompareAndBitpack") do + desc = tf.NodeDescription("CompareAndBitpack") + input_ = convert(Tensor{Any}, input_) + threshold_ = convert(Tensor{Any}, threshold_) + (input_, threshold_) = tf.tf_promote(input_, threshold_) + tf.add_input(desc, input_) + tf.add_input(desc, threshold_) + end + tf.Tensor(tf.Operation(desc)) + end + function compare_and_bitpack(input_::tf.TensorHandle, threshold_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CompareAndBitpack") + tf.add_input(desc, input_) + tf.add_input(desc, threshold_) + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(threshold_) + (tf.execute(desc))[1] + end +end + + +""" + var_handle_op(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function var_handle_op(; name=nothing, container=nothing, 
shared_name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "VarHandleOp") do + desc = tf.NodeDescription("VarHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("VarHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_unique_dataset(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUniqueDataset") do + desc = tf.NodeDescription("ExperimentalUniqueDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_unique_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalUniqueDataset") + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_sum_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndRelu") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndRelu") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Float32}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + summand_ = convert(Tensor{Float32}, summand_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== 
nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_sum_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndRelu") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + list_diff(x, y; out_idx=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "ListDiff") do + desc = tf.NodeDescription("ListDiff") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function list_diff(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, out_idx=nothing) + desc = tf.EagerOp("ListDiff") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + create_summary_file_writer(writer, logdir, max_queue, flush_millis, filename_suffix) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + local desc + tf.with_op_name(name, "CreateSummaryFileWriter") do + desc = tf.NodeDescription("CreateSummaryFileWriter") + writer_ = convert(Tensor{Any}, writer_) + logdir_ = convert(Tensor{String}, logdir_) + max_queue_ = convert(Tensor{Int32}, max_queue_) + flush_millis_ = convert(Tensor{Int32}, flush_millis_) + filename_suffix_ = convert(Tensor{String}, filename_suffix_) + tf.add_input(desc, writer_) + tf.add_input(desc, logdir_) + tf.add_input(desc, max_queue_) + tf.add_input(desc, flush_millis_) + tf.add_input(desc, filename_suffix_) + end + tf.Tensor(tf.Operation(desc)) + end + function create_summary_file_writer(writer_::tf.TensorHandle, logdir_::tf.TensorHandle, max_queue_::tf.TensorHandle, flush_millis_::tf.TensorHandle, filename_suffix_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CreateSummaryFileWriter") + 
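+# inputs are added in the op's declared order (writer, logdir, max_queue,
+# flush_millis, filename_suffix) before the op is executed eagerly.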
tf.add_input(desc, writer_) + tf.add_input(desc, logdir_) + tf.add_input(desc, max_queue_) + tf.add_input(desc, flush_millis_) + tf.add_input(desc, filename_suffix_) + (tf.execute(desc))[1] + end +end + + +""" + generate_vocab_remapping(new_vocab_file, old_vocab_file; old_vocab_size=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + local desc + tf.with_op_name(name, "GenerateVocabRemapping") do + desc = tf.NodeDescription("GenerateVocabRemapping") + new_vocab_file_ = convert(Tensor{String}, new_vocab_file_) + old_vocab_file_ = convert(Tensor{String}, old_vocab_file_) + tf.add_input(desc, new_vocab_file_) + tf.add_input(desc, old_vocab_file_) + if new_vocab_offset !== nothing + desc["new_vocab_offset"] = Base.Int(new_vocab_offset) + end + if num_new_vocab !== nothing + desc["num_new_vocab"] = Base.Int(num_new_vocab) + end + if old_vocab_size !== nothing + desc["old_vocab_size"] = Base.Int(old_vocab_size) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function generate_vocab_remapping(new_vocab_file_::tf.TensorHandle, old_vocab_file_::tf.TensorHandle; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + desc = tf.EagerOp("GenerateVocabRemapping") + tf.add_input(desc, new_vocab_file_) + tf.add_input(desc, old_vocab_file_) + if new_vocab_offset !== nothing + desc["new_vocab_offset"] = Base.Int(new_vocab_offset) + end + if num_new_vocab !== nothing + desc["num_new_vocab"] = Base.Int(num_new_vocab) + end + if old_vocab_size !== nothing + desc["old_vocab_size"] = Base.Int(old_vocab_size) + end + tf.execute(desc) + end +end + + +""" + batch_matrix_inverse(input; adjoint=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixInverse") do + desc = tf.NodeDescription("BatchMatrixInverse") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_inverse(input_::tf.TensorHandle; name=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixInverse") + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + control_trigger() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function control_trigger(; name=nothing) + local desc + tf.with_op_name(name, "ControlTrigger") do + desc = tf.NodeDescription("ControlTrigger") + end + tf.Tensor(tf.Operation(desc)) + end + function control_trigger(; name=nothing) + desc = tf.EagerOp("ControlTrigger") + (tf.execute(desc))[1] + end +end + + +""" + tpu_ordinal_selector() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_ordinal_selector(; name=nothing) + local desc + tf.with_op_name(name, "TPUOrdinalSelector") do + desc = tf.NodeDescription("TPUOrdinalSelector") + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_ordinal_selector(; name=nothing) + desc = 
tf.EagerOp("TPUOrdinalSelector") + (tf.execute(desc))[1] + end +end + + +""" + stop_gradient(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stop_gradient(input_; name=nothing) + local desc + tf.with_op_name(name, "StopGradient") do + desc = tf.NodeDescription("StopGradient") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function stop_gradient(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("StopGradient") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + split(split_dim, value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "Split") do + desc = tf.NodeDescription("Split") + split_dim_ = convert(Tensor{Int32}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, split_dim_) + tf.add_input(desc, value_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function split(split_dim_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, num_split=nothing) + desc = tf.EagerOp("Split") + tf.add_input(desc, split_dim_) + tf.add_input(desc, value_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + desc["T"] = tf.data_type(value_) + tf.execute(desc) + end +end + + +""" + unpack(value; axis=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Unpack") do + desc = tf.NodeDescription("Unpack") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if num !== nothing + desc["num"] = Base.Int(num) + end + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unpack(value_::tf.TensorHandle; name=nothing, num=nothing, axis=nothing) + desc = tf.EagerOp("Unpack") + tf.add_input(desc, value_) + if num !== nothing + desc["num"] = Base.Int(num) + end + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + desc["T"] = tf.data_type(value_) + tf.execute(desc) + end +end + + +""" + resource_scatter_max(resource, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMax") do + desc = tf.NodeDescription("ResourceScatterMax") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, 
updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_max(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterMax") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_write(handle, index, value, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWrite") do + desc = tf.NodeDescription("TensorArrayWrite") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_write(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayWrite") + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + fill(dims, value; index_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) + local desc + tf.with_op_name(name, "Fill") do + desc = tf.NodeDescription("Fill") + dims_ = convert(Tensor{Int32}, dims_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + (dims_,) = tf.tf_promote(dims_) + tf.add_input(desc, dims_) + tf.add_input(desc, value_) + if index_type !== nothing + desc["index_type"] = Base.identity(index_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fill(dims_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, index_type=nothing) + desc = tf.EagerOp("Fill") + tf.add_input(desc, dims_) + tf.add_input(desc, value_) + if index_type !== nothing + desc["index_type"] = Base.identity(index_type) + end + desc["index_type"] = tf.data_type(dims_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRequantize") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + 
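+# the remaining min/max range inputs are fixed Float32 tensors; only the input,
+# filter and bias operands take part in dtype promotion.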
max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + desc["Tbias"] = tf.data_type(bias_) + tf.execute(desc) + end +end + + +""" + softmax(logits) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softmax(logits_; name=nothing) + local desc + tf.with_op_name(name, "Softmax") do + desc = tf.NodeDescription("Softmax") + logits_ = convert(Tensor{Any}, logits_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) + end + tf.Tensor(tf.Operation(desc)) + end + function softmax(logits_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Softmax") + tf.add_input(desc, logits_) + desc["T"] = tf.data_type(logits_) + (tf.execute(desc))[1] + end +end + + +""" + resize_bicubic(images, size; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubic") do + desc = tf.NodeDescription("ResizeBicubic") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = 
Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_bicubic(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBicubic") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + infeed_dequeue_tuple() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "InfeedDequeueTuple") do + desc = tf.NodeDescription("InfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + desc = tf.EagerOp("InfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + multi_device_iterator() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIterator") do + desc = tf.NodeDescription("MultiDeviceIterator") + if devices !== nothing + desc["devices"] = map(Base.identity, devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIterator") + if devices !== nothing + desc["devices"] = map(Base.identity, devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + decode_csv(records, record_defaults; field_delim=,, use_quote_delim=true, na_value=, select_cols=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + local desc + tf.with_op_name(name, "DecodeCSV") do + desc = tf.NodeDescription("DecodeCSV") + records_ = convert(Tensor{String}, records_) + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + tf.add_input(desc, records_) + tf.add_input(desc, record_defaults_) + if OUT_TYPE !== nothing + desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) + end + if field_delim !== 
nothing + desc["field_delim"] = Base.String(field_delim) + end + if use_quote_delim !== nothing + desc["use_quote_delim"] = Base.Bool(use_quote_delim) + end + if na_value !== nothing + desc["na_value"] = Base.String(na_value) + end + if select_cols !== nothing + desc["select_cols"] = map(Base.identity, select_cols) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_csv(records_::tf.TensorHandle, record_defaults_::tf.TensorHandle; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + desc = tf.EagerOp("DecodeCSV") + tf.add_input(desc, records_) + tf.add_input(desc, record_defaults_) + if OUT_TYPE !== nothing + desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) + end + if field_delim !== nothing + desc["field_delim"] = Base.String(field_delim) + end + if use_quote_delim !== nothing + desc["use_quote_delim"] = Base.Bool(use_quote_delim) + end + if na_value !== nothing + desc["na_value"] = Base.String(na_value) + end + if select_cols !== nothing + desc["select_cols"] = map(Base.identity, select_cols) + end + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_find(table_handle, keys, default_value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFind") do + desc = tf.NodeDescription("LookupTableFind") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + default_value_ = convert(Tensor{Any}, default_value_) + (keys_,) = tf.tf_promote(keys_) + (default_value_,) = tf.tf_promote(default_value_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_find(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableFind") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(default_value_) + (tf.execute(desc))[1] + end +end + + +""" + shuffle_and_repeat_dataset(input_dataset, buffer_size, seed, seed2, count) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleAndRepeatDataset") do + desc = tf.NodeDescription("ShuffleAndRepeatDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function shuffle_and_repeat_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle, seed_::tf.TensorHandle, seed2_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) 
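+# eager variant: same attributes as the graph builder above, but the five
+# inputs arrive as concrete TensorHandles and the op runs immediately.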
+ desc = tf.EagerOp("ShuffleAndRepeatDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + requantization_range_per_channel(input, input_min, input_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) + local desc + tf.with_op_name(name, "RequantizationRangePerChannel") do + desc = tf.NodeDescription("RequantizationRangePerChannel") + input_ = convert(Tensor{Float32}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if clip_value_max !== nothing + desc["clip_value_max"] = Base.identity(clip_value_max) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function requantization_range_per_channel(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, clip_value_max=nothing) + desc = tf.EagerOp("RequantizationRangePerChannel") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if clip_value_max !== nothing + desc["clip_value_max"] = Base.identity(clip_value_max) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + experimental_unbatch_dataset(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUnbatchDataset") do + desc = tf.NodeDescription("ExperimentalUnbatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_unbatch_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalUnbatchDataset") + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + avg_pool3d_grad(orig_input_shape, grad; data_format=NDHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3DGrad") do + desc = tf.NodeDescription("AvgPool3DGrad") + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + grad_ = convert(Tensor{Any}, grad_) + (grad_,) = tf.tf_promote(grad_) + tf.add_input(desc, orig_input_shape_) + 
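+# after the inputs, the ksize/strides/padding/data_format attributes are
+# attached only when the caller supplies them explicitly.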
tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function avg_pool3d_grad(orig_input_shape_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool3DGrad") + tf.add_input(desc, orig_input_shape_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + placeholder_with_default(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderWithDefault") do + desc = tf.NodeDescription("PlaceholderWithDefault") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function placeholder_with_default(input_::tf.TensorHandle; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("PlaceholderWithDefault") + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["dtype"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + initialize_table_v2(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_v2(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "InitializeTableV2") do + desc = tf.NodeDescription("InitializeTableV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function initialize_table_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InitializeTableV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tkey"] = tf.data_type(keys_) + desc["Tval"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + set_size(set_indices, set_values, set_shape; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SetSize") do + desc = tf.NodeDescription("SetSize") + set_indices_ = convert(Tensor{Int64}, set_indices_) + set_values_ = convert(Tensor{Any}, 
set_values_) + set_shape_ = convert(Tensor{Int64}, set_shape_) + (set_values_,) = tf.tf_promote(set_values_) + tf.add_input(desc, set_indices_) + tf.add_input(desc, set_values_) + tf.add_input(desc, set_shape_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + tf.Tensor(tf.Operation(desc)) + end + function set_size(set_indices_::tf.TensorHandle, set_values_::tf.TensorHandle, set_shape_::tf.TensorHandle; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("SetSize") + tf.add_input(desc, set_indices_) + tf.add_input(desc, set_values_) + tf.add_input(desc, set_shape_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + desc["T"] = tf.data_type(set_values_) + (tf.execute(desc))[1] + end +end + + +""" + assert(condition, data; summarize=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Assert") do + desc = tf.NodeDescription("Assert") + condition_ = convert(Tensor{Bool}, condition_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, condition_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assert(condition_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing, summarize=nothing) + desc = tf.EagerOp("Assert") + tf.add_input(desc, condition_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + (tf.execute(desc))[1] + end +end + + +""" + non_max_suppression_v4(boxes, scores, max_output_size, iou_threshold, score_threshold; pad_to_max_output_size=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV4") do + desc = tf.NodeDescription("NonMaxSuppressionV4") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) + if pad_to_max_output_size !== nothing + desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function non_max_suppression_v4(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing, pad_to_max_output_size=nothing) + desc = tf.EagerOp("NonMaxSuppressionV4") + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) + if pad_to_max_output_size !== nothing + 
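+# optional attribute: written into the op description only when a value was passed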
desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) + end + desc["T"] = tf.data_type(boxes_) + desc["T"] = tf.data_type(scores_) + tf.execute(desc) + end +end + + +""" + sample_distorted_bounding_box_v2(image_size, bounding_boxes, min_object_covered; seed=0, seed2=0, aspect_ratio_range=Int64[], area_range=Int64[], max_attempts=100, use_image_if_no_bounding_boxes=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do + desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") + image_size_ = convert(Tensor{Any}, image_size_) + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + min_object_covered_ = convert(Tensor{Float32}, min_object_covered_) + (image_size_,) = tf.tf_promote(image_size_) + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + tf.add_input(desc, min_object_covered_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sample_distorted_bounding_box_v2(image_size_::tf.TensorHandle, bounding_boxes_::tf.TensorHandle, min_object_covered_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + desc = tf.EagerOp("SampleDistortedBoundingBoxV2") + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + tf.add_input(desc, min_object_covered_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + desc["T"] = tf.data_type(image_size_) + tf.execute(desc) + end +end + + +""" + initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter= ) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + local desc + tf.with_op_name(name, "InitializeTableFromTextFile") do + desc = tf.NodeDescription("InitializeTableFromTextFile") + table_handle_ = convert(Tensor{String}, table_handle_) + filename_ = convert(Tensor{String}, filename_) + 
tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + end + tf.Tensor(tf.Operation(desc)) + end + function initialize_table_from_text_file(table_handle_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + desc = tf.EagerOp("InitializeTableFromTextFile") + tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_size(table_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_size(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableSize") do + desc = tf.NodeDescription("LookupTableSize") + table_handle_ = convert(Tensor{String}, table_handle_) + tf.add_input(desc, table_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_size(table_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableSize") + tf.add_input(desc, table_handle_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdagradDA") do + desc = tf.NodeDescription("SparseApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, 
indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyAdagradDA") + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(gradient_accumulator_) + desc["T"] = tf.data_type(gradient_squared_accumulator_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + (tf.execute(desc))[1] + end +end + + +""" + broadcast_gradient_args(s0, s1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastGradientArgs") do + desc = tf.NodeDescription("BroadcastGradientArgs") + s0_ = convert(Tensor{Int32}, s0_) + s1_ = convert(Tensor{Int32}, s1_) + (s0_, s1_) = tf.tf_promote(s0_, s1_) + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function broadcast_gradient_args(s0_::tf.TensorHandle, s1_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BroadcastGradientArgs") + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) + desc["T"] = tf.data_type(s0_) + desc["T"] = tf.data_type(s1_) + tf.execute(desc) + end +end + + +""" + summary_writer(; shared_name=, container=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + local desc + tf.with_op_name(name, "SummaryWriter") do + desc = tf.NodeDescription("SummaryWriter") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + end + tf.Tensor(tf.Operation(desc)) + end + function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + desc = tf.EagerOp("SummaryWriter") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + (tf.execute(desc))[1] + end +end + + +""" + recv_tpu_embedding_activations() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + local desc + tf.with_op_name(name, "RecvTPUEmbeddingActivations") do + desc = tf.NodeDescription("RecvTPUEmbeddingActivations") + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + if config !== nothing + desc["config"] = Base.String(config) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_outputs + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + desc = tf.EagerOp("RecvTPUEmbeddingActivations") + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + if config !== 
nothing + desc["config"] = Base.String(config) + end + tf.execute(desc) + end +end + + +""" + _while(input) + +output = input; While (Cond(output)) { output = Body(output) } +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "_While") do + desc = tf.NodeDescription("_While") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _while(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing) + desc = tf.EagerOp("_While") + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + (tf.execute(desc))[1] + end +end + + +""" + initialize_table(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "InitializeTable") do + desc = tf.NodeDescription("InitializeTable") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function initialize_table(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InitializeTable") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tkey"] = tf.data_type(keys_) + desc["Tval"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + debug_numeric_summary(input; device_name=, tensor_name=, debug_urls=Int64[], lower_bound=?, upper_bound=?, mute_if_healthy=false, gated_grpc=false) + +Debug Numeric Summary Op. 
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugNumericSummary") do + desc = tf.NodeDescription("DebugNumericSummary") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if lower_bound !== nothing + desc["lower_bound"] = Base.identity(lower_bound) + end + if upper_bound !== nothing + desc["upper_bound"] = Base.identity(upper_bound) + end + if mute_if_healthy !== nothing + desc["mute_if_healthy"] = Base.Bool(mute_if_healthy) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + tf.Tensor(tf.Operation(desc)) + end + function debug_numeric_summary(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + desc = tf.EagerOp("DebugNumericSummary") + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if lower_bound !== nothing + desc["lower_bound"] = Base.identity(lower_bound) + end + if upper_bound !== nothing + desc["upper_bound"] = Base.identity(upper_bound) + end + if mute_if_healthy !== nothing + desc["mute_if_healthy"] = Base.Bool(mute_if_healthy) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if 
shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + tanh(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tanh(x_; name=nothing) + local desc + tf.with_op_name(name, "Tanh") do + desc = tf.NodeDescription("Tanh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function tanh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Tanh") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + symbolic_gradient(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + local desc + tf.with_op_name(name, "SymbolicGradient") do + desc = tf.NodeDescription("SymbolicGradient") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + end + tf.Tensor(tf.Operation(desc)) + end + function symbolic_gradient(input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + desc = tf.EagerOp("SymbolicGradient") + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_update_ensemble(tree_ensemble_handle, feature_ids, node_ids, gains, thresholds, left_node_contribs, right_node_contribs, max_depth, learning_rate) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do + desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + feature_ids_ = convert(Tensor{Int32}, feature_ids_) + node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_] + gains_ = [convert(Tensor{Float32}, x) for x = gains_] + thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_] + left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_] + right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_] + max_depth_ = convert(Tensor{Int32}, max_depth_) + learning_rate_ = convert(Tensor{Float32}, learning_rate_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, feature_ids_) + tf.add_input(desc, node_ids_) + tf.add_input(desc, gains_) + tf.add_input(desc, thresholds_) + tf.add_input(desc, left_node_contribs_) + tf.add_input(desc, right_node_contribs_) + tf.add_input(desc, max_depth_) + tf.add_input(desc, learning_rate_) + if pruning_mode !== nothing + desc["pruning_mode"] = Base.Int(pruning_mode) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_update_ensemble(tree_ensemble_handle_::tf.TensorHandle, feature_ids_::tf.TensorHandle, 
node_ids_::tf.TensorHandle, gains_::tf.TensorHandle, thresholds_::tf.TensorHandle, left_node_contribs_::tf.TensorHandle, right_node_contribs_::tf.TensorHandle, max_depth_::tf.TensorHandle, learning_rate_::tf.TensorHandle; name=nothing, pruning_mode=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesUpdateEnsemble") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, feature_ids_) + tf.add_input(desc, node_ids_) + tf.add_input(desc, gains_) + tf.add_input(desc, thresholds_) + tf.add_input(desc, left_node_contribs_) + tf.add_input(desc, right_node_contribs_) + tf.add_input(desc, max_depth_) + tf.add_input(desc, learning_rate_) + if pruning_mode !== nothing + desc["pruning_mode"] = Base.Int(pruning_mode) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + (tf.execute(desc))[1] + end +end + + +""" + apply_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ApplyMomentum") do + desc = tf.NodeDescription("ApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + momentum_ = convert(Tensor{Any}, momentum_) + (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ApplyMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end + + +""" + reader_read(reader_handle, queue_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRead") do + desc = tf.NodeDescription("ReaderRead") + reader_handle_ = convert(Tensor{String}, reader_handle_) + queue_handle_ = convert(Tensor{String}, queue_handle_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function reader_read(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderRead") + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.execute(desc) + end +end + + +""" + 
_wait_for_distributed_tpu(inputs; startup_timeout_sec=20) + +An op that blocks execution until a distributed TPU system has +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + local desc + tf.with_op_name(name, "_WaitForDistributedTPU") do + desc = tf.NodeDescription("_WaitForDistributedTPU") + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if startup_timeout_sec !== nothing + desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _wait_for_distributed_tpu(inputs_::tf.TensorHandle; name=nothing, startup_timeout_sec=nothing, N=nothing) + desc = tf.EagerOp("_WaitForDistributedTPU") + tf.add_input(desc, inputs_) + if startup_timeout_sec !== nothing + desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + mutex_lock(mutex) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutex_lock(mutex_; name=nothing) + local desc + tf.with_op_name(name, "MutexLock") do + desc = tf.NodeDescription("MutexLock") + mutex_ = convert(Tensor{Any}, mutex_) + tf.add_input(desc, mutex_) + end + tf.Tensor(tf.Operation(desc)) + end + function mutex_lock(mutex_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MutexLock") + tf.add_input(desc, mutex_) + (tf.execute(desc))[1] + end +end + + +""" + accumulator_set_global_step(handle, new_global_step) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing) + local desc + tf.with_op_name(name, "AccumulatorSetGlobalStep") do + desc = tf.NodeDescription("AccumulatorSetGlobalStep") + handle_ = convert(Tensor{String}, handle_) + new_global_step_ = convert(Tensor{Int64}, new_global_step_) + tf.add_input(desc, handle_) + tf.add_input(desc, new_global_step_) + end + tf.Tensor(tf.Operation(desc)) + end + function accumulator_set_global_step(handle_::tf.TensorHandle, new_global_step_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AccumulatorSetGlobalStep") + tf.add_input(desc, handle_) + tf.add_input(desc, new_global_step_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_add(x, y, min_x, max_x, min_y, max_y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedAdd") do + desc = tf.NodeDescription("QuantizedAdd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + min_x_ = convert(Tensor{Float32}, min_x_) + max_x_ = convert(Tensor{Float32}, max_x_) + min_y_ = convert(Tensor{Float32}, min_y_) + max_y_ = convert(Tensor{Float32}, max_y_) + (x_,) = tf.tf_promote(x_) + (y_,) = tf.tf_promote(y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_add(x_::tf.TensorHandle, y_::tf.TensorHandle, min_x_::tf.TensorHandle, max_x_::tf.TensorHandle, min_y_::tf.TensorHandle, 
max_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QuantizedAdd") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) + desc["T1"] = tf.data_type(x_) + desc["T2"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + squeeze(input; squeeze_dims=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) + local desc + tf.with_op_name(name, "Squeeze") do + desc = tf.NodeDescription("Squeeze") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if squeeze_dims !== nothing + desc["squeeze_dims"] = map(Base.identity, squeeze_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function squeeze(input_::tf.TensorHandle; name=nothing, squeeze_dims=nothing) + desc = tf.EagerOp("Squeeze") + tf.add_input(desc, input_) + if squeeze_dims !== nothing + desc["squeeze_dims"] = map(Base.identity, squeeze_dims) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_matching_files_dataset(patterns) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do + desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") + patterns_ = convert(Tensor{String}, patterns_) + tf.add_input(desc, patterns_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_matching_files_dataset(patterns_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalMatchingFilesDataset") + tf.add_input(desc, patterns_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_dataset_to_tf_record(input_dataset, filename, compression_type) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do + desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + filename_ = convert(Tensor{String}, filename_) + compression_type_ = convert(Tensor{String}, compression_type_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + tf.add_input(desc, compression_type_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_dataset_to_tf_record(input_dataset_::tf.TensorHandle, filename_::tf.TensorHandle, compression_type_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalDatasetToTFRecord") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + tf.add_input(desc, compression_type_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_stochastic_gradient_descent_parameters(parameters; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(Tensor{Float32}, 
parameters_) + tf.add_input(desc, parameters_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") + tf.add_input(desc, parameters_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + no_op() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function no_op(; name=nothing) + local desc + tf.with_op_name(name, "NoOp") do + desc = tf.NodeDescription("NoOp") + end + tf.Tensor(tf.Operation(desc)) + end + function no_op(; name=nothing) + desc = tf.EagerOp("NoOp") + (tf.execute(desc))[1] + end +end + + +""" + zip_dataset(input_datasets) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ZipDataset") do + desc = tf.NodeDescription("ZipDataset") + input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] + tf.add_input(desc, input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function zip_dataset(input_datasets_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("ZipDataset") + tf.add_input(desc, input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + identity_reader_v2(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReaderV2") do + desc = tf.NodeDescription("IdentityReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("IdentityReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + lmdb_reader(; container=, shared_name=) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "LMDBReader") do + desc = tf.NodeDescription("LMDBReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("LMDBReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + nccl_all_reduce(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "NcclAllReduce") do + desc = tf.NodeDescription("NcclAllReduce") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function nccl_all_reduce(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("NcclAllReduce") + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + text_line_dataset(filenames, compression_type, buffer_size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TextLineDataset") do + desc = tf.NodeDescription("TextLineDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + end + tf.Tensor(tf.Operation(desc)) + end + function text_line_dataset(filenames_::tf.TensorHandle, compression_type_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TextLineDataset") + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + (tf.execute(desc))[1] + end +end + + +""" + sdca_shrink_l1(weights) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + local desc + tf.with_op_name(name, "SdcaShrinkL1") do + desc = tf.NodeDescription("SdcaShrinkL1") + weights_ = [convert(Tensor{Float32}, x) for x = weights_] + tf.add_input(desc, weights_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 
!== nothing + desc["l2"] = Base.identity(l2) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sdca_shrink_l1(weights_::tf.TensorHandle; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + desc = tf.EagerOp("SdcaShrinkL1") + tf.add_input(desc, weights_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + (tf.execute(desc))[1] + end +end + + +""" + tf_record_reader_v2(; container=, shared_name=, compression_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "TFRecordReaderV2") do + desc = tf.NodeDescription("TFRecordReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + desc = tf.EagerOp("TFRecordReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + (tf.execute(desc))[1] + end +end + + +""" + multi_device_iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator_from_string_handle(string_handle_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + padded_batch_dataset_v2(input_dataset, batch_size, padded_shapes, padding_values, drop_remainder) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDatasetV2") do + desc = tf.NodeDescription("PaddedBatchDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + padded_shapes_ = 
[convert(Tensor{Int64}, x) for x = padded_shapes_] + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + tf.add_input(desc, drop_remainder_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function padded_batch_dataset_v2(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, padded_shapes_::tf.TensorHandle, padding_values_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("PaddedBatchDatasetV2") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + tf.add_input(desc, drop_remainder_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_proximal_adagrad_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_size(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySize") do + desc = tf.NodeDescription("TensorArraySize") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = 
convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_size(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySize") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_size(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapSize") do + desc = tf.NodeDescription("OrderedMapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + stateless_random_uniform(shape, seed; dtype=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniform") do + desc = tf.NodeDescription("StatelessRandomUniform") + shape_ = convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_random_uniform(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomUniform") + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + desc["Tseed"] = tf.data_type(seed_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_to_sparse_set_operation(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SparseToSparseSetOperation") do + desc = tf.NodeDescription("SparseToSparseSetOperation") + set1_indices_ = convert(Tensor{Int64}, set1_indices_) + set1_values_ = convert(Tensor{Any}, 
set1_values_) + set1_shape_ = convert(Tensor{Int64}, set1_shape_) + set2_indices_ = convert(Tensor{Int64}, set2_indices_) + set2_values_ = convert(Tensor{Any}, set2_values_) + set2_shape_ = convert(Tensor{Int64}, set2_shape_) + (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_) + tf.add_input(desc, set1_indices_) + tf.add_input(desc, set1_values_) + tf.add_input(desc, set1_shape_) + tf.add_input(desc, set2_indices_) + tf.add_input(desc, set2_values_) + tf.add_input(desc, set2_shape_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_to_sparse_set_operation(set1_indices_::tf.TensorHandle, set1_values_::tf.TensorHandle, set1_shape_::tf.TensorHandle, set2_indices_::tf.TensorHandle, set2_values_::tf.TensorHandle, set2_shape_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing) + desc = tf.EagerOp("SparseToSparseSetOperation") + tf.add_input(desc, set1_indices_) + tf.add_input(desc, set1_values_) + tf.add_input(desc, set1_shape_) + tf.add_input(desc, set2_indices_) + tf.add_input(desc, set2_values_) + tf.add_input(desc, set2_shape_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + desc["T"] = tf.data_type(set1_values_) + desc["T"] = tf.data_type(set2_values_) + tf.execute(desc) + end +end + + +""" + tensor_summary(tensor; description=, labels=Int64[], display_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + local desc + tf.with_op_name(name, "TensorSummary") do + desc = tf.NodeDescription("TensorSummary") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if description !== nothing + desc["description"] = Base.String(description) + end + if labels !== nothing + desc["labels"] = map(Base.identity, labels) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_summary(tensor_::tf.TensorHandle; name=nothing, description=nothing, labels=nothing, display_name=nothing) + desc = tf.EagerOp("TensorSummary") + tf.add_input(desc, tensor_) + if description !== nothing + desc["description"] = Base.String(description) + end + if labels !== nothing + desc["labels"] = map(Base.identity, labels) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + remote_fused_graph_execute(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + local desc + tf.with_op_name(name, "RemoteFusedGraphExecute") do + desc = tf.NodeDescription("RemoteFusedGraphExecute") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Toutputs !== 
nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if serialized_remote_fused_graph_execute_info !== nothing + desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) + end + end + tf.Tensor(tf.Operation(desc)) + end + function remote_fused_graph_execute(inputs_::tf.TensorHandle; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + desc = tf.EagerOp("RemoteFusedGraphExecute") + tf.add_input(desc, inputs_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if serialized_remote_fused_graph_execute_info !== nothing + desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_slice_grad(backprop_val_grad, input_indices, input_start, output_indices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseSliceGrad") do + desc = tf.NodeDescription("SparseSliceGrad") + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_start_ = convert(Tensor{Int64}, input_start_) + output_indices_ = convert(Tensor{Int64}, output_indices_) + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_start_) + tf.add_input(desc, output_indices_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_slice_grad(backprop_val_grad_::tf.TensorHandle, input_indices_::tf.TensorHandle, input_start_::tf.TensorHandle, output_indices_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSliceGrad") + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_start_) + tf.add_input(desc, output_indices_) + desc["T"] = tf.data_type(backprop_val_grad_) + (tf.execute(desc))[1] + end +end + + +""" + cumsum(x, axis; exclusive=false, reverse=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumsum") do + desc = tf.NodeDescription("Cumsum") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cumsum(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, exclusive=nothing, reverse=nothing) + desc = tf.EagerOp("Cumsum") + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + desc["T"] = tf.data_type(x_) + desc["Tidx"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + batch_norm_with_global_normalization_grad(t, m, v, gamma, backprop) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") + t_ = convert(Tensor{Any}, t_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + gamma_ = convert(Tensor{Any}, gamma_) + backprop_ = convert(Tensor{Any}, backprop_) + (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_) + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, gamma_) + tf.add_input(desc, backprop_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function batch_norm_with_global_normalization_grad(t_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, gamma_::tf.TensorHandle, backprop_::tf.TensorHandle; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, gamma_) + tf.add_input(desc, backprop_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + desc["T"] = tf.data_type(t_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(v_) + desc["T"] = tf.data_type(gamma_) + desc["T"] = tf.data_type(backprop_) + tf.execute(desc) + end +end + + +""" + avg_pool_grad(orig_input_shape, grad; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPoolGrad") do + desc = tf.NodeDescription("AvgPoolGrad") + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + grad_ = convert(Tensor{Any}, grad_) + (grad_,) = tf.tf_promote(grad_) + tf.add_input(desc, orig_input_shape_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function avg_pool_grad(orig_input_shape_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPoolGrad") + tf.add_input(desc, orig_input_shape_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(grad_) + 
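# Editorial note, not generator output: "T" is inferred from the runtime dtype of grad_; tf.execute returns a vector of output handles, so the wrapper returns only the first. + 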
(tf.execute(desc))[1] + end +end + + +""" + restore_v2(prefix, tensor_names, shape_and_slices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "RestoreV2") do + desc = tf.NodeDescription("RestoreV2") + prefix_ = convert(Tensor{String}, prefix_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function restore_v2(prefix_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shape_and_slices_::tf.TensorHandle; name=nothing, dtypes=nothing) + desc = tf.EagerOp("RestoreV2") + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + (tf.execute(desc))[1] + end +end + + +""" + relu6(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu6(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6") do + desc = tf.NodeDescription("Relu6") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function relu6(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Relu6") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyRMSProp") do + desc = tf.NodeDescription("SparseApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, 
ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(ms_) + desc["T"] = tf.data_type(mom_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + _recv(; client_terminated=false) + +Receives the named tensor from send_device on recv_device. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Recv") do + desc = tf.NodeDescription("_Recv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_Recv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + (tf.execute(desc))[1] + end +end + + +""" + max_pool(input; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool") do + desc = tf.NodeDescription("MaxPool") + input_ = convert(Tensor{Float32}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool") + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if 
+
+
+"""
+    invert(x)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function invert(x_; name=nothing)
+        local desc
+        tf.with_op_name(name, "Invert") do
+            desc = tf.NodeDescription("Invert")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function invert(x_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("Invert")
+        tf.add_input(desc, x_)
+        desc["T"] = tf.data_type(x_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    _unary_ops_composition(x)
+
+*NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is
+expected to create these operators.
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _unary_ops_composition(x_; name=nothing, op_names=nothing)
+        local desc
+        tf.with_op_name(name, "_UnaryOpsComposition") do
+            desc = tf.NodeDescription("_UnaryOpsComposition")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
+            if op_names !== nothing
+                desc["op_names"] = map(Base.identity, op_names)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function _unary_ops_composition(x_::tf.TensorHandle; name=nothing, op_names=nothing)
+        desc = tf.EagerOp("_UnaryOpsComposition")
+        tf.add_input(desc, x_)
+        if op_names !== nothing
+            desc["op_names"] = map(Base.identity, op_names)
+        end
+        desc["T"] = tf.data_type(x_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    experimental_map_dataset(input_dataset, other_arguments; use_inter_op_parallelism=true, preserve_cardinality=false)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalMapDataset") do
+            desc = tf.NodeDescription("ExperimentalMapDataset")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_]
+            tf.add_input(desc, input_dataset_)
+            tf.add_input(desc, other_arguments_)
+            if f !== nothing
+                desc["f"] = Base.identity(f)
+            end
+            if Targuments !== nothing
+                desc["Targuments"] = map(Base.identity, Targuments)
+            end
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
+            if use_inter_op_parallelism !== nothing
+                desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+            end
+            if preserve_cardinality !== nothing
+                desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function experimental_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
+        desc = tf.EagerOp("ExperimentalMapDataset")
+        tf.add_input(desc, input_dataset_)
+        tf.add_input(desc, other_arguments_)
+        if f !== nothing
+            desc["f"] = Base.identity(f)
+        end
+        if Targuments !== nothing
+            desc["Targuments"] = map(Base.identity, Targuments)
+        end
+        if output_types !== nothing
+            desc["output_types"] = map(Base.identity, output_types)
+        end
+        if output_shapes !== nothing
+            desc["output_shapes"] = map(Base.identity, output_shapes)
+        end
+        if use_inter_op_parallelism !== nothing
+            desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism)
+        end
+        if preserve_cardinality !== nothing
+            desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    load_tpu_embedding_adam_parameters(parameters, momenta, velocities; table_id=-1, table_name=)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        local desc
+        tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do
+            desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters")
+            parameters_ = convert(Tensor{Float32}, parameters_)
+            momenta_ = convert(Tensor{Float32}, momenta_)
+            velocities_ = convert(Tensor{Float32}, velocities_)
+            tf.add_input(desc, parameters_)
+            tf.add_input(desc, momenta_)
+            tf.add_input(desc, velocities_)
+            if table_id !== nothing
+                desc["table_id"] = Base.Int(table_id)
+            end
+            if table_name !== nothing
+                desc["table_name"] = Base.String(table_name)
+            end
+            if num_shards !== nothing
+                desc["num_shards"] = Base.Int(num_shards)
+            end
+            if shard_id !== nothing
+                desc["shard_id"] = Base.Int(shard_id)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function load_tpu_embedding_adam_parameters(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, velocities_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters")
+        tf.add_input(desc, parameters_)
+        tf.add_input(desc, momenta_)
+        tf.add_input(desc, velocities_)
+        if table_id !== nothing
+            desc["table_id"] = Base.Int(table_id)
+        end
+        if table_name !== nothing
+            desc["table_name"] = Base.String(table_name)
+        end
+        if num_shards !== nothing
+            desc["num_shards"] = Base.Int(num_shards)
+        end
+        if shard_id !== nothing
+            desc["shard_id"] = Base.Int(shard_id)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    parse_tensor(serialized)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing)
+        local desc
+        tf.with_op_name(name, "ParseTensor") do
+            desc = tf.NodeDescription("ParseTensor")
+            serialized_ = convert(Tensor{String}, serialized_)
+            tf.add_input(desc, serialized_)
+            if out_type !== nothing
+                desc["out_type"] = Base.identity(out_type)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function parse_tensor(serialized_::tf.TensorHandle; name=nothing, out_type=nothing)
+        desc = tf.EagerOp("ParseTensor")
+        tf.add_input(desc, serialized_)
+        if out_type !== nothing
+            desc["out_type"] = Base.identity(out_type)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    experimental_materialized_index_dataset_handle()
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do
+            desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle")
+            if container !== nothing
+                desc["container"] = Base.String(container)
+            end
+            if shared_name !== nothing
+                desc["shared_name"] = Base.String(shared_name)
+            end
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+        desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle")
+        if container !== nothing
+            desc["container"] = Base.String(container)
+        end
+        if shared_name !== nothing
+            desc["shared_name"] = Base.String(shared_name)
+        end
+        if output_types !== nothing
+            desc["output_types"] = map(Base.identity, output_types)
+        end
+        if output_shapes !== nothing
+            desc["output_shapes"] = map(Base.identity, output_shapes)
+        end
+        (tf.execute(desc))[1]
+    end
+end
tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + multi_device_iterator_get_next_from_shard(multi_device_iterator, shard_num, incarnation_id) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do + desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + shard_num_ = convert(Tensor{Int32}, shard_num_) + incarnation_id_ = convert(Tensor{Int64}, incarnation_id_) + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, shard_num_) + tf.add_input(desc, incarnation_id_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator_get_next_from_shard(multi_device_iterator_::tf.TensorHandle, shard_num_::tf.TensorHandle, incarnation_id_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, shard_num_) + tf.add_input(desc, incarnation_id_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + random_uniform_int(shape, minval, maxval; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomUniformInt") do + desc = tf.NodeDescription("RandomUniformInt") + shape_ = convert(Tensor{Any}, shape_) + minval_ = convert(Tensor{Any}, minval_) + maxval_ = convert(Tensor{Any}, maxval_) + (shape_,) = tf.tf_promote(shape_) + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + tf.add_input(desc, shape_) + tf.add_input(desc, minval_) + tf.add_input(desc, maxval_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + 
+
+
+"""
+    sparse_softmax_cross_entropy_with_logits(features, labels)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
+        local desc
+        tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do
+            desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits")
+            features_ = convert(Tensor{Any}, features_)
+            labels_ = convert(Tensor{Int64}, labels_)
+            (features_,) = tf.tf_promote(features_)
+            (labels_,) = tf.tf_promote(labels_)
+            tf.add_input(desc, features_)
+            tf.add_input(desc, labels_)
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:2
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
+    function sparse_softmax_cross_entropy_with_logits(features_::tf.TensorHandle, labels_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits")
+        tf.add_input(desc, features_)
+        tf.add_input(desc, labels_)
+        desc["T"] = tf.data_type(features_)
+        desc["Tlabels"] = tf.data_type(labels_)
+        tf.execute(desc)
+    end
+end
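+
+# Editor's note: unlike most wrappers here, this op has two outputs (loss and
+# backprop), so the eager method returns the full tf.execute(desc) vector
+# instead of indexing [1]. A hedged sketch, constructors assumed from eager.jl:
+#
+#     logits = tf.TensorHandle(tf.RawTensor(rand(Float32, 4, 10)))
+#     labels = tf.TensorHandle(tf.RawTensor(Int64[0, 1, 2, 3]))  # 0-based class ids
+#     loss, backprop = sparse_softmax_cross_entropy_with_logits(logits, labels)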
+
+
+"""
+    tensor_array_read_v2(handle, index, flow_in)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+        local desc
+        tf.with_op_name(name, "TensorArrayReadV2") do
+            desc = tf.NodeDescription("TensorArrayReadV2")
+            handle_ = convert(Tensor{String}, handle_)
+            index_ = convert(Tensor{Int32}, index_)
+            flow_in_ = convert(Tensor{Float32}, flow_in_)
+            tf.add_input(desc, handle_)
+            tf.add_input(desc, index_)
+            tf.add_input(desc, flow_in_)
+            if dtype !== nothing
+                desc["dtype"] = Base.identity(dtype)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function tensor_array_read_v2(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing)
+        desc = tf.EagerOp("TensorArrayReadV2")
+        tf.add_input(desc, handle_)
+        tf.add_input(desc, index_)
+        tf.add_input(desc, flow_in_)
+        if dtype !== nothing
+            desc["dtype"] = Base.identity(dtype)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    reader_read_up_to(reader_handle, queue_handle, num_records)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing)
+        local desc
+        tf.with_op_name(name, "ReaderReadUpTo") do
+            desc = tf.NodeDescription("ReaderReadUpTo")
+            reader_handle_ = convert(Tensor{String}, reader_handle_)
+            queue_handle_ = convert(Tensor{String}, queue_handle_)
+            num_records_ = convert(Tensor{Int64}, num_records_)
+            tf.add_input(desc, reader_handle_)
+            tf.add_input(desc, queue_handle_)
+            tf.add_input(desc, num_records_)
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:2
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
+    function reader_read_up_to(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle, num_records_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("ReaderReadUpTo")
+        tf.add_input(desc, reader_handle_)
+        tf.add_input(desc, queue_handle_)
+        tf.add_input(desc, num_records_)
+        tf.execute(desc)
+    end
+end
+
+
+"""
+    encode_proto(sizes, values; descriptor_source=local://)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+        local desc
+        tf.with_op_name(name, "EncodeProto") do
+            desc = tf.NodeDescription("EncodeProto")
+            sizes_ = convert(Tensor{Int32}, sizes_)
+            values_ = [convert(Tensor{Any}, x) for x = values_]
+            tf.add_input(desc, sizes_)
+            tf.add_input(desc, values_)
+            if field_names !== nothing
+                desc["field_names"] = map(Base.identity, field_names)
+            end
+            if message_type !== nothing
+                desc["message_type"] = Base.String(message_type)
+            end
+            if descriptor_source !== nothing
+                desc["descriptor_source"] = Base.String(descriptor_source)
+            end
+            if Tinput_types !== nothing
+                desc["Tinput_types"] = map(Base.identity, Tinput_types)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function encode_proto(sizes_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+        desc = tf.EagerOp("EncodeProto")
+        tf.add_input(desc, sizes_)
+        tf.add_input(desc, values_)
+        if field_names !== nothing
+            desc["field_names"] = map(Base.identity, field_names)
+        end
+        if message_type !== nothing
+            desc["message_type"] = Base.String(message_type)
+        end
+        if descriptor_source !== nothing
+            desc["descriptor_source"] = Base.String(descriptor_source)
+        end
+        if Tinput_types !== nothing
+            desc["Tinput_types"] = map(Base.identity, Tinput_types)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    strided_slice_grad(shape, begin, end, strides, dy; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+        local desc
+        tf.with_op_name(name, "StridedSliceGrad") do
+            desc = tf.NodeDescription("StridedSliceGrad")
+            shape_ = convert(Tensor{Any}, shape_)
+            begin_ = convert(Tensor{Any}, begin_)
+            begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1)
+            end_ = convert(Tensor{Any}, end_)
+            end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1)
+            strides_ = convert(Tensor{Any}, strides_)
+            strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1)
+            dy_ = convert(Tensor{Any}, dy_)
+            (dy_,) = tf.tf_promote(dy_)
+            (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_)
+            tf.add_input(desc, shape_)
+            tf.add_input(desc, begin_)
+            tf.add_input(desc, end_)
+            tf.add_input(desc, strides_)
+            tf.add_input(desc, dy_)
+            if Index !== nothing
+                desc["Index"] = Base.identity(Index)
+            end
+            if begin_mask !== nothing
+                begin_mask = Base.Int(begin_mask) - 1
+            end
+            if begin_mask !== nothing
+                desc["begin_mask"] = Base.Int(begin_mask)
+            end
+            if end_mask !== nothing
+                end_mask = Base.Int(end_mask) - 1
+            end
+            if end_mask !== nothing
+                desc["end_mask"] = Base.Int(end_mask)
+            end
+            if ellipsis_mask !== nothing
+                ellipsis_mask = Base.Int(ellipsis_mask) - 1
+            end
+            if ellipsis_mask !== nothing
+                desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+            end
+            if new_axis_mask !== nothing
+                new_axis_mask = Base.Int(new_axis_mask) - 1
+            end
+            if new_axis_mask !== nothing
+                desc["new_axis_mask"] = Base.Int(new_axis_mask)
+            end
+            if shrink_axis_mask !== nothing
+                shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+            end
+            if shrink_axis_mask !== nothing
+                desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function strided_slice_grad(shape_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+        desc = tf.EagerOp("StridedSliceGrad")
+        tf.add_input(desc, shape_)
+        tf.add_input(desc, begin_)
+        tf.add_input(desc, end_)
+        tf.add_input(desc, strides_)
+        tf.add_input(desc, dy_)
+        if Index !== nothing
+            desc["Index"] = Base.identity(Index)
+        end
+        if begin_mask !== nothing
+            begin_mask = Base.Int(begin_mask) - 1
+        end
+        if begin_mask !== nothing
+            desc["begin_mask"] = Base.Int(begin_mask)
+        end
+        if end_mask !== nothing
+            end_mask = Base.Int(end_mask) - 1
+        end
+        if end_mask !== nothing
+            desc["end_mask"] = Base.Int(end_mask)
+        end
+        if ellipsis_mask !== nothing
+            ellipsis_mask = Base.Int(ellipsis_mask) - 1
+        end
+        if ellipsis_mask !== nothing
+            desc["ellipsis_mask"] = Base.Int(ellipsis_mask)
+        end
+        if new_axis_mask !== nothing
+            new_axis_mask = Base.Int(new_axis_mask) - 1
+        end
+        if new_axis_mask !== nothing
+            desc["new_axis_mask"] = Base.Int(new_axis_mask)
+        end
+        if shrink_axis_mask !== nothing
+            shrink_axis_mask = Base.Int(shrink_axis_mask) - 1
+        end
+        if shrink_axis_mask !== nothing
+            desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask)
+        end
+        desc["Index"] = tf.data_type(shape_)
+        desc["Index"] = tf.data_type(begin_)
+        desc["Index"] = tf.data_type(end_)
+        desc["Index"] = tf.data_type(strides_)
+        desc["T"] = tf.data_type(dy_)
+        (tf.execute(desc))[1]
+    end
+end
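+
+# Editor's note: the graph wrapper above shifts begin/end/strides tensors (and
+# the *_mask attrs) to compensate for Julia's 1-based indexing, while the eager
+# wrapper, as generated, only shifts the masks; the index tensors pass through
+# unchanged, so eager callers must supply 0-based begin/end/strides. A hedged
+# sketch with hypothetical, already-constructed handles shape_h/begin_h/etc.:
+#
+#     dy = tf.TensorHandle(tf.RawTensor(ones(Float32, 2)))
+#     g = strided_slice_grad(shape_h, begin_h, end_h, strides_h, dy)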
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceSend") do + desc = tf.NodeDescription("_NcclReduceSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _nccl_reduce_send(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclReduceSend") + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + padded_batch_dataset(input_dataset, batch_size, padded_shapes, padding_values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDataset") do + desc = tf.NodeDescription("PaddedBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function padded_batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, padded_shapes_::tf.TensorHandle, padding_values_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("PaddedBatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + data_format_vec_permute(x; src_format=NHWC, dst_format=NCHW) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatVecPermute") do + desc = tf.NodeDescription("DataFormatVecPermute") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if src_format !== nothing + desc["src_format"] 
+
+
+"""
+    data_format_vec_permute(x; src_format=NHWC, dst_format=NCHW)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing)
+        local desc
+        tf.with_op_name(name, "DataFormatVecPermute") do
+            desc = tf.NodeDescription("DataFormatVecPermute")
+            x_ = convert(Tensor{Int32}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
+            if src_format !== nothing
+                desc["src_format"] = Base.String(src_format)
+            end
+            if dst_format !== nothing
+                desc["dst_format"] = Base.String(dst_format)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function data_format_vec_permute(x_::tf.TensorHandle; name=nothing, src_format=nothing, dst_format=nothing)
+        desc = tf.EagerOp("DataFormatVecPermute")
+        tf.add_input(desc, x_)
+        if src_format !== nothing
+            desc["src_format"] = Base.String(src_format)
+        end
+        if dst_format !== nothing
+            desc["dst_format"] = Base.String(dst_format)
+        end
+        desc["T"] = tf.data_type(x_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    string_format(inputs; template=%s, placeholder=%s, summarize=3)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+        local desc
+        tf.with_op_name(name, "StringFormat") do
+            desc = tf.NodeDescription("StringFormat")
+            inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+            tf.add_input(desc, inputs_)
+            if T !== nothing
+                desc["T"] = map(Base.identity, T)
+            end
+            if template !== nothing
+                desc["template"] = Base.String(template)
+            end
+            if placeholder !== nothing
+                desc["placeholder"] = Base.String(placeholder)
+            end
+            if summarize !== nothing
+                desc["summarize"] = Base.Int(summarize)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function string_format(inputs_::tf.TensorHandle; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+        desc = tf.EagerOp("StringFormat")
+        tf.add_input(desc, inputs_)
+        if T !== nothing
+            desc["T"] = map(Base.identity, T)
+        end
+        if template !== nothing
+            desc["template"] = Base.String(template)
+        end
+        if placeholder !== nothing
+            desc["placeholder"] = Base.String(placeholder)
+        end
+        if summarize !== nothing
+            desc["summarize"] = Base.Int(summarize)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    as_string(input; precision=-1, scientific=false, shortest=false, width=-1, fill=)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+        local desc
+        tf.with_op_name(name, "AsString") do
+            desc = tf.NodeDescription("AsString")
+            input_ = convert(Tensor{Any}, input_)
+            (input_,) = tf.tf_promote(input_)
+            tf.add_input(desc, input_)
+            if precision !== nothing
+                desc["precision"] = Base.Int(precision)
+            end
+            if scientific !== nothing
+                desc["scientific"] = Base.Bool(scientific)
+            end
+            if shortest !== nothing
+                desc["shortest"] = Base.Bool(shortest)
+            end
+            if width !== nothing
+                desc["width"] = Base.Int(width)
+            end
+            if fill !== nothing
+                desc["fill"] = Base.String(fill)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function as_string(input_::tf.TensorHandle; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+        desc = tf.EagerOp("AsString")
+        tf.add_input(desc, input_)
+        if precision !== nothing
+            desc["precision"] = Base.Int(precision)
+        end
+        if scientific !== nothing
+            desc["scientific"] = Base.Bool(scientific)
+        end
+        if shortest !== nothing
+            desc["shortest"] = Base.Bool(shortest)
+        end
+        if width !== nothing
+            desc["width"] = Base.Int(width)
+        end
+        if fill !== nothing
+            desc["fill"] = Base.String(fill)
+        end
+        desc["T"] = tf.data_type(input_)
+        (tf.execute(desc))[1]
+    end
+end
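+
+# Editor's note: a hedged sketch (constructors assumed from eager.jl).
+# AsString formats numeric tensors as strings:
+#
+#     x = tf.TensorHandle(tf.RawTensor(Float32[3.14159]))
+#     s = as_string(x; precision=2, scientific=false)  # -> "3.14"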
+
+
+"""
+    queue_enqueue_many(handle, components; timeout_ms=-1)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+        local desc
+        tf.with_op_name(name, "QueueEnqueueMany") do
+            desc = tf.NodeDescription("QueueEnqueueMany")
+            handle_ = convert(Tensor{String}, handle_)
+            components_ = [convert(Tensor{Any}, x) for x = components_]
+            tf.add_input(desc, handle_)
+            tf.add_input(desc, components_)
+            if Tcomponents !== nothing
+                desc["Tcomponents"] = map(Base.identity, Tcomponents)
+            end
+            if timeout_ms !== nothing
+                desc["timeout_ms"] = Base.Int(timeout_ms)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function queue_enqueue_many(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+        desc = tf.EagerOp("QueueEnqueueMany")
+        tf.add_input(desc, handle_)
+        tf.add_input(desc, components_)
+        if Tcomponents !== nothing
+            desc["Tcomponents"] = map(Base.identity, Tcomponents)
+        end
+        if timeout_ms !== nothing
+            desc["timeout_ms"] = Base.Int(timeout_ms)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    fake_param()
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_param(; name=nothing, dtype=nothing, shape=nothing)
+        local desc
+        tf.with_op_name(name, "FakeParam") do
+            desc = tf.NodeDescription("FakeParam")
+            if dtype !== nothing
+                desc["dtype"] = Base.identity(dtype)
+            end
+            if shape !== nothing
+                desc["shape"] = Base.identity(shape)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function fake_param(; name=nothing, dtype=nothing, shape=nothing)
+        desc = tf.EagerOp("FakeParam")
+        if dtype !== nothing
+            desc["dtype"] = Base.identity(dtype)
+        end
+        if shape !== nothing
+            desc["shape"] = Base.identity(shape)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    apply_adagrad(var, accum, lr, grad; use_locking=false, update_slots=true)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+        local desc
+        tf.with_op_name(name, "ApplyAdagrad") do
+            desc = tf.NodeDescription("ApplyAdagrad")
+            var_ = convert(Tensor{Any}, var_)
+            accum_ = convert(Tensor{Any}, accum_)
+            lr_ = convert(Tensor{Any}, lr_)
+            grad_ = convert(Tensor{Any}, grad_)
+            (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_)
+            tf.add_input(desc, var_)
+            tf.add_input(desc, accum_)
+            tf.add_input(desc, lr_)
+            tf.add_input(desc, grad_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
+            if update_slots !== nothing
+                desc["update_slots"] = Base.Bool(update_slots)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing)
+        desc = tf.EagerOp("ApplyAdagrad")
+        tf.add_input(desc, var_)
+        tf.add_input(desc, accum_)
+        tf.add_input(desc, lr_)
+        tf.add_input(desc, grad_)
+        if use_locking !== nothing
+            desc["use_locking"] = Base.Bool(use_locking)
+        end
+        if update_slots !== nothing
+            desc["update_slots"] = Base.Bool(update_slots)
+        end
+        desc["T"] = tf.data_type(var_)
+        desc["T"] = tf.data_type(accum_)
+        desc["T"] = tf.data_type(lr_)
+        desc["T"] = tf.data_type(grad_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    experimental_iterator_get_device(resource)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalIteratorGetDevice") do
+            desc = tf.NodeDescription("ExperimentalIteratorGetDevice")
+            resource_ = convert(Tensor{Any}, resource_)
+            tf.add_input(desc, resource_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function experimental_iterator_get_device(resource_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("ExperimentalIteratorGetDevice")
+        tf.add_input(desc, resource_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    adjust_contrast(images, contrast_factor, min_value, max_value)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing)
+        local desc
+        tf.with_op_name(name, "AdjustContrast") do
+            desc = tf.NodeDescription("AdjustContrast")
+            images_ = convert(Tensor{Any}, images_)
+            contrast_factor_ = convert(Tensor{Float32}, contrast_factor_)
+            min_value_ = convert(Tensor{Float32}, min_value_)
+            max_value_ = convert(Tensor{Float32}, max_value_)
+            (images_,) = tf.tf_promote(images_)
+            tf.add_input(desc, images_)
+            tf.add_input(desc, contrast_factor_)
+            tf.add_input(desc, min_value_)
+            tf.add_input(desc, max_value_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function adjust_contrast(images_::tf.TensorHandle, contrast_factor_::tf.TensorHandle, min_value_::tf.TensorHandle, max_value_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("AdjustContrast")
+        tf.add_input(desc, images_)
+        tf.add_input(desc, contrast_factor_)
+        tf.add_input(desc, min_value_)
+        tf.add_input(desc, max_value_)
+        desc["T"] = tf.data_type(images_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    extract_image_patches(images)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+        local desc
+        tf.with_op_name(name, "ExtractImagePatches") do
+            desc = tf.NodeDescription("ExtractImagePatches")
+            images_ = convert(Tensor{Any}, images_)
+            (images_,) = tf.tf_promote(images_)
+            tf.add_input(desc, images_)
+            if ksizes !== nothing
+                desc["ksizes"] = map(Base.identity, ksizes)
+            end
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if rates !== nothing
+                desc["rates"] = map(Base.identity, rates)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function extract_image_patches(images_::tf.TensorHandle; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+        desc = tf.EagerOp("ExtractImagePatches")
+        tf.add_input(desc, images_)
+        if ksizes !== nothing
+            desc["ksizes"] = map(Base.identity, ksizes)
+        end
+        if strides !== nothing
+            desc["strides"] = map(Base.identity, strides)
+        end
+        if rates !== nothing
+            desc["rates"] = map(Base.identity, rates)
+        end
+        if padding !== nothing
+            desc["padding"] = Base.String(padding)
+        end
+        desc["T"] = tf.data_type(images_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    scale_and_translate(images, size, scale, translation; kernel_type=lanczos3)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing)
+        local desc
+        tf.with_op_name(name, "ScaleAndTranslate") do
+            desc = tf.NodeDescription("ScaleAndTranslate")
+            images_ = convert(Tensor{Any}, images_)
+            size_ = convert(Tensor{Int32}, size_)
+            scale_ = convert(Tensor{Float32}, scale_)
+            translation_ = convert(Tensor{Float32}, translation_)
+            (images_,) = tf.tf_promote(images_)
+            tf.add_input(desc, images_)
+            tf.add_input(desc, size_)
+            tf.add_input(desc, scale_)
+            tf.add_input(desc, translation_)
+            if kernel_type !== nothing
+                desc["kernel_type"] = Base.String(kernel_type)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function scale_and_translate(images_::tf.TensorHandle, size_::tf.TensorHandle, scale_::tf.TensorHandle, translation_::tf.TensorHandle; name=nothing, kernel_type=nothing)
+        desc = tf.EagerOp("ScaleAndTranslate")
+        tf.add_input(desc, images_)
+        tf.add_input(desc, size_)
+        tf.add_input(desc, scale_)
+        tf.add_input(desc, translation_)
+        if kernel_type !== nothing
+            desc["kernel_type"] = Base.String(kernel_type)
+        end
+        desc["T"] = tf.data_type(images_)
+        (tf.execute(desc))[1]
+    end
+end
tf.NodeDescription("ScaleAndTranslate") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + scale_ = convert(Tensor{Float32}, scale_) + translation_ = convert(Tensor{Float32}, translation_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + tf.add_input(desc, scale_) + tf.add_input(desc, translation_) + if kernel_type !== nothing + desc["kernel_type"] = Base.String(kernel_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scale_and_translate(images_::tf.TensorHandle, size_::tf.TensorHandle, scale_::tf.TensorHandle, translation_::tf.TensorHandle; name=nothing, kernel_type=nothing) + desc = tf.EagerOp("ScaleAndTranslate") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + tf.add_input(desc, scale_) + tf.add_input(desc, translation_) + if kernel_type !== nothing + desc["kernel_type"] = Base.String(kernel_type) + end + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + optional_none() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_none(; name=nothing) + local desc + tf.with_op_name(name, "OptionalNone") do + desc + tf.NodeDescription("OptionalNone") + end + tf.Tensor(tf.Operation(desc)) + end + function optional_none(; name=nothing) + desc = tf.EagerOp("OptionalNone") + (tf.execute(desc))[1] + end +end + + +""" + variable_v2(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "VariableV2") do + desc = tf.NodeDescription("VariableV2") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("VariableV2") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + elu(features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function elu(features_; name=nothing) + local desc + tf.with_op_name(name, "Elu") do + desc = tf.NodeDescription("Elu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function elu(features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Elu") + tf.add_input(desc, features_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + scatter_update(ref, indices, updates; use_locking=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterUpdate") do + desc = tf.NodeDescription("ScatterUpdate") + ref_ = convert(Tensor{Any}, ref_) + 
+
+
+"""
+    floor_mod(x, y)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor_mod(x_, y_; name=nothing)
+        local desc
+        tf.with_op_name(name, "FloorMod") do
+            desc = tf.NodeDescription("FloorMod")
+            x_ = convert(Tensor{Any}, x_)
+            y_ = convert(Tensor{Any}, y_)
+            (x_, y_) = tf.tf_promote(x_, y_)
+            tf.add_input(desc, x_)
+            tf.add_input(desc, y_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function floor_mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("FloorMod")
+        tf.add_input(desc, x_)
+        tf.add_input(desc, y_)
+        desc["T"] = tf.data_type(x_)
+        desc["T"] = tf.data_type(y_)
+        (tf.execute(desc))[1]
+    end
+end
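+
+# Editor's note: FloorMod is floored (Python-style) modulo, so the result
+# takes the sign of the divisor. A hedged sketch, constructors assumed from
+# eager.jl:
+#
+#     x = tf.TensorHandle(tf.RawTensor(Int32[-7]))
+#     y = tf.TensorHandle(tf.RawTensor(Int32[3]))
+#     floor_mod(x, y)  # yields 2, where Julia's rem(-7, 3) is -1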
+
+
+"""
+    experimental_ignore_errors_dataset(input_dataset)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do
+            desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            tf.add_input(desc, input_dataset_)
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function experimental_ignore_errors_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+        desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset")
+        tf.add_input(desc, input_dataset_)
+        if output_types !== nothing
+            desc["output_types"] = map(Base.identity, output_types)
+        end
+        if output_shapes !== nothing
+            desc["output_shapes"] = map(Base.identity, output_shapes)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    experimental_set_stats_aggregator_dataset(input_dataset, stats_aggregator, tag, counter_prefix)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do
+            desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_)
+            tag_ = convert(Tensor{String}, tag_)
+            counter_prefix_ = convert(Tensor{String}, counter_prefix_)
+            tf.add_input(desc, input_dataset_)
+            tf.add_input(desc, stats_aggregator_)
+            tf.add_input(desc, tag_)
+            tf.add_input(desc, counter_prefix_)
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function experimental_set_stats_aggregator_dataset(input_dataset_::tf.TensorHandle, stats_aggregator_::tf.TensorHandle, tag_::tf.TensorHandle, counter_prefix_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+        desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset")
+        tf.add_input(desc, input_dataset_)
+        tf.add_input(desc, stats_aggregator_)
+        tf.add_input(desc, tag_)
+        tf.add_input(desc, counter_prefix_)
+        if output_types !== nothing
+            desc["output_types"] = map(Base.identity, output_types)
+        end
+        if output_shapes !== nothing
+            desc["output_shapes"] = map(Base.identity, output_shapes)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    compute_accidental_hits(true_classes, sampled_candidates; seed=0, seed2=0)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+        local desc
+        tf.with_op_name(name, "ComputeAccidentalHits") do
+            desc = tf.NodeDescription("ComputeAccidentalHits")
+            true_classes_ = convert(Tensor{Int64}, true_classes_)
+            sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_)
+            tf.add_input(desc, true_classes_)
+            tf.add_input(desc, sampled_candidates_)
+            if num_true !== nothing
+                desc["num_true"] = Base.Int(num_true)
+            end
+            if seed !== nothing
+                desc["seed"] = Base.Int(seed)
+            end
+            if seed2 !== nothing
+                desc["seed2"] = Base.Int(seed2)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
+    function compute_accidental_hits(true_classes_::tf.TensorHandle, sampled_candidates_::tf.TensorHandle; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+        desc = tf.EagerOp("ComputeAccidentalHits")
+        tf.add_input(desc, true_classes_)
+        tf.add_input(desc, sampled_candidates_)
+        if num_true !== nothing
+            desc["num_true"] = Base.Int(num_true)
+        end
+        if seed !== nothing
+            desc["seed"] = Base.Int(seed)
+        end
+        if seed2 !== nothing
+            desc["seed2"] = Base.Int(seed2)
+        end
+        tf.execute(desc)
+    end
+end
+
+
+"""
+    string_to_number(string_tensor; out_type=Float32)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing)
+        local desc
+        tf.with_op_name(name, "StringToNumber") do
+            desc = tf.NodeDescription("StringToNumber")
+            string_tensor_ = convert(Tensor{String}, string_tensor_)
+            tf.add_input(desc, string_tensor_)
+            if out_type !== nothing
+                desc["out_type"] = Base.identity(out_type)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function string_to_number(string_tensor_::tf.TensorHandle; name=nothing, out_type=nothing)
+        desc = tf.EagerOp("StringToNumber")
+        tf.add_input(desc, string_tensor_)
+        if out_type !== nothing
+            desc["out_type"] = Base.identity(out_type)
+        end
+        (tf.execute(desc))[1]
+    end
+end
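+
+# Editor's note: a hedged sketch; StringToNumber's out_type defaults to
+# Float32 on the TensorFlow side:
+#
+#     s = tf.TensorHandle(tf.RawTensor("2.5"))
+#     n = string_to_number(s)  # Float32 2.5f0; pass out_type=Int32 for integers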
tf.with_op_name(name, "Snapshot") do + desc = tf.NodeDescription("Snapshot") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function snapshot(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Snapshot") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + deserialize_iterator(resource_handle, serialized) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing) + local desc + tf.with_op_name(name, "DeserializeIterator") do + desc = tf.NodeDescription("DeserializeIterator") + resource_handle_ = convert(Tensor{Any}, resource_handle_) + serialized_ = convert(Tensor{Any}, serialized_) + tf.add_input(desc, resource_handle_) + tf.add_input(desc, serialized_) + end + tf.Tensor(tf.Operation(desc)) + end + function deserialize_iterator(resource_handle_::tf.TensorHandle, serialized_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DeserializeIterator") + tf.add_input(desc, resource_handle_) + tf.add_input(desc, serialized_) + (tf.execute(desc))[1] + end +end + + +""" + atan(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atan(x_; name=nothing) + local desc + tf.with_op_name(name, "Atan") do + desc = tf.NodeDescription("Atan") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function atan(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Atan") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + mat_mul(a, b; transpose_a=false, transpose_b=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + local desc + tf.with_op_name(name, "MatMul") do + desc = tf.NodeDescription("MatMul") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + (a_, b_) = tf.tf_promote(a_, b_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing) + desc = tf.EagerOp("MatMul") + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(b_) + (tf.execute(desc))[1] + end +end + + +""" + erfc(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function erfc(x_; name=nothing) + local desc + tf.with_op_name(name, "Erfc") do + desc = tf.NodeDescription("Erfc") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function erfc(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Erfc") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + sigmoid_grad(y, dy) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
+
+
+"""
+    erfc(x)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function erfc(x_; name=nothing)
+        local desc
+        tf.with_op_name(name, "Erfc") do
+            desc = tf.NodeDescription("Erfc")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function erfc(x_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("Erfc")
+        tf.add_input(desc, x_)
+        desc["T"] = tf.data_type(x_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    sigmoid_grad(y, dy)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing)
+        local desc
+        tf.with_op_name(name, "SigmoidGrad") do
+            desc = tf.NodeDescription("SigmoidGrad")
+            y_ = convert(Tensor{Any}, y_)
+            dy_ = convert(Tensor{Any}, dy_)
+            (y_, dy_) = tf.tf_promote(y_, dy_)
+            tf.add_input(desc, y_)
+            tf.add_input(desc, dy_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function sigmoid_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("SigmoidGrad")
+        tf.add_input(desc, y_)
+        tf.add_input(desc, dy_)
+        desc["T"] = tf.data_type(y_)
+        desc["T"] = tf.data_type(dy_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    fixed_length_record_reader_v2(; header_bytes=0, footer_bytes=0, hop_bytes=0, container=, shared_name=, encoding=)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+        local desc
+        tf.with_op_name(name, "FixedLengthRecordReaderV2") do
+            desc = tf.NodeDescription("FixedLengthRecordReaderV2")
+            if header_bytes !== nothing
+                desc["header_bytes"] = Base.Int(header_bytes)
+            end
+            if record_bytes !== nothing
+                desc["record_bytes"] = Base.Int(record_bytes)
+            end
+            if footer_bytes !== nothing
+                desc["footer_bytes"] = Base.Int(footer_bytes)
+            end
+            if hop_bytes !== nothing
+                desc["hop_bytes"] = Base.Int(hop_bytes)
+            end
+            if container !== nothing
+                desc["container"] = Base.String(container)
+            end
+            if shared_name !== nothing
+                desc["shared_name"] = Base.String(shared_name)
+            end
+            if encoding !== nothing
+                desc["encoding"] = Base.String(encoding)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+        desc = tf.EagerOp("FixedLengthRecordReaderV2")
+        if header_bytes !== nothing
+            desc["header_bytes"] = Base.Int(header_bytes)
+        end
+        if record_bytes !== nothing
+            desc["record_bytes"] = Base.Int(record_bytes)
+        end
+        if footer_bytes !== nothing
+            desc["footer_bytes"] = Base.Int(footer_bytes)
+        end
+        if hop_bytes !== nothing
+            desc["hop_bytes"] = Base.Int(hop_bytes)
+        end
+        if container !== nothing
+            desc["container"] = Base.String(container)
+        end
+        if shared_name !== nothing
+            desc["shared_name"] = Base.String(shared_name)
+        end
+        if encoding !== nothing
+            desc["encoding"] = Base.String(encoding)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
+        local desc
+        tf.with_op_name(name, "NonMaxSuppressionV3") do
+            desc = tf.NodeDescription("NonMaxSuppressionV3")
+            boxes_ = convert(Tensor{Float32}, boxes_)
+            scores_ = convert(Tensor{Float32}, scores_)
+            max_output_size_ = convert(Tensor{Int32}, max_output_size_)
+            iou_threshold_ = convert(Tensor{Float32}, iou_threshold_)
+            score_threshold_ = convert(Tensor{Float32}, score_threshold_)
+            (boxes_, scores_) = tf.tf_promote(boxes_, scores_)
+            tf.add_input(desc, boxes_)
+            tf.add_input(desc, scores_)
+            tf.add_input(desc, max_output_size_)
+            tf.add_input(desc, iou_threshold_)
+            tf.add_input(desc, score_threshold_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function non_max_suppression_v3(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("NonMaxSuppressionV3")
+        tf.add_input(desc, boxes_)
+        tf.add_input(desc, scores_)
+        tf.add_input(desc, max_output_size_)
+        tf.add_input(desc, iou_threshold_)
+        tf.add_input(desc, score_threshold_)
+        desc["T"] = tf.data_type(boxes_)
+        desc["T"] = tf.data_type(scores_)
+        (tf.execute(desc))[1]
+    end
+end
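+
+# Editor's note: a hedged sketch; boxes are [y1, x1, y2, x2] rows and the op
+# returns the indices of the boxes kept after suppression:
+#
+#     boxes  = tf.TensorHandle(tf.RawTensor(Float32[0 0 1 1; 0 0 1 1]))
+#     scores = tf.TensorHandle(tf.RawTensor(Float32[0.9, 0.8]))
+#     k   = tf.TensorHandle(tf.RawTensor(Int32(1)))
+#     iou = tf.TensorHandle(tf.RawTensor(0.5f0))
+#     thr = tf.TensorHandle(tf.RawTensor(0.0f0))
+#     non_max_suppression_v3(boxes, scores, k, iou, thr)  # keeps the higher-scoring box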
+
+
+"""
+    dilation2d_backprop_input(input, filter, out_backprop)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+        local desc
+        tf.with_op_name(name, "Dilation2DBackpropInput") do
+            desc = tf.NodeDescription("Dilation2DBackpropInput")
+            input_ = convert(Tensor{Any}, input_)
+            filter_ = convert(Tensor{Any}, filter_)
+            out_backprop_ = convert(Tensor{Any}, out_backprop_)
+            (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_)
+            tf.add_input(desc, input_)
+            tf.add_input(desc, filter_)
+            tf.add_input(desc, out_backprop_)
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if rates !== nothing
+                desc["rates"] = map(Base.identity, rates)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function dilation2d_backprop_input(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+        desc = tf.EagerOp("Dilation2DBackpropInput")
+        tf.add_input(desc, input_)
+        tf.add_input(desc, filter_)
+        tf.add_input(desc, out_backprop_)
+        if strides !== nothing
+            desc["strides"] = map(Base.identity, strides)
+        end
+        if rates !== nothing
+            desc["rates"] = map(Base.identity, rates)
+        end
+        if padding !== nothing
+            desc["padding"] = Base.String(padding)
+        end
+        desc["T"] = tf.data_type(input_)
+        desc["T"] = tf.data_type(filter_)
+        desc["T"] = tf.data_type(out_backprop_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    logical_or(x, y)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_or(x_, y_; name=nothing)
+        local desc
+        tf.with_op_name(name, "LogicalOr") do
+            desc = tf.NodeDescription("LogicalOr")
+            x_ = convert(Tensor{Bool}, x_)
+            y_ = convert(Tensor{Bool}, y_)
+            tf.add_input(desc, x_)
+            tf.add_input(desc, y_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function logical_or(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("LogicalOr")
+        tf.add_input(desc, x_)
+        tf.add_input(desc, y_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; use_locking=false)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+        local desc
+        tf.with_op_name(name, "ResourceApplyAdadelta") do
+            desc = tf.NodeDescription("ResourceApplyAdadelta")
+            var_ = convert(Tensor{Any}, var_)
+            accum_ = convert(Tensor{Any}, accum_)
+            accum_update_ = convert(Tensor{Any}, accum_update_)
+            lr_ = convert(Tensor{Any}, lr_)
+            rho_ = convert(Tensor{Any}, rho_)
+            epsilon_ = convert(Tensor{Any}, epsilon_)
+            grad_ = convert(Tensor{Any}, grad_)
+            (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_)
+            tf.add_input(desc, var_)
+            tf.add_input(desc, accum_)
+            tf.add_input(desc, accum_update_)
+            tf.add_input(desc, lr_)
+            tf.add_input(desc, rho_)
+            tf.add_input(desc, epsilon_)
+            tf.add_input(desc, grad_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function resource_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing)
+        desc = tf.EagerOp("ResourceApplyAdadelta")
+        tf.add_input(desc, var_)
+        tf.add_input(desc, accum_)
+        tf.add_input(desc, accum_update_)
+        tf.add_input(desc, lr_)
+        tf.add_input(desc, rho_)
+        tf.add_input(desc, epsilon_)
+        tf.add_input(desc, grad_)
+        if use_locking !== nothing
+            desc["use_locking"] = Base.Bool(use_locking)
+        end
+        desc["T"] = tf.data_type(lr_)
+        desc["T"] = tf.data_type(rho_)
+        desc["T"] = tf.data_type(epsilon_)
+        desc["T"] = tf.data_type(grad_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape; validate_indices=true)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+        local desc
+        tf.with_op_name(name, "DenseToSparseSetOperation") do
+            desc = tf.NodeDescription("DenseToSparseSetOperation")
+            set1_ = convert(Tensor{Any}, set1_)
+            set2_indices_ = convert(Tensor{Int64}, set2_indices_)
+            set2_values_ = convert(Tensor{Any}, set2_values_)
+            set2_shape_ = convert(Tensor{Int64}, set2_shape_)
+            (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_)
+            tf.add_input(desc, set1_)
+            tf.add_input(desc, set2_indices_)
+            tf.add_input(desc, set2_values_)
+            tf.add_input(desc, set2_shape_)
+            if set_operation !== nothing
+                desc["set_operation"] = Base.String(set_operation)
+            end
+            if validate_indices !== nothing
+                desc["validate_indices"] = Base.Bool(validate_indices)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
+    function dense_to_sparse_set_operation(set1_::tf.TensorHandle, set2_indices_::tf.TensorHandle, set2_values_::tf.TensorHandle, set2_shape_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing)
+        desc = tf.EagerOp("DenseToSparseSetOperation")
+        tf.add_input(desc, set1_)
+        tf.add_input(desc, set2_indices_)
+        tf.add_input(desc, set2_values_)
+        tf.add_input(desc, set2_shape_)
+        if set_operation !== nothing
+            desc["set_operation"] = Base.String(set_operation)
+        end
+        if validate_indices !== nothing
+            desc["validate_indices"] = Base.Bool(validate_indices)
+        end
+        desc["T"] = tf.data_type(set1_)
+        desc["T"] = tf.data_type(set2_values_)
+        tf.execute(desc)
+    end
+end
+
+
+"""
+    reader_num_records_produced(reader_handle)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing)
+        local desc
+        tf.with_op_name(name, "ReaderNumRecordsProduced") do
+            desc = tf.NodeDescription("ReaderNumRecordsProduced")
+            reader_handle_ = convert(Tensor{String}, reader_handle_)
+            tf.add_input(desc, reader_handle_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function reader_num_records_produced(reader_handle_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("ReaderNumRecordsProduced")
+        tf.add_input(desc, reader_handle_)
+        (tf.execute(desc))[1]
+    end
+end
reader_num_records_produced(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderNumRecordsProduced") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + adjust_hue(images, delta) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_hue(images_, delta_; name=nothing) + local desc + tf.with_op_name(name, "AdjustHue") do + desc = tf.NodeDescription("AdjustHue") + images_ = convert(Tensor{Float32}, images_) + delta_ = convert(Tensor{Float32}, delta_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, delta_) + end + tf.Tensor(tf.Operation(desc)) + end + function adjust_hue(images_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AdjustHue") + tf.add_input(desc, images_) + tf.add_input(desc, delta_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle, num_buckets; generate_quantiles=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + num_buckets_ = convert(Tensor{Int64}, num_buckets_) + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, num_buckets_) + if generate_quantiles !== nothing + desc["generate_quantiles"] = Base.Bool(generate_quantiles) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_::tf.TensorHandle, num_buckets_::tf.TensorHandle; name=nothing, generate_quantiles=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, num_buckets_) + if generate_quantiles !== nothing + desc["generate_quantiles"] = Base.Bool(generate_quantiles) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_map_and_batch_dataset(input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder; preserve_cardinality=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + batch_size_ = convert(Tensor{Int64}, batch_size_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] 
= map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_map_and_batch_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, batch_size_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalMapAndBatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + (tf.execute(desc))[1] + end +end + + +""" + real_div(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function real_div(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "RealDiv") do + desc = tf.NodeDescription("RealDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function real_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RealDiv") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + restore_slice(file_pattern, tensor_name, shape_and_slice; preferred_shard=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "RestoreSlice") do + desc = tf.NodeDescription("RestoreSlice") + file_pattern_ = convert(Tensor{String}, file_pattern_) + tensor_name_ = convert(Tensor{String}, tensor_name_) + shape_and_slice_ = convert(Tensor{String}, shape_and_slice_) + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + tf.add_input(desc, shape_and_slice_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + tf.Tensor(tf.Operation(desc)) + end + function restore_slice(file_pattern_::tf.TensorHandle, tensor_name_::tf.TensorHandle, shape_and_slice_::tf.TensorHandle; name=nothing, dt=nothing, preferred_shard=nothing) + desc = tf.EagerOp("RestoreSlice") + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + tf.add_input(desc, shape_and_slice_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + (tf.execute(desc))[1] + end +end + + +""" + 
stack_pop_v2(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPopV2") do + desc = tf.NodeDescription("StackPopV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack_pop_v2(handle_::tf.TensorHandle; name=nothing, elem_type=nothing) + desc = tf.EagerOp("StackPopV2") + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + (tf.execute(desc))[1] + end +end + + +""" + reverse(tensor, dims) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse(tensor_, dims_; name=nothing) + local desc + tf.with_op_name(name, "Reverse") do + desc = tf.NodeDescription("Reverse") + tensor_ = convert(Tensor{Any}, tensor_) + dims_ = convert(Tensor{Bool}, dims_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + tf.add_input(desc, dims_) + end + tf.Tensor(tf.Operation(desc)) + end + function reverse(tensor_::tf.TensorHandle, dims_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Reverse") + tf.add_input(desc, tensor_) + tf.add_input(desc, dims_) + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + decode_png(contents; channels=0, dtype=UInt8) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DecodePng") do + desc = tf.NodeDescription("DecodePng") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_png(contents_::tf.TensorHandle; name=nothing, channels=nothing, dtype=nothing) + desc = tf.EagerOp("DecodePng") + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + non_max_suppression_v2(boxes, scores, max_output_size, iou_threshold) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV2") do + desc = tf.NodeDescription("NonMaxSuppressionV2") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + end + tf.Tensor(tf.Operation(desc)) + end + function non_max_suppression_v2(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NonMaxSuppressionV2") + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + 
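# Note on the attribute-setting step below (assumed behavior of the generated
# eager path): the "T" attribute is filled from the runtime dtype of the input
# handles rather than by graph-time promotion. Both assignments write the same
# attribute key, so the value from `scores_` is the one that takes effect;
# `boxes_` and `scores_` are assumed to share a dtype for this op.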
desc["T"] = tf.data_type(boxes_) + desc["T"] = tf.data_type(scores_) + (tf.execute(desc))[1] + end +end + + +""" + igamma(a, x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igamma(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igamma") do + desc = tf.NodeDescription("Igamma") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function igamma(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Igamma") + tf.add_input(desc, a_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + digamma(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function digamma(x_; name=nothing) + local desc + tf.with_op_name(name, "Digamma") do + desc = tf.NodeDescription("Digamma") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function digamma(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Digamma") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdaMax") do + desc = tf.NodeDescription("ResourceApplyAdaMax") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_ada_max(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdaMax") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(beta1_power_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(beta1_) + desc["T"] = tf.data_type(beta2_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + 
+ +""" + space_to_depth(input; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "SpaceToDepth") do + desc = tf.NodeDescription("SpaceToDepth") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function space_to_depth(input_::tf.TensorHandle; name=nothing, block_size=nothing, data_format=nothing) + desc = tf.EagerOp("SpaceToDepth") + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sqrt_grad(y, dy) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sqrt_grad(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "SqrtGrad") do + desc = tf.NodeDescription("SqrtGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function sqrt_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SqrtGrad") + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + (tf.execute(desc))[1] + end +end + + +""" + map_unstage(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapUnstage") do + desc = tf.NodeDescription("MapUnstage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_unstage(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapUnstage") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + qr(input; full_matrices=false) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "Qr") do + desc = tf.NodeDescription("Qr") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function qr(input_::tf.TensorHandle; name=nothing, full_matrices=nothing) + desc = tf.EagerOp("Qr") + tf.add_input(desc, input_) + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + boosted_trees_calculate_best_gains_per_feature(node_id_range, stats_summary_list, l1, l2, tree_complexity, min_node_weight) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do + desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") + node_id_range_ = convert(Tensor{Int32}, node_id_range_) + stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_] + l1_ = convert(Tensor{Float32}, l1_) + l2_ = convert(Tensor{Float32}, l2_) + tree_complexity_ = convert(Tensor{Float32}, tree_complexity_) + min_node_weight_ = convert(Tensor{Float32}, min_node_weight_) + tf.add_input(desc, node_id_range_) + tf.add_input(desc, stats_summary_list_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, tree_complexity_) + tf.add_input(desc, min_node_weight_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_calculate_best_gains_per_feature(node_id_range_::tf.TensorHandle, stats_summary_list_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, tree_complexity_::tf.TensorHandle, min_node_weight_::tf.TensorHandle; name=nothing, max_splits=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") + tf.add_input(desc, node_id_range_) + tf.add_input(desc, stats_summary_list_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, tree_complexity_) + tf.add_input(desc, min_node_weight_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + tf.execute(desc) + end +end + + +""" + unbatch_grad(original_input, batch_index, grad, id; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "UnbatchGrad") do + desc = tf.NodeDescription("UnbatchGrad") + original_input_ = convert(Tensor{Any}, original_input_) + batch_index_ = convert(Tensor{Int64}, batch_index_) + grad_ = convert(Tensor{Any}, grad_) + id_ = 
convert(Tensor{Int64}, id_) + (original_input_, grad_) = tf.tf_promote(original_input_, grad_) + tf.add_input(desc, original_input_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, grad_) + tf.add_input(desc, id_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function unbatch_grad(original_input_::tf.TensorHandle, batch_index_::tf.TensorHandle, grad_::tf.TensorHandle, id_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("UnbatchGrad") + tf.add_input(desc, original_input_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, grad_) + tf.add_input(desc, id_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(original_input_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + log_softmax(logits) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_softmax(logits_; name=nothing) + local desc + tf.with_op_name(name, "LogSoftmax") do + desc = tf.NodeDescription("LogSoftmax") + logits_ = convert(Tensor{Any}, logits_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) + end + tf.Tensor(tf.Operation(desc)) + end + function log_softmax(logits_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LogSoftmax") + tf.add_input(desc, logits_) + desc["T"] = tf.data_type(logits_) + (tf.execute(desc))[1] + end +end + + +""" + resource_count_up_to(resource) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "ResourceCountUpTo") do + desc = tf.NodeDescription("ResourceCountUpTo") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_count_up_to(resource_::tf.TensorHandle; name=nothing, limit=nothing) + desc = tf.EagerOp("ResourceCountUpTo") + tf.add_input(desc, resource_) + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + (tf.execute(desc))[1] + end +end + + +""" + accumulate_nv2(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "AccumulateNV2") do + desc = tf.NodeDescription("AccumulateNV2") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function accumulate_nv2(inputs_::tf.TensorHandle; name=nothing, N=nothing, shape=nothing) + desc = tf.EagerOp("AccumulateNV2") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(inputs_) + (tf.execute(desc))[1] + end +end + + +""" + parallel_map_dataset(input_dataset, other_arguments, num_parallel_calls; use_inter_op_parallelism=true, sloppy=false, preserve_cardinality=false) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ParallelMapDataset") do + desc = tf.NodeDescription("ParallelMapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + tf.Tensor(tf.Operation(desc)) + end + function parallel_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ParallelMapDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + (tf.execute(desc))[1] + end +end + + +""" + random_uniform(shape; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomUniform") do + desc = tf.NodeDescription("RandomUniform") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_uniform(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("RandomUniform") + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== 
nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + unicode_transcode(input; errors=replace, replacement_char=65533, replace_control_characters=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeTranscode") do + desc = tf.NodeDescription("UnicodeTranscode") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + tf.Tensor(tf.Operation(desc)) + end + function unicode_transcode(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeTranscode") + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + (tf.execute(desc))[1] + end +end + + +""" + reader_reset(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_reset(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReset") do + desc = tf.NodeDescription("ReaderReset") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_reset(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderReset") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + _nccl_broadcast_send(input) + +Replacement node for NcclBroadcast. 
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastSend") do + desc = tf.NodeDescription("_NcclBroadcastSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _nccl_broadcast_send(input_::tf.TensorHandle; name=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclBroadcastSend") + tf.add_input(desc, input_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + batch_matrix_determinant(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_determinant(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDeterminant") do + desc = tf.NodeDescription("BatchMatrixDeterminant") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_determinant(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchMatrixDeterminant") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + less_equal(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function less_equal(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LessEqual") do + desc = tf.NodeDescription("LessEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function less_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LessEqual") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + apply_gradient_descent(var, alpha, delta; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyGradientDescent") do + desc = tf.NodeDescription("ApplyGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + delta_ = convert(Tensor{Any}, delta_) + (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = 
tf.data_type(var_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(delta_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_sqrt_n(data, indices, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtN") do + desc = tf.NodeDescription("SparseSegmentSqrtN") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_sqrt_n(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtN") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_logarithm(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_logarithm(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixLogarithm") do + desc = tf.NodeDescription("MatrixLogarithm") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_logarithm(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixLogarithm") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + scatter_mul(ref, indices, updates; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMul") do + desc = tf.NodeDescription("ScatterMul") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_mul(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMul") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + decode_jpeg(contents; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=?, dct_method=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, 
try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeJpeg") do + desc = tf.NodeDescription("DecodeJpeg") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_jpeg(contents_::tf.TensorHandle; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + desc = tf.EagerOp("DecodeJpeg") + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + (tf.execute(desc))[1] + end +end + + +""" + random_shuffle_queue_v2(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueueV2") do + desc = tf.NodeDescription("RandomShuffleQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("RandomShuffleQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if 
seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + queue_enqueue_many_v2(handle, components; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueManyV2") do + desc = tf.NodeDescription("QueueEnqueueManyV2") + handle_ = convert(Tensor{Any}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_enqueue_many_v2(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueueManyV2") + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, 
momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + interleave_dataset(input_dataset, other_arguments, cycle_length, block_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "InterleaveDataset") do + desc = tf.NodeDescription("InterleaveDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function interleave_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("InterleaveDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + stack_pop(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPop") do + desc = tf.NodeDescription("StackPop") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack_pop(handle_::tf.TensorHandle; name=nothing, elem_type=nothing) + desc = tf.EagerOp("StackPop") + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + (tf.execute(desc))[1] + end +end + + +""" + max_pool_v2(input, ksize, strides; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolV2") do + desc = 
tf.NodeDescription("MaxPoolV2") + input_ = convert(Tensor{Float32}, input_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_v2(input_::tf.TensorHandle, ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolV2") + tf.add_input(desc, input_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_deserialize_ensemble(tree_ensemble_handle_::tf.TensorHandle, stamp_token_::tf.TensorHandle, tree_ensemble_serialized_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + (tf.execute(desc))[1] + end +end + + +""" + load_and_remap_matrix(ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values; max_rows_in_memory=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + local desc + tf.with_op_name(name, "LoadAndRemapMatrix") do + desc = tf.NodeDescription("LoadAndRemapMatrix") + ckpt_path_ = convert(Tensor{String}, ckpt_path_) + old_tensor_name_ = convert(Tensor{String}, old_tensor_name_) + row_remapping_ = convert(Tensor{Int64}, row_remapping_) + col_remapping_ = convert(Tensor{Int64}, col_remapping_) + initializing_values_ = convert(Tensor{Float32}, initializing_values_) + tf.add_input(desc, ckpt_path_) + tf.add_input(desc, old_tensor_name_) + tf.add_input(desc, row_remapping_) + tf.add_input(desc, col_remapping_) + tf.add_input(desc, initializing_values_) + if num_rows !== nothing + desc["num_rows"] = Base.Int(num_rows) + end + if num_cols !== nothing + desc["num_cols"] = Base.Int(num_cols) + end + if max_rows_in_memory !== nothing + desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
load_and_remap_matrix(ckpt_path_::tf.TensorHandle, old_tensor_name_::tf.TensorHandle, row_remapping_::tf.TensorHandle, col_remapping_::tf.TensorHandle, initializing_values_::tf.TensorHandle; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + desc = tf.EagerOp("LoadAndRemapMatrix") + tf.add_input(desc, ckpt_path_) + tf.add_input(desc, old_tensor_name_) + tf.add_input(desc, row_remapping_) + tf.add_input(desc, col_remapping_) + tf.add_input(desc, initializing_values_) + if num_rows !== nothing + desc["num_rows"] = Base.Int(num_rows) + end + if num_cols !== nothing + desc["num_cols"] = Base.Int(num_cols) + end + if max_rows_in_memory !== nothing + desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("SparseApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyProximalGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + py_func_stateless(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFuncStateless") do + desc = tf.NodeDescription("PyFuncStateless") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + tf.Tensor(tf.Operation(desc)) + end + function py_func_stateless(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("PyFuncStateless") + tf.add_input(desc, 
input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + (tf.execute(desc))[1] + end +end + + +""" + where(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function where(input_; name=nothing) + local desc + tf.with_op_name(name, "Where") do + desc = tf.NodeDescription("Where") + input_ = convert(Tensor{Bool}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function where(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Where") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + mfcc(spectrogram, sample_rate; upper_frequency_limit=?, lower_frequency_limit=?, filterbank_channel_count=40, dct_coefficient_count=13) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + local desc + tf.with_op_name(name, "Mfcc") do + desc = tf.NodeDescription("Mfcc") + spectrogram_ = convert(Tensor{Float32}, spectrogram_) + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + tf.add_input(desc, spectrogram_) + tf.add_input(desc, sample_rate_) + if upper_frequency_limit !== nothing + desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) + end + if lower_frequency_limit !== nothing + desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) + end + if filterbank_channel_count !== nothing + desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + end + if dct_coefficient_count !== nothing + desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mfcc(spectrogram_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + desc = tf.EagerOp("Mfcc") + tf.add_input(desc, spectrogram_) + tf.add_input(desc, sample_rate_) + if upper_frequency_limit !== nothing + desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) + end + if lower_frequency_limit !== nothing + desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) + end + if filterbank_channel_count !== nothing + desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + end + if dct_coefficient_count !== nothing + desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) + end + (tf.execute(desc))[1] + end +end + + +""" + check_numerics(tensor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function check_numerics(tensor_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "CheckNumerics") do + desc = tf.NodeDescription("CheckNumerics") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if message !== nothing + desc["message"] = Base.String(message) + end + end + tf.Tensor(tf.Operation(desc)) + end + function check_numerics(tensor_::tf.TensorHandle; name=nothing, message=nothing) + desc = tf.EagerOp("CheckNumerics") + tf.add_input(desc, tensor_) + if message !== nothing + desc["message"] = 
Base.String(message) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + tpu_compilation_result() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_compilation_result(; name=nothing) + local desc + tf.with_op_name(name, "TPUCompilationResult") do + desc = tf.NodeDescription("TPUCompilationResult") + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_compilation_result(; name=nothing) + desc = tf.EagerOp("TPUCompilationResult") + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanGrad") do + desc = tf.NodeDescription("SparseSegmentMeanGrad") + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + (grad_,) = tf.tf_promote(grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_mean_grad(grad_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, output_dim0_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentMeanGrad") + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) + desc["T"] = tf.data_type(grad_) + desc["Tidx"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + try_rpc(address, method, request; protocol=, fail_fast=true, timeout_in_ms=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "TryRpc") do + desc = tf.NodeDescription("TryRpc") + address_ = convert(Tensor{String}, address_) + method_ = convert(Tensor{String}, method_) + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function try_rpc(address_::tf.TensorHandle, method_::tf.TensorHandle, request_::tf.TensorHandle; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + desc = tf.EagerOp("TryRpc") + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + tf.execute(desc) + end +end + + +""" + batch_matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixTriangularSolve") do + desc = tf.NodeDescription("BatchMatrixTriangularSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_triangular_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, lower=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixTriangularSolve") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + _retval(input) + +A graph node which represents a return value of a function. 
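+The index attribute gives this value's position among the enclosing function's outputs.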
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _retval(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Retval") do + desc = tf.NodeDescription("_Retval") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _retval(input_::tf.TensorHandle; name=nothing, index=nothing) + desc = tf.EagerOp("_Retval") + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + unique_with_counts(x; out_idx=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueWithCounts") do + desc = tf.NodeDescription("UniqueWithCounts") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unique_with_counts(x_::tf.TensorHandle; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueWithCounts") + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + desc["T"] = tf.data_type(x_) + tf.execute(desc) + end +end + + +""" + add(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Add") do + desc = tf.NodeDescription("Add") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function add(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Add") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_scan_dataset(input_dataset, initial_state, other_arguments; preserve_cardinality=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalScanDataset") do + desc = tf.NodeDescription("ExperimentalScanDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== 
nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_scan_dataset(input_dataset_::tf.TensorHandle, initial_state_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalScanDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + (tf.execute(desc))[1] + end +end + + +""" + assign_add_variable_op(resource, value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignAddVariableOp") do + desc = tf.NodeDescription("AssignAddVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign_add_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignAddVariableOp") + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + split_v(value, size_splits, split_dim) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SplitV") do + desc = tf.NodeDescription("SplitV") + value_ = convert(Tensor{Any}, value_) + size_splits_ = convert(Tensor{Int64}, size_splits_) + split_dim_ = convert(Tensor{Int32}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + (value_,) = tf.tf_promote(value_) + (size_splits_,) = tf.tf_promote(size_splits_) + tf.add_input(desc, value_) + tf.add_input(desc, size_splits_) + tf.add_input(desc, split_dim_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function split_v(value_::tf.TensorHandle, size_splits_::tf.TensorHandle, split_dim_::tf.TensorHandle; name=nothing, num_split=nothing) + desc = tf.EagerOp("SplitV") + tf.add_input(desc, value_) + tf.add_input(desc, size_splits_) + tf.add_input(desc, split_dim_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + desc["T"] = tf.data_type(value_) + desc["Tlen"] = tf.data_type(size_splits_) + tf.execute(desc) + end +end + + +""" + assign(ref, value; validate_shape=true, use_locking=true) + + +""" 
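+# Illustrative sketch, not generator output: every eager wrapper in this file
+# follows the same pattern. Assuming an active eager context and two
+# tf.TensorHandles h1 and h2 of matching dtype, the eager method for, e.g.,
+# the Add op defined above reduces to:
+#
+#   desc = tf.EagerOp("Add")          # look the kernel up by op name
+#   tf.add_input(desc, h1)            # inputs are appended in op-def order
+#   tf.add_input(desc, h2)
+#   desc["T"] = tf.data_type(h1)      # dtype attrs are inferred from the inputs
+#   result = (tf.execute(desc))[1]    # execute returns a Vector of output handles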
+begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "Assign") do + desc = tf.NodeDescription("Assign") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if validate_shape !== nothing + desc["validate_shape"] = Base.Bool(validate_shape) + end + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, validate_shape=nothing, use_locking=nothing) + desc = tf.EagerOp("Assign") + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if validate_shape !== nothing + desc["validate_shape"] = Base.Bool(validate_shape) + end + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool_with_argmax(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolWithArgmax") do + desc = tf.NodeDescription("MaxPoolWithArgmax") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function max_pool_with_argmax(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolWithArgmax") + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + quantized_relu_x(features, max_value, min_features, max_features; out_type=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedReluX") do + desc = tf.NodeDescription("QuantizedReluX") + features_ = convert(Tensor{Any}, features_) + max_value_ = convert(Tensor{Float32}, max_value_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, max_value_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_relu_x(features_::tf.TensorHandle, max_value_::tf.TensorHandle, min_features_::tf.TensorHandle, 
max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedReluX") + tf.add_input(desc, features_) + tf.add_input(desc, max_value_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["Tinput"] = tf.data_type(features_) + tf.execute(desc) + end +end + + +""" + random_shuffle_queue(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueue") do + desc = tf.NodeDescription("RandomShuffleQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("RandomShuffleQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + fft2d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft2d(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT2D") do + desc = tf.NodeDescription("FFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function fft2d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FFT2D") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_thread_pool_dataset(input_dataset, thread_pool) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalThreadPoolDataset") + input_dataset_ = 
convert(Tensor{Any}, input_dataset_) + thread_pool_ = convert(Tensor{Any}, thread_pool_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, thread_pool_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_thread_pool_dataset(input_dataset_::tf.TensorHandle, thread_pool_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalThreadPoolDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, thread_pool_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_directed_interleave_dataset(selector_input_dataset, data_input_datasets) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") + selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_) + data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_] + tf.add_input(desc, selector_input_dataset_) + tf.add_input(desc, data_input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_directed_interleave_dataset(selector_input_dataset_::tf.TensorHandle, data_input_datasets_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") + tf.add_input(desc, selector_input_dataset_) + tf.add_input(desc, data_input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNGrad") do + desc = tf.NodeDescription("SparseSegmentSqrtNGrad") + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + (grad_,) = tf.tf_promote(grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_sqrt_n_grad(grad_::tf.TensorHandle, 
indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, output_dim0_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtNGrad") + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) + desc["T"] = tf.data_type(grad_) + desc["Tidx"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + real(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function real(input_; name=nothing) + local desc + tf.with_op_name(name, "Real") do + desc = tf.NodeDescription("Real") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function real(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Real") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_unstage(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstage") do + desc = tf.NodeDescription("OrderedMapUnstage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_unstage(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapUnstage") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + rfft2d(input, fft_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft2d(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT2D") do + desc = tf.NodeDescription("RFFT2D") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function rfft2d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RFFT2D") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] + end +end + + +""" + var_is_initialized_op(resource) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
var_is_initialized_op(resource_; name=nothing) + local desc + tf.with_op_name(name, "VarIsInitializedOp") do + desc = tf.NodeDescription("VarIsInitializedOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + end + tf.Tensor(tf.Operation(desc)) + end + function var_is_initialized_op(resource_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("VarIsInitializedOp") + tf.add_input(desc, resource_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_quantile_stream_resource_handle_op(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + atan2(y, x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atan2(y_, x_; name=nothing) + local desc + tf.with_op_name(name, "Atan2") do + desc = tf.NodeDescription("Atan2") + y_ = convert(Tensor{Any}, y_) + x_ = convert(Tensor{Any}, x_) + (y_, x_) = tf.tf_promote(y_, x_) + tf.add_input(desc, y_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function atan2(y_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Atan2") + tf.add_input(desc, y_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + random_poisson(shape, rate; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoisson") do + desc = tf.NodeDescription("RandomPoisson") + shape_ = convert(Tensor{Any}, shape_) + rate_ = convert(Tensor{Any}, rate_) + (rate_,) = tf.tf_promote(rate_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_poisson(shape_::tf.TensorHandle, rate_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + desc = tf.EagerOp("RandomPoisson") + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["S"] = 
tf.data_type(shape_) + desc["dtype"] = tf.data_type(rate_) + (tf.execute(desc))[1] + end +end + + +""" + reverse_sequence(input, seq_lengths; batch_dim=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + local desc + tf.with_op_name(name, "ReverseSequence") do + desc = tf.NodeDescription("ReverseSequence") + input_ = convert(Tensor{Any}, input_) + seq_lengths_ = convert(Tensor{Int64}, seq_lengths_) + (input_,) = tf.tf_promote(input_) + (seq_lengths_,) = tf.tf_promote(seq_lengths_) + tf.add_input(desc, input_) + tf.add_input(desc, seq_lengths_) + if seq_dim !== nothing + desc["seq_dim"] = Base.Int(seq_dim) + end + if batch_dim !== nothing + desc["batch_dim"] = Base.Int(batch_dim) + end + end + tf.Tensor(tf.Operation(desc)) + end + function reverse_sequence(input_::tf.TensorHandle, seq_lengths_::tf.TensorHandle; name=nothing, seq_dim=nothing, batch_dim=nothing) + desc = tf.EagerOp("ReverseSequence") + tf.add_input(desc, input_) + tf.add_input(desc, seq_lengths_) + if seq_dim !== nothing + desc["seq_dim"] = Base.Int(seq_dim) + end + if batch_dim !== nothing + desc["batch_dim"] = Base.Int(batch_dim) + end + desc["T"] = tf.data_type(input_) + desc["Tlen"] = tf.data_type(seq_lengths_) + (tf.execute(desc))[1] + end +end + + +""" + outfeed_enqueue(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueue") do + desc = tf.NodeDescription("OutfeedEnqueue") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function outfeed_enqueue(input_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("OutfeedEnqueue") + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sub(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sub(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Sub") do + desc = tf.NodeDescription("Sub") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function sub(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sub") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + string_split(input, delimiter; skip_empty=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) + local desc + tf.with_op_name(name, "StringSplit") do + desc = tf.NodeDescription("StringSplit") + input_ = convert(Tensor{String}, input_) + delimiter_ = convert(Tensor{String}, delimiter_) + tf.add_input(desc, input_) + tf.add_input(desc, delimiter_) + if skip_empty !== nothing + desc["skip_empty"] = Base.Bool(skip_empty) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function 
string_split(input_::tf.TensorHandle, delimiter_::tf.TensorHandle; name=nothing, skip_empty=nothing) + desc = tf.EagerOp("StringSplit") + tf.add_input(desc, input_) + tf.add_input(desc, delimiter_) + if skip_empty !== nothing + desc["skip_empty"] = Base.Bool(skip_empty) + end + tf.execute(desc) + end +end + + +""" + cumprod(x, axis; exclusive=false, reverse=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumprod") do + desc = tf.NodeDescription("Cumprod") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cumprod(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, exclusive=nothing, reverse=nothing) + desc = tf.EagerOp("Cumprod") + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + desc["T"] = tf.data_type(x_) + desc["Tidx"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_resize_bilinear(images, size, min, max; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "QuantizedResizeBilinear") do + desc = tf.NodeDescription("QuantizedResizeBilinear") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_resize_bilinear(images_::tf.TensorHandle, size_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("QuantizedResizeBilinear") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(images_) + tf.execute(desc) + end +end + + +""" + parse_single_example(serialized, dense_defaults) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleExample") do + desc = tf.NodeDescription("ParseSingleExample") + serialized_ = convert(Tensor{String}, serialized_) + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, 
dense_defaults_) + if num_sparse !== nothing + desc["num_sparse"] = Base.Int(num_sparse) + end + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function parse_single_example(serialized_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + desc = tf.EagerOp("ParseSingleExample") + tf.add_input(desc, serialized_) + tf.add_input(desc, dense_defaults_) + if num_sparse !== nothing + desc["num_sparse"] = Base.Int(num_sparse) + end + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + tf.execute(desc) + end +end + + +""" + is_variable_initialized(ref) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "IsVariableInitialized") do + desc = tf.NodeDescription("IsVariableInitialized") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function is_variable_initialized(ref_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("IsVariableInitialized") + tf.add_input(desc, ref_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(ref_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_stats_aggregator_handle(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_concat_v2(input_handle, element_shape, leading_dims) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListConcatV2") do + desc = tf.NodeDescription("TensorListConcatV2") + input_handle_ = convert(Tensor{Any}, input_handle_) + element_shape_ = convert(Tensor{Any}, element_shape_) + leading_dims_ = convert(Tensor{Int64}, leading_dims_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, leading_dims_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_list_concat_v2(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle, leading_dims_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListConcatV2") + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, leading_dims_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["shape_type"] = tf.data_type(element_shape_) + tf.execute(desc) + end +end + + +""" + cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNNV2") do + desc = tf.NodeDescription("CudnnRNNV2") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnnv2(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + desc = tf.EagerOp("CudnnRNNV2") + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== 
nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + tf.execute(desc) + end +end + + +""" + resource_scatter_sub(resource, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterSub") do + desc = tf.NodeDescription("ResourceScatterSub") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_sub(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterSub") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + assign_add(ref, value; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignAdd") do + desc = tf.NodeDescription("AssignAdd") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign_add(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("AssignAdd") + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_dataset(components) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorDataset") do + desc = tf.NodeDescription("TensorDataset") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + 
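+# The method below is the eager counterpart of the @op definition above: it
+# dispatches on tf.TensorHandle arguments and runs the kernel immediately via
+# tf.execute instead of adding a node to the default graph.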
function tensor_dataset(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TensorDataset") + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + bucketize(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing) + local desc + tf.with_op_name(name, "Bucketize") do + desc = tf.NodeDescription("Bucketize") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if boundaries !== nothing + desc["boundaries"] = map(Base.identity, boundaries) + end + end + tf.Tensor(tf.Operation(desc)) + end + function bucketize(input_::tf.TensorHandle; name=nothing, boundaries=nothing) + desc = tf.EagerOp("Bucketize") + tf.add_input(desc, input_) + if boundaries !== nothing + desc["boundaries"] = map(Base.identity, boundaries) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_reduce_max(input_indices, input_values, input_shape, reduction_axes; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMax") do + desc = tf.NodeDescription("SparseReduceMax") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_reduce_max(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceMax") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_values_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_mdl_adagrad_light_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + 
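+# Multi-output graph ops like this one return a Vector with one tf.Tensor per
+# output slot (four here), rather than the single tf.Tensor returned by
+# one-output wrappers.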
op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradWithShape") do + desc = tf.NodeDescription("TensorArrayGradWithShape") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + tf.add_input(desc, shape_to_prepend_) + if source !== nothing + desc["source"] = Base.String(source) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_grad_with_shape(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle, shape_to_prepend_::tf.TensorHandle; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradWithShape") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + tf.add_input(desc, shape_to_prepend_) + if source !== nothing + desc["source"] = Base.String(source) + end + tf.execute(desc) + end +end + + +""" + tensor_array_close_v3(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close_v3(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV3") do + desc = tf.NodeDescription("TensorArrayCloseV3") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_close_v3(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayCloseV3") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold, score_threshold) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do + desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") + overlaps_ = convert(Tensor{Float32}, overlaps_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + tf.add_input(desc, overlaps_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, overlap_threshold_) + tf.add_input(desc, score_threshold_) + end + tf.Tensor(tf.Operation(desc)) + end + function 
non_max_suppression_with_overlaps(overlaps_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, overlap_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") + tf.add_input(desc, overlaps_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, overlap_threshold_) + tf.add_input(desc, score_threshold_) + (tf.execute(desc))[1] + end +end + + +""" + pack(values; axis=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Pack") do + desc = tf.NodeDescription("Pack") + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + tf.Tensor(tf.Operation(desc)) + end + function pack(values_::tf.TensorHandle; name=nothing, N=nothing, axis=nothing) + desc = tf.EagerOp("Pack") + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_grad_v2(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradV2") do + desc = tf.NodeDescription("TensorArrayGradV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_grad_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradV2") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + (tf.execute(desc))[1] + end +end + + +""" + assign_sub_variable_op(resource, value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignSubVariableOp") do + desc = tf.NodeDescription("AssignSubVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign_sub_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignSubVariableOp") + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + batch_fft2d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft2d(input_; name=nothing) + local desc + 
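+# desc is declared local in the enclosing function so the assignment made inside
+# the with_op_name do-block (a closure that scopes the node's name) is still
+# visible when tf.Operation(desc) is constructed afterwards.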
tf.with_op_name(name, "BatchFFT2D") do + desc = tf.NodeDescription("BatchFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_fft2d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchFFT2D") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + close_summary_writer(writer) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function close_summary_writer(writer_; name=nothing) + local desc + tf.with_op_name(name, "CloseSummaryWriter") do + desc = tf.NodeDescription("CloseSummaryWriter") + writer_ = convert(Tensor{Any}, writer_) + tf.add_input(desc, writer_) + end + tf.Tensor(tf.Operation(desc)) + end + function close_summary_writer(writer_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CloseSummaryWriter") + tf.add_input(desc, writer_) + (tf.execute(desc))[1] + end +end + + +""" + rank(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rank(input_; name=nothing) + local desc + tf.with_op_name(name, "Rank") do + desc = tf.NodeDescription("Rank") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function rank(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Rank") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + fft3d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft3d(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT3D") do + desc = tf.NodeDescription("FFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function fft3d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FFT3D") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrl") do + desc = tf.NodeDescription("ApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = 
tf.EagerOp("ApplyFtrl") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(linear_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + abort(; error_msg=, exit_without_error=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + local desc + tf.with_op_name(name, "Abort") do + desc = tf.NodeDescription("Abort") + if error_msg !== nothing + desc["error_msg"] = Base.String(error_msg) + end + if exit_without_error !== nothing + desc["exit_without_error"] = Base.Bool(exit_without_error) + end + end + tf.Tensor(tf.Operation(desc)) + end + function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + desc = tf.EagerOp("Abort") + if error_msg !== nothing + desc["error_msg"] = Base.String(error_msg) + end + if exit_without_error !== nothing + desc["exit_without_error"] = Base.Bool(exit_without_error) + end + (tf.execute(desc))[1] + end +end + + +""" + audio_spectrogram(input; magnitude_squared=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + local desc + tf.with_op_name(name, "AudioSpectrogram") do + desc = tf.NodeDescription("AudioSpectrogram") + input_ = convert(Tensor{Float32}, input_) + tf.add_input(desc, input_) + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + if stride !== nothing + desc["stride"] = Base.Int(stride) + end + if magnitude_squared !== nothing + desc["magnitude_squared"] = Base.Bool(magnitude_squared) + end + end + tf.Tensor(tf.Operation(desc)) + end + function audio_spectrogram(input_::tf.TensorHandle; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + desc = tf.EagerOp("AudioSpectrogram") + tf.add_input(desc, input_) + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + if stride !== nothing + desc["stride"] = Base.Int(stride) + end + if magnitude_squared !== nothing + desc["magnitude_squared"] = Base.Bool(magnitude_squared) + end + (tf.execute(desc))[1] + end +end + + +""" + variable_shape(input; out_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "VariableShape") do + desc = tf.NodeDescription("VariableShape") + input_ = convert(Tensor{Any}, input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function variable_shape(input_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("VariableShape") + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + (tf.execute(desc))[1] + end +end + + +""" + fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, 
shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueueV2") do + desc = tf.NodeDescription("FIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + variable(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Variable") do + desc = tf.NodeDescription("Variable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Variable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_forest_create_tree_variable(tree_handle, tree_config) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestCreateTreeVariable") do + desc = tf.NodeDescription("TensorForestCreateTreeVariable") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tree_config_ = convert(Tensor{String}, tree_config_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_create_tree_variable(tree_handle_::tf.TensorHandle, tree_config_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorForestCreateTreeVariable") + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool_grad_with_argmax(input, grad, argmax) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradWithArgmax") + input_ = convert(Tensor{Any}, input_) + grad_ = convert(Tensor{Any}, grad_) + argmax_ = convert(Tensor{Any}, argmax_) + (argmax_,) = tf.tf_promote(argmax_) + (input_, grad_) = tf.tf_promote(input_, grad_) + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad_with_argmax(input_::tf.TensorHandle, grad_::tf.TensorHandle, argmax_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolGradWithArgmax") + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(grad_) + desc["Targmax"] = tf.data_type(argmax_) + (tf.execute(desc))[1] + end +end + + +""" + ref_switch(data, pred) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_switch(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "RefSwitch") do + desc = tf.NodeDescription("RefSwitch") + data_ = convert(Tensor{Any}, data_) + pred_ = convert(Tensor{Bool}, pred_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, pred_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ref_switch(data_::tf.TensorHandle, pred_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RefSwitch") + tf.add_input(desc, data_) + tf.add_input(desc, pred_) + desc["T"] = tf.data_type(data_) + tf.execute(desc) + end +end + + +""" + sdca_fprint(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_fprint(input_; name=nothing) + local desc + tf.with_op_name(name, "SdcaFprint") do + desc = tf.NodeDescription("SdcaFprint") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function sdca_fprint(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SdcaFprint") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_choose_fastest_dataset(input_datasets) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalChooseFastestDataset") do + desc = tf.NodeDescription("ExperimentalChooseFastestDataset") + input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] + tf.add_input(desc, input_datasets_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if num_experiments !== nothing + 
desc["num_experiments"] = Base.Int(num_experiments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_choose_fastest_dataset(input_datasets_::tf.TensorHandle; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalChooseFastestDataset") + tf.add_input(desc, input_datasets_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if num_experiments !== nothing + desc["num_experiments"] = Base.Int(num_experiments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + leaky_relu(features; alpha=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyRelu") do + desc = tf.NodeDescription("LeakyRelu") + features_ = convert(Tensor{Float32}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + tf.Tensor(tf.Operation(desc)) + end + function leaky_relu(features_::tf.TensorHandle; name=nothing, alpha=nothing) + desc = tf.EagerOp("LeakyRelu") + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + identity_n(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_n(input_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "IdentityN") do + desc = tf.NodeDescription("IdentityN") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + tf.Tensor(tf.Operation(desc)) + end + function identity_n(input_::tf.TensorHandle; name=nothing, T=nothing) + desc = tf.EagerOp("IdentityN") + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + (tf.execute(desc))[1] + end +end + + +""" + cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackpropV2") do + desc = tf.NodeDescription("CudnnRNNBackpropV2") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + output_ = convert(Tensor{Any}, output_) + output_h_ = convert(Tensor{Any}, output_h_) + output_c_ = convert(Tensor{Any}, output_c_) + output_backprop_ = convert(Tensor{Any}, 
output_backprop_) + output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + reserve_space_ = convert(Tensor{Any}, reserve_space_) + host_reserved_ = convert(Tensor{Any}, host_reserved_) + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + tf.add_input(desc, host_reserved_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnn_backprop_v2(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle, host_reserved_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNBackpropV2") + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + tf.add_input(desc, host_reserved_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + desc["T"] = tf.data_type(output_) + desc["T"] = tf.data_type(output_h_) + desc["T"] = tf.data_type(output_c_) + desc["T"] = tf.data_type(output_backprop_) + desc["T"] = tf.data_type(output_h_backprop_) + desc["T"] = tf.data_type(output_c_backprop_) + desc["T"] = tf.data_type(reserve_space_) + tf.execute(desc) + end +end + + +""" + requantization_range(input, input_min, input_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
requantization_range(input_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "RequantizationRange") do + desc = tf.NodeDescription("RequantizationRange") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function requantization_range(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RequantizationRange") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + desc["Tinput"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + maximum(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function maximum(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Maximum") do + desc = tf.NodeDescription("Maximum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function maximum(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Maximum") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + reshape(tensor, shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reshape(tensor_, shape_; name=nothing) + local desc + tf.with_op_name(name, "Reshape") do + desc = tf.NodeDescription("Reshape") + tensor_ = convert(Tensor{Any}, tensor_) + shape_ = convert(Tensor{Int32}, shape_) + (tensor_,) = tf.tf_promote(tensor_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) + end + tf.Tensor(tf.Operation(desc)) + end + function reshape(tensor_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Reshape") + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) + desc["T"] = tf.data_type(tensor_) + desc["Tshape"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_solve_ls(matrix, rhs, l2_regularizer; fast=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + local desc + tf.with_op_name(name, "MatrixSolveLs") do + desc = tf.NodeDescription("MatrixSolveLs") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_solve_ls(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle, l2_regularizer_::tf.TensorHandle; name=nothing, fast=nothing) + desc = tf.EagerOp("MatrixSolveLs") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = 
tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + tf_record_dataset(filenames, compression_type, buffer_size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TFRecordDataset") do + desc = tf.NodeDescription("TFRecordDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + end + tf.Tensor(tf.Operation(desc)) + end + function tf_record_dataset(filenames_::tf.TensorHandle, compression_type_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TFRecordDataset") + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_example_debug_outputs(tree_ensemble_handle, bucketized_features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do + desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_example_debug_outputs(tree_ensemble_handle_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + (tf.execute(desc))[1] + end +end + + +""" + hsv_to_rgb(images) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hsv_to_rgb(images_; name=nothing) + local desc + tf.with_op_name(name, "HSVToRGB") do + desc = tf.NodeDescription("HSVToRGB") + images_ = convert(Tensor{Float32}, images_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + end + tf.Tensor(tf.Operation(desc)) + end + function hsv_to_rgb(images_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("HSVToRGB") + tf.add_input(desc, images_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_max_intra_op_parallelism_dataset(input_dataset, max_intra_op_parallelism) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, 
output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do + desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + max_intra_op_parallelism_ = convert(Tensor{Int64}, max_intra_op_parallelism_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, max_intra_op_parallelism_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_max_intra_op_parallelism_dataset(input_dataset_::tf.TensorHandle, max_intra_op_parallelism_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, max_intra_op_parallelism_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + """ scatter_div(ref, indices, updates; use_locking=false) """ -tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScatterDiv") - ref_ = convert(TensorFlow.Tensor{Any}, ref_) - indices_ = convert(TensorFlow.Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(TensorFlow.Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end), name, "ScatterDiv") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterDiv") do + desc = tf.NodeDescription("ScatterDiv") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_div(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterDiv") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + decode_wav(contents; desired_channels=-1, desired_samples=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + local desc + tf.with_op_name(name, "DecodeWav") do + desc = 
tf.NodeDescription("DecodeWav") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if desired_channels !== nothing + desc["desired_channels"] = Base.Int(desired_channels) + end + if desired_samples !== nothing + desc["desired_samples"] = Base.Int(desired_samples) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function decode_wav(contents_::tf.TensorHandle; name=nothing, desired_channels=nothing, desired_samples=nothing) + desc = tf.EagerOp("DecodeWav") + tf.add_input(desc, contents_) + if desired_channels !== nothing + desc["desired_channels"] = Base.Int(desired_channels) + end + if desired_samples !== nothing + desc["desired_samples"] = Base.Int(desired_samples) + end + tf.execute(desc) + end +end + + +""" + log(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log(x_; name=nothing) + local desc + tf.with_op_name(name, "Log") do + desc = tf.NodeDescription("Log") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function log(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Log") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + save_v2(prefix, tensor_names, shape_and_slices, tensors) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "SaveV2") do + desc = tf.NodeDescription("SaveV2") + prefix_ = convert(Tensor{String}, prefix_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + tensors_ = [convert(Tensor{Any}, x) for x = tensors_] + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + tf.add_input(desc, tensors_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function save_v2(prefix_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shape_and_slices_::tf.TensorHandle, tensors_::tf.TensorHandle; name=nothing, dtypes=nothing) + desc = tf.EagerOp("SaveV2") + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + tf.add_input(desc, tensors_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + (tf.execute(desc))[1] + end +end + + +""" + deep_copy(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deep_copy(x_; name=nothing) + local desc + tf.with_op_name(name, "DeepCopy") do + desc = tf.NodeDescription("DeepCopy") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function deep_copy(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DeepCopy") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + model_dataset(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ModelDataset") do + desc = tf.NodeDescription("ModelDataset") + input_dataset_ = convert(Tensor{Any}, 
input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function model_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ModelDataset") + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + parse_sequence_example(serialized, debug_name, context_dense_defaults; Ncontext_sparse=0, Ncontext_dense=0, Nfeature_list_sparse=0, Nfeature_list_dense=0, context_sparse_types=Int64[], Tcontext_dense=Int64[], feature_list_dense_types=Int64[], context_dense_shapes=Int64[], feature_list_sparse_types=Int64[], feature_list_dense_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSequenceExample") do + desc = tf.NodeDescription("ParseSequenceExample") + serialized_ = convert(Tensor{String}, serialized_) + debug_name_ = convert(Tensor{String}, debug_name_) + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, debug_name_) + tf.add_input(desc, context_dense_defaults_) + if feature_list_dense_missing_assumed_empty !== nothing + desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) + end + if context_sparse_keys !== nothing + desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) + end + if context_dense_keys !== nothing + desc["context_dense_keys"] = map(Base.identity, context_dense_keys) + end + if feature_list_sparse_keys !== nothing + desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) + end + if feature_list_dense_keys !== nothing + desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) + end + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if 
context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:9 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function parse_sequence_example(serialized_::tf.TensorHandle, debug_name_::tf.TensorHandle, context_dense_defaults_::tf.TensorHandle; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + desc = tf.EagerOp("ParseSequenceExample") + tf.add_input(desc, serialized_) + tf.add_input(desc, debug_name_) + tf.add_input(desc, context_dense_defaults_) + if feature_list_dense_missing_assumed_empty !== nothing + desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) + end + if context_sparse_keys !== nothing + desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) + end + if context_dense_keys !== nothing + desc["context_dense_keys"] = map(Base.identity, context_dense_keys) + end + if feature_list_sparse_keys !== nothing + desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) + end + if feature_list_dense_keys !== nothing + desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) + end + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + tf.execute(desc) + end +end + + +""" + sinh(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sinh(x_; name=nothing) + local desc + tf.with_op_name(name, "Sinh") do + desc = tf.NodeDescription("Sinh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function sinh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sinh") + 
tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + iterator_v2() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorV2") do + desc = tf.NodeDescription("IteratorV2") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorV2") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_write_v2(handle, index, value, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV2") do + desc = tf.NodeDescription("TensorArrayWriteV2") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_write_v2(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayWriteV2") + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_element_shape(input_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListElementShape") do + desc = tf.NodeDescription("TensorListElementShape") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_element_shape(input_handle_::tf.TensorHandle; name=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListElementShape") + tf.add_input(desc, input_handle_) + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + (tf.execute(desc))[1] + end +end + + +""" + queue_size_v2(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_size_v2(handle_; name=nothing) + local desc + tf.with_op_name(name, 
"QueueSizeV2") do + desc = tf.NodeDescription("QueueSizeV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function queue_size_v2(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QueueSizeV2") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + expm1(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function expm1(x_; name=nothing) + local desc + tf.with_op_name(name, "Expm1") do + desc = tf.NodeDescription("Expm1") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function expm1(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Expm1") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + batch_matrix_band_part(input, num_lower, num_upper) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixBandPart") do + desc = tf.NodeDescription("BatchMatrixBandPart") + input_ = convert(Tensor{Any}, input_) + num_lower_ = convert(Tensor{Int64}, num_lower_) + num_upper_ = convert(Tensor{Int64}, num_upper_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_band_part(input_::tf.TensorHandle, num_lower_::tf.TensorHandle, num_upper_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchMatrixBandPart") + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + concatenate_dataset(input_dataset, another_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ConcatenateDataset") do + desc = tf.NodeDescription("ConcatenateDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + another_dataset_ = convert(Tensor{Any}, another_dataset_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, another_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function concatenate_dataset(input_dataset_::tf.TensorHandle, another_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ConcatenateDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, another_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + decode_gif(contents) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_gif(contents_; name=nothing) + local desc + tf.with_op_name(name, "DecodeGif") do + desc = tf.NodeDescription("DecodeGif") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + end + 
tf.Tensor(tf.Operation(desc)) + end + function decode_gif(contents_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DecodeGif") + tf.add_input(desc, contents_) + (tf.execute(desc))[1] + end +end + + +""" + tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + local desc + tf.with_op_name(name, "TPUReplicate") do + desc = tf.NodeDescription("TPUReplicate") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_] + variables_ = [convert(Tensor{Any}, x) for x = variables_] + guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_] + tf.add_input(desc, inputs_) + tf.add_input(desc, broadcast_inputs_) + tf.add_input(desc, variables_) + tf.add_input(desc, guaranteed_constants_) + if computation !== nothing + desc["computation"] = Base.identity(computation) + end + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Tbroadcast_inputs !== nothing + desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) + end + if NumVariables !== nothing + desc["NumVariables"] = Base.Int(NumVariables) + end + if Tguaranteed_constants !== nothing + desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if padding_map !== nothing + desc["padding_map"] = map(Base.identity, padding_map) + end + if step_marker_location !== nothing + desc["step_marker_location"] = Base.String(step_marker_location) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_replicate(inputs_::tf.TensorHandle, broadcast_inputs_::tf.TensorHandle, variables_::tf.TensorHandle, guaranteed_constants_::tf.TensorHandle; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + desc = tf.EagerOp("TPUReplicate") + tf.add_input(desc, inputs_) + tf.add_input(desc, broadcast_inputs_) + tf.add_input(desc, variables_) + tf.add_input(desc, guaranteed_constants_) 
+ if computation !== nothing + desc["computation"] = Base.identity(computation) + end + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Tbroadcast_inputs !== nothing + desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) + end + if NumVariables !== nothing + desc["NumVariables"] = Base.Int(NumVariables) + end + if Tguaranteed_constants !== nothing + desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if padding_map !== nothing + desc["padding_map"] = map(Base.identity, padding_map) + end + if step_marker_location !== nothing + desc["step_marker_location"] = Base.String(step_marker_location) + end + (tf.execute(desc))[1] + end +end + + +""" + batch_self_adjoint_eig_v2(input; compute_v=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "BatchSelfAdjointEigV2") do + desc = tf.NodeDescription("BatchSelfAdjointEigV2") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function batch_self_adjoint_eig_v2(input_::tf.TensorHandle; name=nothing, compute_v=nothing) + desc = tf.EagerOp("BatchSelfAdjointEigV2") + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + shape(input; out_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shape(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Shape") do + desc = tf.NodeDescription("Shape") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function shape(input_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("Shape") + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + repeat_dataset(input_dataset, count) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RepeatDataset") do + desc = tf.NodeDescription("RepeatDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, 
count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function repeat_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("RepeatDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=bilinear) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradBoxes") do + desc = tf.NodeDescription("CropAndResizeGradBoxes") + grads_ = convert(Tensor{Float32}, grads_) + image_ = convert(Tensor{Any}, image_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, grads_) + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + if method !== nothing + desc["method"] = Base.String(method) + end + end + tf.Tensor(tf.Operation(desc)) + end + function crop_and_resize_grad_boxes(grads_::tf.TensorHandle, image_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle; name=nothing, method=nothing) + desc = tf.EagerOp("CropAndResizeGradBoxes") + tf.add_input(desc, grads_) + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + if method !== nothing + desc["method"] = Base.String(method) + end + desc["T"] = tf.data_type(image_) + (tf.execute(desc))[1] + end +end + + +""" + reciprocal_grad(y, dy) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "ReciprocalGrad") do + desc = tf.NodeDescription("ReciprocalGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function reciprocal_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReciprocalGrad") + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + (tf.execute(desc))[1] + end +end + + +""" + batch_matrix_solve(matrix, rhs; adjoint=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolve") do + desc = tf.NodeDescription("BatchMatrixSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; 
name=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixSolve") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + mutable_hash_table_v2(; container=, shared_name=, use_node_name_sharing=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTableV2") do + desc = tf.NodeDescription("MutableHashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("MutableHashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + exit(data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function exit(data_; name=nothing) + local desc + tf.with_op_name(name, "Exit") do + desc = tf.NodeDescription("Exit") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + end + tf.Tensor(tf.Operation(desc)) + end + function exit(data_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Exit") + tf.add_input(desc, data_) + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + lrn(input; depth_radius=5, bias=?, alpha=?, beta=?) 
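+
+Local Response Normalization: each element is normalized by a sum of squares taken over a window of depth_radius channels on either side.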
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRN") do + desc = tf.NodeDescription("LRN") + input_ = convert(Tensor{Float32}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + tf.Tensor(tf.Operation(desc)) + end + function lrn(input_::tf.TensorHandle; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + desc = tf.EagerOp("LRN") + tf.add_input(desc, input_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + stateless_if(cond, input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "StatelessIf") do + desc = tf.NodeDescription("StatelessIf") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_if(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + desc = tf.EagerOp("StatelessIf") + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + desc["Tcond"] = tf.data_type(cond_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_set_item(input_handle, index, item) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListSetItem") do + desc = tf.NodeDescription("TensorListSetItem") + input_handle_ = convert(Tensor{Any}, input_handle_) + index_ = convert(Tensor{Int32}, index_) + item_ = convert(Tensor{Any}, item_) + (item_,) = tf.tf_promote(item_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + tf.add_input(desc, item_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
tensor_list_set_item(input_handle_::tf.TensorHandle, index_::tf.TensorHandle, item_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListSetItem") + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + tf.add_input(desc, item_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + desc["element_dtype"] = tf.data_type(item_) + (tf.execute(desc))[1] + end +end + + +""" + rsqrt(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rsqrt(x_; name=nothing) + local desc + tf.with_op_name(name, "Rsqrt") do + desc = tf.NodeDescription("Rsqrt") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function rsqrt(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Rsqrt") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + summand_ = convert(Tensor{Any}, summand_) + min_summand_ = convert(Tensor{Float32}, min_summand_) + max_summand_ = convert(Tensor{Float32}, max_summand_) + (summand_,) = tf.tf_promote(summand_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + tf.add_input(desc, summand_) + tf.add_input(desc, min_summand_) + tf.add_input(desc, max_summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, 
min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle, summand_::tf.TensorHandle, min_summand_::tf.TensorHandle, max_summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndReluAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + tf.add_input(desc, summand_) + tf.add_input(desc, min_summand_) + tf.add_input(desc, max_summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + desc["Tbias"] = tf.data_type(bias_) + desc["Tsummand"] = tf.data_type(summand_) + tf.execute(desc) + end +end + + +""" + delete_session_tensor(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function delete_session_tensor(handle_; name=nothing) + local desc + tf.with_op_name(name, "DeleteSessionTensor") do + desc = tf.NodeDescription("DeleteSessionTensor") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function delete_session_tensor(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DeleteSessionTensor") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + one_hot(indices, depth, on_value, off_value; axis=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + local desc + tf.with_op_name(name, "OneHot") do + desc = tf.NodeDescription("OneHot") + indices_ = convert(Tensor{Int64}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + depth_ = convert(Tensor{Int32}, depth_) + on_value_ = convert(Tensor{Any}, on_value_) + off_value_ = convert(Tensor{Any}, off_value_) + (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, indices_) + tf.add_input(desc, depth_) + tf.add_input(desc, on_value_) + tf.add_input(desc, off_value_) + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + tf.Tensor(tf.Operation(desc)) + end + function one_hot(indices_::tf.TensorHandle, depth_::tf.TensorHandle, on_value_::tf.TensorHandle, off_value_::tf.TensorHandle; name=nothing, axis=nothing) + desc = tf.EagerOp("OneHot") + tf.add_input(desc, indices_) + tf.add_input(desc, depth_) + tf.add_input(desc, on_value_) + tf.add_input(desc, off_value_) + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + desc["TI"] = tf.data_type(indices_) + desc["T"] = tf.data_type(on_value_) + desc["T"] = tf.data_type(off_value_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_ftrl(var, accum, linear, grad, 
lr, l1, l2, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyFtrl") do + desc = tf.NodeDescription("ResourceApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyFtrl") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + sdca_optimizer_v2(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data; adaptive=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizerV2") do + desc = tf.NodeDescription("SdcaOptimizerV2") + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + example_labels_ = convert(Tensor{Float32}, example_labels_) + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + 
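# After conversion, inputs are attached to the NodeDescription in the exact order the SdcaOptimizerV2 signature expects. +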
tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptive !== nothing + desc["adaptive"] = Base.Bool(adaptive) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sdca_optimizer_v2(sparse_example_indices_::tf.TensorHandle, sparse_feature_indices_::tf.TensorHandle, sparse_feature_values_::tf.TensorHandle, dense_features_::tf.TensorHandle, example_weights_::tf.TensorHandle, example_labels_::tf.TensorHandle, sparse_indices_::tf.TensorHandle, sparse_weights_::tf.TensorHandle, dense_weights_::tf.TensorHandle, example_state_data_::tf.TensorHandle; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + desc = tf.EagerOp("SdcaOptimizerV2") + tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptive !== nothing + desc["adaptive"] = Base.Bool(adaptive) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + tf.execute(desc) + end +end + + +""" + queue_enqueue(handle, components; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + 
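# Graph-mode path: build a NodeDescription inside with_op_name, then wrap the finished Operation in a Tensor. +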
local desc + tf.with_op_name(name, "QueueEnqueue") do + desc = tf.NodeDescription("QueueEnqueue") + handle_ = convert(Tensor{String}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_enqueue(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueue") + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "ConditionalAccumulator") do + desc = tf.NodeDescription("ConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + desc = tf.EagerOp("ConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + (tf.execute(desc))[1] + end +end + + +""" + ctc_beam_search_decoder(inputs, sequence_length; merge_repeated=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCBeamSearchDecoder") do + desc = tf.NodeDescription("CTCBeamSearchDecoder") + inputs_ = convert(Tensor{Float32}, inputs_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if beam_width !== nothing + desc["beam_width"] = Base.Int(beam_width) + end + if top_paths !== nothing + desc["top_paths"] = Base.Int(top_paths) + end + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ctc_beam_search_decoder(inputs_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, beam_width=nothing, top_paths=nothing, 
merge_repeated=nothing) + desc = tf.EagerOp("CTCBeamSearchDecoder") + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if beam_width !== nothing + desc["beam_width"] = Base.Int(beam_width) + end + if top_paths !== nothing + desc["top_paths"] = Base.Int(top_paths) + end + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + tf.execute(desc) + end +end + + +""" + whole_file_reader(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReader") do + desc = tf.NodeDescription("WholeFileReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("WholeFileReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyRMSProp") do + desc = tf.NodeDescription("ApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(ms_) + desc["T"] = tf.data_type(mom_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + adjust_saturation(images, scale) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_saturation(images_, scale_; 
name=nothing) + local desc + tf.with_op_name(name, "AdjustSaturation") do + desc = tf.NodeDescription("AdjustSaturation") + images_ = convert(Tensor{Float32}, images_) + scale_ = convert(Tensor{Float32}, scale_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, scale_) + end + tf.Tensor(tf.Operation(desc)) + end + function adjust_saturation(images_::tf.TensorHandle, scale_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AdjustSaturation") + tf.add_input(desc, images_) + tf.add_input(desc, scale_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_remove_v2(table_handle, keys) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableRemoveV2") do + desc = tf.NodeDescription("LookupTableRemoveV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_remove_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableRemoveV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + desc["Tin"] = tf.data_type(keys_) + (tf.execute(desc))[1] + end +end + + +""" + queue_close(handle; cancel_pending_enqueues=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueClose") do + desc = tf.NodeDescription("QueueClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_close(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("QueueClose") + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + (tf.execute(desc))[1] + end +end + + +""" + prefetch_dataset(input_dataset, buffer_size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "PrefetchDataset") do + desc = tf.NodeDescription("PrefetchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function prefetch_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("PrefetchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = 
map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + map_dataset(input_dataset, other_arguments; use_inter_op_parallelism=true, preserve_cardinality=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "MapDataset") do + desc = tf.NodeDescription("MapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("MapDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBias") do + desc = tf.NodeDescription("QuantizedConv2DWithBias") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Float32}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type 
!== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBias") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + tensor_array_read_v3(handle, index, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayReadV3") do + desc = tf.NodeDescription("TensorArrayReadV3") + handle_ = convert(Tensor{Any}, handle_) + index_ = convert(Tensor{Int32}, index_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_read_v3(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("TensorArrayReadV3") + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + identity(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity(input_; name=nothing) + local desc + tf.with_op_name(name, "Identity") do + desc = tf.NodeDescription("Identity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function identity(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Identity") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + print(input, data; message=, first_n=-1, summarize=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Print") do + desc = tf.NodeDescription("Print") + input_ = convert(Tensor{Any}, input_) + data_ = [convert(Tensor{Any}, x) for x = data_] + (input_,) = tf.tf_promote(input_) + 
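# tf_promote ensures input_ is a Tensor with a concrete element type before it is wired into the Print op. +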
tf.add_input(desc, input_) + tf.add_input(desc, data_) + if U !== nothing + desc["U"] = map(Base.identity, U) + end + if message !== nothing + desc["message"] = Base.String(message) + end + if first_n !== nothing + desc["first_n"] = Base.Int(first_n) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + tf.Tensor(tf.Operation(desc)) + end + function print(input_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + desc = tf.EagerOp("Print") + tf.add_input(desc, input_) + tf.add_input(desc, data_) + if U !== nothing + desc["U"] = map(Base.identity, U) + end + if message !== nothing + desc["message"] = Base.String(message) + end + if first_n !== nothing + desc["first_n"] = Base.Int(first_n) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + collective_bcast_send(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastSend") do + desc = tf.NodeDescription("CollectiveBcastSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function collective_bcast_send(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + desc = tf.EagerOp("CollectiveBcastSend") + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + _list_to_array(input) + +Converts a list of tensors to an array of tensors. 
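+
+Illustrative sketch only; `a` and `b` stand for pre-existing Float32 tensors:
+
+    parts = _list_to_array([a, b]; Tin=[Float32, Float32], N=2)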
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ListToArray") do + desc = tf.NodeDescription("_ListToArray") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _list_to_array(input_::tf.TensorHandle; name=nothing, Tin=nothing, N=nothing) + desc = tf.EagerOp("_ListToArray") + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + tf.execute(desc) + end +end + + +""" + neg_train(w_in, w_out, examples, labels, lr) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + local desc + tf.with_op_name(name, "NegTrain") do + desc = tf.NodeDescription("NegTrain") + w_in_ = convert(Tensor{Float32}, w_in_) + w_out_ = convert(Tensor{Float32}, w_out_) + examples_ = convert(Tensor{Int32}, examples_) + labels_ = convert(Tensor{Int32}, labels_) + lr_ = convert(Tensor{Float32}, lr_) + tf.add_input(desc, w_in_) + tf.add_input(desc, w_out_) + tf.add_input(desc, examples_) + tf.add_input(desc, labels_) + tf.add_input(desc, lr_) + if vocab_count !== nothing + desc["vocab_count"] = map(Base.identity, vocab_count) + end + if num_negative_samples !== nothing + desc["num_negative_samples"] = Base.Int(num_negative_samples) + end + end + tf.Tensor(tf.Operation(desc)) + end + function neg_train(w_in_::tf.TensorHandle, w_out_::tf.TensorHandle, examples_::tf.TensorHandle, labels_::tf.TensorHandle, lr_::tf.TensorHandle; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + desc = tf.EagerOp("NegTrain") + tf.add_input(desc, w_in_) + tf.add_input(desc, w_out_) + tf.add_input(desc, examples_) + tf.add_input(desc, labels_) + tf.add_input(desc, lr_) + if vocab_count !== nothing + desc["vocab_count"] = map(Base.identity, vocab_count) + end + if num_negative_samples !== nothing + desc["num_negative_samples"] = Base.Int(num_negative_samples) + end + (tf.execute(desc))[1] + end +end + + +""" + worker_heartbeat(request) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function worker_heartbeat(request_; name=nothing) + local desc + tf.with_op_name(name, "WorkerHeartbeat") do + desc = tf.NodeDescription("WorkerHeartbeat") + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, request_) + end + tf.Tensor(tf.Operation(desc)) + end + function worker_heartbeat(request_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WorkerHeartbeat") + tf.add_input(desc, request_) + (tf.execute(desc))[1] + end +end + + +""" + merge_v2checkpoints(checkpoint_prefixes, destination_prefix; delete_old_dirs=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + local desc + tf.with_op_name(name, "MergeV2Checkpoints") do + desc = tf.NodeDescription("MergeV2Checkpoints") + checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_) + 
destination_prefix_ = convert(Tensor{String}, destination_prefix_) + tf.add_input(desc, checkpoint_prefixes_) + tf.add_input(desc, destination_prefix_) + if delete_old_dirs !== nothing + desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) + end + end + tf.Tensor(tf.Operation(desc)) + end + function merge_v2checkpoints(checkpoint_prefixes_::tf.TensorHandle, destination_prefix_::tf.TensorHandle; name=nothing, delete_old_dirs=nothing) + desc = tf.EagerOp("MergeV2Checkpoints") + tf.add_input(desc, checkpoint_prefixes_) + tf.add_input(desc, destination_prefix_) + if delete_old_dirs !== nothing + desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) + end + (tf.execute(desc))[1] + end +end + + +""" + collective_permute(input, source_target_pairs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing) + local desc + tf.with_op_name(name, "CollectivePermute") do + desc = tf.NodeDescription("CollectivePermute") + input_ = convert(Tensor{Any}, input_) + source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, source_target_pairs_) + end + tf.Tensor(tf.Operation(desc)) + end + function collective_permute(input_::tf.TensorHandle, source_target_pairs_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CollectivePermute") + tf.add_input(desc, input_) + tf.add_input(desc, source_target_pairs_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + quantize_and_dequantize_v3(input, input_min, input_max, num_bits; signed_input=true, range_given=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV3") do + desc = tf.NodeDescription("QuantizeAndDequantizeV3") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Any}, input_min_) + input_max_ = convert(Tensor{Any}, input_max_) + num_bits_ = convert(Tensor{Int32}, num_bits_) + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, num_bits_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + tf.Tensor(tf.Operation(desc)) + end + function quantize_and_dequantize_v3(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, num_bits_::tf.TensorHandle; name=nothing, signed_input=nothing, range_given=nothing) + desc = tf.EagerOp("QuantizeAndDequantizeV3") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, num_bits_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_min_) + desc["T"] = tf.data_type(input_max_) + (tf.execute(desc))[1] + end +end + + +""" + hash_table(; container=, shared_name=, use_node_name_sharing=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hash_table(; name=nothing, 
container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTable") do + desc = tf.NodeDescription("HashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("HashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + softplus_grad(gradients, features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softplus_grad(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftplusGrad") do + desc = tf.NodeDescription("SoftplusGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function softplus_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SoftplusGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + fixed_length_record_reader(; header_bytes=0, footer_bytes=0, hop_bytes=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordReader") do + desc = tf.NodeDescription("FixedLengthRecordReader") + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FixedLengthRecordReader") + if header_bytes !== nothing + desc["header_bytes"] = 
Base.Int(header_bytes) + end + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_scatter_v2(handle, indices, value, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV2") do + desc = tf.NodeDescription("TensorArrayScatterV2") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_scatter_v2(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayScatterV2") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + decode_json_example(json_examples) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_json_example(json_examples_; name=nothing) + local desc + tf.with_op_name(name, "DecodeJSONExample") do + desc = tf.NodeDescription("DecodeJSONExample") + json_examples_ = convert(Tensor{String}, json_examples_) + tf.add_input(desc, json_examples_) + end + tf.Tensor(tf.Operation(desc)) + end + function decode_json_example(json_examples_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DecodeJSONExample") + tf.add_input(desc, json_examples_) + (tf.execute(desc))[1] + end +end + + +""" + fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormGradV2") do + desc = tf.NodeDescription("FusedBatchNormGradV2") + y_backprop_ = convert(Tensor{Any}, y_backprop_) + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Float32}, scale_) + reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) + reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) + (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_) + (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_) + tf.add_input(desc, y_backprop_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, reserve_space_1_) + tf.add_input(desc, reserve_space_2_) + if U !== nothing + desc["U"] = Base.identity(U) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== 
nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fused_batch_norm_grad_v2(y_backprop_::tf.TensorHandle, x_::tf.TensorHandle, scale_::tf.TensorHandle, reserve_space_1_::tf.TensorHandle, reserve_space_2_::tf.TensorHandle; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormGradV2") + tf.add_input(desc, y_backprop_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, reserve_space_1_) + tf.add_input(desc, reserve_space_2_) + if U !== nothing + desc["U"] = Base.identity(U) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(y_backprop_) + desc["T"] = tf.data_type(x_) + desc["U"] = tf.data_type(reserve_space_1_) + desc["U"] = tf.data_type(reserve_space_2_) + tf.execute(desc) + end +end + + +""" + _host_cast(x; Truncate=false) + +Cast x of type SrcT to y of DstT. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "_HostCast") do + desc = tf.NodeDescription("_HostCast") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _host_cast(x_::tf.TensorHandle; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + desc = tf.EagerOp("_HostCast") + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + desc["SrcT"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + tf_record_reader(; container=, shared_name=, compression_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "TFRecordReader") do + desc = tf.NodeDescription("TFRecordReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + desc = tf.EagerOp("TFRecordReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + (tf.execute(desc))[1] + end +end + + +""" + while_(input; output_shapes=Int64[], parallel_iterations=10) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op 
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing)
+        local desc
+        tf.with_op_name(name, "While") do
+            desc = tf.NodeDescription("While")
+            input_ = [convert(Tensor{Any}, x) for x = input_]
+            tf.add_input(desc, input_)
+            if T !== nothing
+                desc["T"] = map(Base.identity, T)
+            end
+            if cond !== nothing
+                desc["cond"] = Base.identity(cond)
+            end
+            if body !== nothing
+                desc["body"] = Base.identity(body)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
+            if parallel_iterations !== nothing
+                desc["parallel_iterations"] = Base.Int(parallel_iterations)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    # Caveat: While consumes and produces one tensor per loop variable, but this
+    # generated eager method is typed for a single TensorHandle and returns only
+    # the first output of tf.execute.
+    function while_(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing)
+        desc = tf.EagerOp("While")
+        tf.add_input(desc, input_)
+        if T !== nothing
+            desc["T"] = map(Base.identity, T)
+        end
+        if cond !== nothing
+            desc["cond"] = Base.identity(cond)
+        end
+        if body !== nothing
+            desc["body"] = Base.identity(body)
+        end
+        if output_shapes !== nothing
+            desc["output_shapes"] = map(Base.identity, output_shapes)
+        end
+        if parallel_iterations !== nothing
+            desc["parallel_iterations"] = Base.Int(parallel_iterations)
+        end
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    stateless_multinomial(logits, num_samples, seed; output_dtype=Int64)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
+        local desc
+        tf.with_op_name(name, "StatelessMultinomial") do
+            desc = tf.NodeDescription("StatelessMultinomial")
+            logits_ = convert(Tensor{Any}, logits_)
+            num_samples_ = convert(Tensor{Int32}, num_samples_)
+            seed_ = convert(Tensor{Int64}, seed_)
+            (logits_,) = tf.tf_promote(logits_)
+            (seed_,) = tf.tf_promote(seed_)
+            tf.add_input(desc, logits_)
+            tf.add_input(desc, num_samples_)
+            tf.add_input(desc, seed_)
+            if output_dtype !== nothing
+                desc["output_dtype"] = Base.identity(output_dtype)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function stateless_multinomial(logits_::tf.TensorHandle, num_samples_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, output_dtype=nothing)
+        desc = tf.EagerOp("StatelessMultinomial")
+        tf.add_input(desc, logits_)
+        tf.add_input(desc, num_samples_)
+        tf.add_input(desc, seed_)
+        if output_dtype !== nothing
+            desc["output_dtype"] = Base.identity(output_dtype)
+        end
+        desc["T"] = tf.data_type(logits_)
+        desc["Tseed"] = tf.data_type(seed_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    scatter_add(ref, indices, updates; use_locking=false)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+        local desc
+        tf.with_op_name(name, "ScatterAdd") do
+            desc = tf.NodeDescription("ScatterAdd")
+            ref_ = convert(Tensor{Any}, ref_)
+            indices_ = convert(Tensor{Any}, indices_)
+            # Rebase from Julia's 1-based indices to TensorFlow's 0-based convention.
+            indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+            updates_ = convert(Tensor{Any}, updates_)
+            (ref_, updates_) = tf.tf_promote(ref_, updates_)
+            (indices_,) = tf.tf_promote(indices_)
+            tf.add_input(desc, ref_)
+            tf.add_input(desc, indices_)
+            tf.add_input(desc, updates_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function scatter_add(ref_::tf.TensorHandle,
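+        # Note the asymmetry with the graph wrapper above: the eager method that
+        # begins here does not apply the 1-based-to-0-based index shift, so callers
+        # on this path must supply 0-based indices themselves.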
indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterAdd") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + conj(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conj(input_; name=nothing) + local desc + tf.with_op_name(name, "Conj") do + desc = tf.NodeDescription("Conj") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function conj(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Conj") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + parallel_dynamic_stitch(indices, data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ParallelDynamicStitch") do + desc = tf.NodeDescription("ParallelDynamicStitch") + indices_ = [convert(Tensor{Int32}, x) for x = indices_] + data_ = [convert(Tensor{Any}, x) for x = data_] + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, indices_) + tf.add_input(desc, data_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function parallel_dynamic_stitch(indices_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("ParallelDynamicStitch") + tf.add_input(desc, indices_) + tf.add_input(desc, data_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + make_iterator(dataset, iterator) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing) + local desc + tf.with_op_name(name, "MakeIterator") do + desc = tf.NodeDescription("MakeIterator") + dataset_ = convert(Tensor{Any}, dataset_) + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, dataset_) + tf.add_input(desc, iterator_) + end + tf.Tensor(tf.Operation(desc)) + end + function make_iterator(dataset_::tf.TensorHandle, iterator_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MakeIterator") + tf.add_input(desc, dataset_) + tf.add_input(desc, iterator_) + (tf.execute(desc))[1] + end +end + + +""" + rfft3d(input, fft_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft3d(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT3D") do + desc = tf.NodeDescription("RFFT3D") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function rfft3d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RFFT3D") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_reduce_sum_sparse(input_indices, input_values, input_shape, reduction_axes; keep_dims=false) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSumSparse") do + desc = tf.NodeDescription("SparseReduceSumSparse") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_reduce_sum_sparse(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceSumSparse") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_values_) + tf.execute(desc) + end +end + + +""" + collective_gather(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveGather") do + desc = tf.NodeDescription("CollectiveGather") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function collective_gather(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + desc = tf.EagerOp("CollectiveGather") + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold; pad_per_class=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + local desc + tf.with_op_name(name, "CombinedNonMaxSuppression") do + desc = tf.NodeDescription("CombinedNonMaxSuppression") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_per_class_ = 
convert(Tensor{Int32}, max_output_size_per_class_) + max_total_size_ = convert(Tensor{Int32}, max_total_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_per_class_) + tf.add_input(desc, max_total_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) + if pad_per_class !== nothing + desc["pad_per_class"] = Base.Bool(pad_per_class) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function combined_non_max_suppression(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_per_class_::tf.TensorHandle, max_total_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing, pad_per_class=nothing) + desc = tf.EagerOp("CombinedNonMaxSuppression") + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_per_class_) + tf.add_input(desc, max_total_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) + if pad_per_class !== nothing + desc["pad_per_class"] = Base.Bool(pad_per_class) + end + tf.execute(desc) + end +end + + +""" + _scoped_allocator() + +Allocates a mutable tensor that becomes available to appropriately annotated +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocator") do + desc = tf.NodeDescription("_ScopedAllocator") + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if expected_call_count !== nothing + desc["expected_call_count"] = Base.Int(expected_call_count) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + desc = tf.EagerOp("_ScopedAllocator") + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if expected_call_count !== nothing + desc["expected_call_count"] = Base.Int(expected_call_count) + end + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_adadelta_parameters(parameters, accumulators, updates; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + updates_ = convert(Tensor{Float32}, updates_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, 
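+            # LoadTPUEmbeddingAdadeltaParameters consumes three Float32 slot tensors
+            # (parameters, accumulators, updates); table_id/table_name select the
+            # embedding table and num_shards/shard_id address the shard being loaded.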
updates_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_adadelta_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, updates_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_add(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + local desc + tf.with_op_name(name, "SparseAdd") do + desc = tf.NodeDescription("SparseAdd") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + thresh_ = convert(Tensor{Any}, thresh_) + (thresh_,) = tf.tf_promote(thresh_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + tf.add_input(desc, thresh_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_add(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle, thresh_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseAdd") + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + tf.add_input(desc, thresh_) + desc["T"] = tf.data_type(a_values_) + desc["T"] = tf.data_type(b_values_) + desc["Treal"] = tf.data_type(thresh_) + tf.execute(desc) + end +end + + +""" + ctc_greedy_decoder(inputs, sequence_length; merge_repeated=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCGreedyDecoder") do + desc = tf.NodeDescription("CTCGreedyDecoder") + inputs_ = convert(Tensor{Float32}, inputs_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + out = tf.Tensor[] + op = 
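+        # CTCGreedyDecoder is a multi-output op (decoded indices, values, and shape,
+        # plus log probabilities), so the wrapper collects one tf.Tensor per output
+        # index rather than wrapping the operation in a single tensor.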
tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ctc_greedy_decoder(inputs_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, merge_repeated=nothing) + desc = tf.EagerOp("CTCGreedyDecoder") + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + tf.execute(desc) + end +end + + +""" + immutable_const() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + local desc + tf.with_op_name(name, "ImmutableConst") do + desc = tf.NodeDescription("ImmutableConst") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if memory_region_name !== nothing + desc["memory_region_name"] = Base.String(memory_region_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + desc = tf.EagerOp("ImmutableConst") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if memory_region_name !== nothing + desc["memory_region_name"] = Base.String(memory_region_name) + end + (tf.execute(desc))[1] + end +end + + +""" + consume_mutex_lock(mutex_lock) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing) + local desc + tf.with_op_name(name, "ConsumeMutexLock") do + desc = tf.NodeDescription("ConsumeMutexLock") + mutex_lock_ = convert(Tensor{Any}, mutex_lock_) + tf.add_input(desc, mutex_lock_) + end + tf.Tensor(tf.Operation(desc)) + end + function consume_mutex_lock(mutex_lock_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ConsumeMutexLock") + tf.add_input(desc, mutex_lock_) + (tf.execute(desc))[1] + end +end + + +""" + greater_equal(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function greater_equal(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "GreaterEqual") do + desc = tf.NodeDescription("GreaterEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function greater_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GreaterEqual") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter= ) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + local desc + tf.with_op_name(name, "InitializeTableFromTextFileV2") do + desc = tf.NodeDescription("InitializeTableFromTextFileV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if 
value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + end + tf.Tensor(tf.Operation(desc)) + end + function initialize_table_from_text_file_v2(table_handle_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + desc = tf.EagerOp("InitializeTableFromTextFileV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + (tf.execute(desc))[1] + end +end + + +""" + queue_dequeue(handle; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeue") do + desc = tf.NodeDescription("QueueDequeue") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue(handle_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeue") + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + equal(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function equal(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Equal") do + desc = tf.NodeDescription("Equal") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Equal") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandle") do + desc = tf.NodeDescription("IteratorFromStringHandle") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_from_string_handle(string_handle_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = 
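+        # Every eager method here follows the same pattern: build an EagerOp in place
+        # of a NodeDescription, add inputs in declaration order, copy the keyword
+        # attributes, then infer dtype attributes from the live handles.
+        # A minimal sketch of calling one of these methods, assuming an active
+        # EagerContext and that `tf` resolves to the TensorFlow module (as in
+        # eager.jl), using `equal` from just above:
+        #     x = tf.TensorHandle(tf.RawTensor([1, 2, 3]))
+        #     y = tf.TensorHandle(tf.RawTensor([1, 0, 3]))
+        #     tf.resolve(equal(x, y))    # Bool tensor: [true, false, true]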
tf.EagerOp("IteratorFromStringHandle") + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_split(tensor, element_shape, lengths) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListSplit") do + desc = tf.NodeDescription("TensorListSplit") + tensor_ = convert(Tensor{Any}, tensor_) + element_shape_ = convert(Tensor{Any}, element_shape_) + lengths_ = convert(Tensor{Int64}, lengths_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, lengths_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_split(tensor_::tf.TensorHandle, element_shape_::tf.TensorHandle, lengths_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListSplit") + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, lengths_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["element_dtype"] = tf.data_type(tensor_) + desc["shape_type"] = tf.data_type(element_shape_) + (tf.execute(desc))[1] + end +end + + +""" + fractional_max_pool(value; pseudo_random=false, overlapping=false, deterministic=false, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalMaxPool") do + desc = tf.NodeDescription("FractionalMaxPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fractional_max_pool(value_::tf.TensorHandle; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FractionalMaxPool") + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = 
Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(value_) + tf.execute(desc) + end +end + + +""" + scatter_nd(indices, updates, shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNd") do + desc = tf.NodeDescription("ScatterNd") + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + shape_ = convert(Tensor{Any}, shape_) + (updates_,) = tf.tf_promote(updates_) + (indices_, shape_) = tf.tf_promote(indices_, shape_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + tf.add_input(desc, shape_) + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_nd(indices_::tf.TensorHandle, updates_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ScatterNd") + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + tf.add_input(desc, shape_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + desc["Tindices"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_scatter_into_existing_list(input_handle, tensor, indices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListScatterIntoExistingList") do + desc = tf.NodeDescription("TensorListScatterIntoExistingList") + input_handle_ = convert(Tensor{Any}, input_handle_) + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Int32}, indices_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_scatter_into_existing_list(input_handle_::tf.TensorHandle, tensor_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListScatterIntoExistingList") + tf.add_input(desc, input_handle_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + desc["element_dtype"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + select(condition, t, e) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function select(condition_, t_, e_; name=nothing) + local desc + tf.with_op_name(name, "Select") do + desc = tf.NodeDescription("Select") + condition_ = convert(Tensor{Bool}, condition_) + t_ = convert(Tensor{Any}, t_) + e_ = convert(Tensor{Any}, e_) + (t_, e_) = tf.tf_promote(t_, e_) + tf.add_input(desc, condition_) + tf.add_input(desc, t_) + tf.add_input(desc, e_) + end + tf.Tensor(tf.Operation(desc)) + end + function select(condition_::tf.TensorHandle, t_::tf.TensorHandle, e_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Select") + tf.add_input(desc, condition_) + tf.add_input(desc, t_) + tf.add_input(desc, e_) + desc["T"] = tf.data_type(t_) 
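+        # t_ and e_ must already share an element type (the graph path promotes them
+        # together), so assigning desc["T"] from each in turn is a harmless overwrite.
+        # A usage sketch under the same assumptions as the `equal` example above:
+        #     c = tf.TensorHandle(tf.RawTensor([true, false]))
+        #     a = tf.TensorHandle(tf.RawTensor([1, 2]))
+        #     b = tf.TensorHandle(tf.RawTensor([3, 4]))
+        #     tf.resolve(select(c, a, b))    # elementwise choice: [1, 4]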
+ desc["T"] = tf.data_type(e_) + (tf.execute(desc))[1] + end +end + + +""" + min(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Min") do + desc = tf.NodeDescription("Min") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function min(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Min") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + lrn_grad(input_grads, input_image, output_image; depth_radius=5, bias=?, alpha=?, beta=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRNGrad") do + desc = tf.NodeDescription("LRNGrad") + input_grads_ = convert(Tensor{Float32}, input_grads_) + input_image_ = convert(Tensor{Float32}, input_image_) + output_image_ = convert(Tensor{Float32}, output_image_) + (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_) + tf.add_input(desc, input_grads_) + tf.add_input(desc, input_image_) + tf.add_input(desc, output_image_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + tf.Tensor(tf.Operation(desc)) + end + function lrn_grad(input_grads_::tf.TensorHandle, input_image_::tf.TensorHandle, output_image_::tf.TensorHandle; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + desc = tf.EagerOp("LRNGrad") + tf.add_input(desc, input_grads_) + tf.add_input(desc, input_image_) + tf.add_input(desc, output_image_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + desc["T"] = tf.data_type(input_grads_) + desc["T"] = tf.data_type(input_image_) + desc["T"] = tf.data_type(output_image_) + (tf.execute(desc))[1] + end +end + + +""" + random_poisson_v2(shape, rate; seed=0, seed2=0, R=Float64, dtype=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoissonV2") do + desc = 
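+            # RandomPoissonV2 carries two dtype attributes, S for the shape input and
+            # R for the rate input; the eager method below overwrites both from the
+            # runtime handles via tf.data_type, whatever the S/R keywords said.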
tf.NodeDescription("RandomPoissonV2") + shape_ = convert(Tensor{Any}, shape_) + rate_ = convert(Tensor{Float64}, rate_) + (shape_,) = tf.tf_promote(shape_) + (rate_,) = tf.tf_promote(rate_) + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if R !== nothing + desc["R"] = Base.identity(R) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_poisson_v2(shape_::tf.TensorHandle, rate_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + desc = tf.EagerOp("RandomPoissonV2") + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if R !== nothing + desc["R"] = Base.identity(R) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["S"] = tf.data_type(shape_) + desc["R"] = tf.data_type(rate_) + (tf.execute(desc))[1] + end +end + + +""" + fifo_queue(; shapes=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueue") do + desc = tf.NodeDescription("FIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_proximal_gradient_descent(var, alpha, l1, l2, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (alpha_, l1_, l2_, grad_) = 
tf.tf_promote(alpha_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_non_serializable_dataset(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalNonSerializableDataset") do + desc = tf.NodeDescription("ExperimentalNonSerializableDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_non_serializable_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalNonSerializableDataset") + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_bytes_produced_stats_dataset(input_dataset, tag) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do + desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tag_ = convert(Tensor{String}, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_bytes_produced_stats_dataset(input_dataset_::tf.TensorHandle, tag_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = 
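+        # Like the other dataset ops, the experimental stats datasets are described
+        # by the output_types/output_shapes attribute pair, which fixes the element
+        # structure of the variant tensor they produce.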
map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + dilation2d_backprop_filter(input, filter, out_backprop) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2DBackpropFilter") do + desc = tf.NodeDescription("Dilation2DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function dilation2d_backprop_filter(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("Dilation2DBackpropFilter") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + _if(cond, input) + +output = cond ? 
then_branch(input) : else_branch(input) +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "_If") do + desc = tf.NodeDescription("_If") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _if(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + desc = tf.EagerOp("_If") + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + desc["Tcond"] = tf.data_type(cond_) + (tf.execute(desc))[1] + end +end + + +""" + bias_add_grad(out_backprop; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAddGrad") do + desc = tf.NodeDescription("BiasAddGrad") + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (out_backprop_,) = tf.tf_promote(out_backprop_) + tf.add_input(desc, out_backprop_) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function bias_add_grad(out_backprop_::tf.TensorHandle; name=nothing, data_format=nothing) + desc = tf.EagerOp("BiasAddGrad") + tf.add_input(desc, out_backprop_) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + reader_serialize_state_v2(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeStateV2") do + desc = tf.NodeDescription("ReaderSerializeStateV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_serialize_state_v2(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderSerializeStateV2") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + wrap_dataset_variant(input_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "WrapDatasetVariant") do + desc = tf.NodeDescription("WrapDatasetVariant") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function wrap_dataset_variant(input_handle_::tf.TensorHandle; 
name=nothing) + desc = tf.EagerOp("WrapDatasetVariant") + tf.add_input(desc, input_handle_) + (tf.execute(desc))[1] + end +end + + +""" + parallel_interleave_dataset_v2(input_dataset, other_arguments, cycle_length, block_length, num_parallel_calls; sloppy=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ParallelInterleaveDatasetV2") do + desc = tf.NodeDescription("ParallelInterleaveDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + tf.Tensor(tf.Operation(desc)) + end + function parallel_interleave_dataset_v2(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + desc = tf.EagerOp("ParallelInterleaveDatasetV2") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + (tf.execute(desc))[1] + end +end + + +""" + depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] 
= map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function depthwise_conv2d_native_backprop_input(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyRMSProp") do + desc = tf.NodeDescription("ResourceApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_accumulator_take_gradient(handle, num_required) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorTakeGradient") do + desc = tf.NodeDescription("SparseAccumulatorTakeGradient") + handle_ = 
convert(Tensor{String}, handle_) + num_required_ = convert(Tensor{Int32}, num_required_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_accumulator_take_gradient(handle_::tf.TensorHandle, num_required_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("SparseAccumulatorTakeGradient") + tf.add_input(desc, handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + tf.execute(desc) + end +end + + +""" + experimental_lmdb_dataset(filenames) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLMDBDataset") do + desc = tf.NodeDescription("ExperimentalLMDBDataset") + filenames_ = convert(Tensor{String}, filenames_) + tf.add_input(desc, filenames_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_lmdb_dataset(filenames_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalLMDBDataset") + tf.add_input(desc, filenames_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + stack_close_v2(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_close_v2(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackCloseV2") do + desc = tf.NodeDescription("StackCloseV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function stack_close_v2(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("StackCloseV2") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + map_size(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapSize") do + desc = tf.NodeDescription("MapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + 
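+        # MapSize takes no tensor inputs; it is configured purely through attributes
+        # (capacity, memory_limit, dtypes, container, shared_name), so the eager path
+        # only fills the attribute map before executing.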
desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdagradDA") do + desc = tf.NodeDescription("ResourceApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdagradDA") + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_forest_tree_size(tree_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSize") do + desc = tf.NodeDescription("TensorForestTreeSize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_size(tree_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorForestTreeSize") + tf.add_input(desc, tree_handle_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_diag_part(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_diag_part(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiagPart") do + desc = tf.NodeDescription("MatrixDiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function 
matrix_diag_part(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixDiagPart") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + reader_num_work_units_completed_v2(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_num_work_units_completed_v2(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_split_v3(handle, value, lengths, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV3") do + desc = tf.NodeDescription("TensorArraySplitV3") + handle_ = convert(Tensor{Any}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_split_v3(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySplitV3") + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SparseToDense") do + desc = tf.NodeDescription("SparseToDense") + sparse_indices_ = convert(Tensor{Any}, sparse_indices_) + sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1) + output_shape_ = convert(Tensor{Any}, output_shape_) + output_shape_ = output_shape_ - convert(tf.Tensor{eltype(output_shape_)}, 1) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + default_value_ = convert(Tensor{Any}, default_value_) + (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_) + (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, output_shape_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, default_value_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_to_dense(sparse_indices_::tf.TensorHandle, output_shape_::tf.TensorHandle, sparse_values_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("SparseToDense") + 
tf.add_input(desc, sparse_indices_) + tf.add_input(desc, output_shape_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, default_value_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + desc["Tindices"] = tf.data_type(sparse_indices_) + desc["Tindices"] = tf.data_type(output_shape_) + desc["T"] = tf.data_type(sparse_values_) + desc["T"] = tf.data_type(default_value_) + (tf.execute(desc))[1] + end +end + + +""" + tpu_replicated_input(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicated_input(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "TPUReplicatedInput") do + desc = tf.NodeDescription("TPUReplicatedInput") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_replicated_input(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("TPUReplicatedInput") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(inputs_) + (tf.execute(desc))[1] + end +end + + +""" + stack_close(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_close(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackClose") do + desc = tf.NodeDescription("StackClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function stack_close(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("StackClose") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + deserialize_many_sparse(serialized_sparse) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DeserializeManySparse") do + desc = tf.NodeDescription("DeserializeManySparse") + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function deserialize_many_sparse(serialized_sparse_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("DeserializeManySparse") + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + tf.execute(desc) + end +end + + +""" + _nccl_reduce_recv(input) + +Replacement node for NcclReduce. 
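+
+A hypothetical eager-mode call (a sketch only: this is an internal op, and the
+`input` handle, device count, and `shared_name` below are illustrative; each
+receiver must be paired with matching `_NcclReduceSend` ops on the peer devices):
+
+    out = _nccl_reduce_recv(input; reduction="sum", num_devices=2, shared_name="reduce_0")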
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceRecv") do + desc = tf.NodeDescription("_NcclReduceRecv") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _nccl_reduce_recv(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclReduceRecv") + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + mirror_pad_grad(input, paddings) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPadGrad") do + desc = tf.NodeDescription("MirrorPadGrad") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mirror_pad_grad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, mode=nothing) + desc = tf.EagerOp("MirrorPadGrad") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + desc["T"] = tf.data_type(input_) + desc["Tpaddings"] = tf.data_type(paddings_) + (tf.execute(desc))[1] + end +end + + +""" + broadcast_args(s0, s1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_args(s0_, s1_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastArgs") do + desc = tf.NodeDescription("BroadcastArgs") + s0_ = convert(Tensor{Int32}, s0_) + s1_ = convert(Tensor{Int32}, s1_) + (s0_, s1_) = tf.tf_promote(s0_, s1_) + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) + end + tf.Tensor(tf.Operation(desc)) + end + function broadcast_args(s0_::tf.TensorHandle, s1_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BroadcastArgs") + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) + desc["T"] = tf.data_type(s0_) + desc["T"] = tf.data_type(s1_) + (tf.execute(desc))[1] + end +end + + +""" + stateless_truncated_normal(shape, seed; dtype=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessTruncatedNormal") do + desc = tf.NodeDescription("StatelessTruncatedNormal") + shape_ = convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, 
seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_truncated_normal(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessTruncatedNormal") + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + desc["Tseed"] = tf.data_type(seed_) + (tf.execute(desc))[1] + end +end + + +""" + regex_full_match(input, pattern) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function regex_full_match(input_, pattern_; name=nothing) + local desc + tf.with_op_name(name, "RegexFullMatch") do + desc = tf.NodeDescription("RegexFullMatch") + input_ = convert(Tensor{String}, input_) + pattern_ = convert(Tensor{String}, pattern_) + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) + end + tf.Tensor(tf.Operation(desc)) + end + function regex_full_match(input_::tf.TensorHandle, pattern_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RegexFullMatch") + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) + (tf.execute(desc))[1] + end +end + + +""" + unwrap_dataset_variant(input_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "UnwrapDatasetVariant") do + desc = tf.NodeDescription("UnwrapDatasetVariant") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function unwrap_dataset_variant(input_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnwrapDatasetVariant") + tf.add_input(desc, input_handle_) + (tf.execute(desc))[1] + end +end + + +""" + empty(shape; init=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing) + local desc + tf.with_op_name(name, "Empty") do + desc = tf.NodeDescription("Empty") + shape_ = convert(Tensor{Int32}, shape_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if init !== nothing + desc["init"] = Base.Bool(init) + end + end + tf.Tensor(tf.Operation(desc)) + end + function empty(shape_::tf.TensorHandle; name=nothing, dtype=nothing, init=nothing) + desc = tf.EagerOp("Empty") + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if init !== nothing + desc["init"] = Base.Bool(init) + end + (tf.execute(desc))[1] + end +end + + +""" + outfeed_dequeue_tuple(; device_ordinal=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeueTuple") do + desc = tf.NodeDescription("OutfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + desc = tf.EagerOp("OutfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = 
map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + (tf.execute(desc))[1] + end +end + + +""" + div(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function div(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Div") do + desc = tf.NodeDescription("Div") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Div") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + barrier(; shapes=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Barrier") do + desc = tf.NodeDescription("Barrier") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Barrier") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + truncate_div(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncate_div(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateDiv") do + desc = tf.NodeDescription("TruncateDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function truncate_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TruncateDiv") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + unicode_encode(input_values, input_splits; errors=replace, replacement_char=65533) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + local desc + tf.with_op_name(name, "UnicodeEncode") do + desc = tf.NodeDescription("UnicodeEncode") + input_values_ = convert(Tensor{Int32}, 
input_values_) + input_splits_ = convert(Tensor{Int64}, input_splits_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_splits_) + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + tf.Tensor(tf.Operation(desc)) + end + function unicode_encode(input_values_::tf.TensorHandle, input_splits_::tf.TensorHandle; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + desc = tf.EagerOp("UnicodeEncode") + tf.add_input(desc, input_values_) + tf.add_input(desc, input_splits_) + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + (tf.execute(desc))[1] + end +end + + +""" + merge_summary(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "MergeSummary") do + desc = tf.NodeDescription("MergeSummary") + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function merge_summary(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("MergeSummary") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + fake_queue(resource) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_queue(resource_; name=nothing) + local desc + tf.with_op_name(name, "FakeQueue") do + desc = tf.NodeDescription("FakeQueue") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + end + tf.Tensor(tf.Operation(desc)) + end + function fake_queue(resource_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FakeQueue") + tf.add_input(desc, resource_) + (tf.execute(desc))[1] + end +end + + +""" + batch_cholesky(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_cholesky(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholesky") do + desc = tf.NodeDescription("BatchCholesky") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_cholesky(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchCholesky") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + iterator() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "Iterator") do + desc = tf.NodeDescription("Iterator") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, 
output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("Iterator") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + bessel_i1e(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bessel_i1e(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI1e") do + desc = tf.NodeDescription("BesselI1e") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function bessel_i1e(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BesselI1e") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + import_event(writer, event) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function import_event(writer_, event_; name=nothing) + local desc + tf.with_op_name(name, "ImportEvent") do + desc = tf.NodeDescription("ImportEvent") + writer_ = convert(Tensor{Any}, writer_) + event_ = convert(Tensor{String}, event_) + tf.add_input(desc, writer_) + tf.add_input(desc, event_) + end + tf.Tensor(tf.Operation(desc)) + end + function import_event(writer_::tf.TensorHandle, event_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ImportEvent") + tf.add_input(desc, writer_) + tf.add_input(desc, event_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_instance_norm(x, x_min, x_max; output_range_given=false, given_y_min=?, given_y_max=?, variance_epsilon=?, min_separation=?) 
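+
+A sketch of an eager-mode call, assuming `x`, `x_min`, and `x_max` are existing
+`tf.TensorHandle`s (the quantized input plus its `Float32` range; names are
+illustrative); the op has three outputs, so all of them come back from `tf.execute`:
+
+    y, y_min, y_max = quantized_instance_norm(x, x_min, x_max; output_range_given=false)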
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + local desc + tf.with_op_name(name, "QuantizedInstanceNorm") do + desc = tf.NodeDescription("QuantizedInstanceNorm") + x_ = convert(Tensor{Any}, x_) + x_min_ = convert(Tensor{Float32}, x_min_) + x_max_ = convert(Tensor{Float32}, x_max_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, x_min_) + tf.add_input(desc, x_max_) + if output_range_given !== nothing + desc["output_range_given"] = Base.Bool(output_range_given) + end + if given_y_min !== nothing + desc["given_y_min"] = Base.identity(given_y_min) + end + if given_y_max !== nothing + desc["given_y_max"] = Base.identity(given_y_max) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if min_separation !== nothing + desc["min_separation"] = Base.identity(min_separation) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_instance_norm(x_::tf.TensorHandle, x_min_::tf.TensorHandle, x_max_::tf.TensorHandle; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + desc = tf.EagerOp("QuantizedInstanceNorm") + tf.add_input(desc, x_) + tf.add_input(desc, x_min_) + tf.add_input(desc, x_max_) + if output_range_given !== nothing + desc["output_range_given"] = Base.Bool(output_range_given) + end + if given_y_min !== nothing + desc["given_y_min"] = Base.identity(given_y_min) + end + if given_y_max !== nothing + desc["given_y_max"] = Base.identity(given_y_max) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if min_separation !== nothing + desc["min_separation"] = Base.identity(min_separation) + end + desc["T"] = tf.data_type(x_) + tf.execute(desc) + end +end + + +""" + load_tpu_embedding_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_adagrad_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end 
+ if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_write_v3(handle, index, value, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV3") do + desc = tf.NodeDescription("TensorArrayWriteV3") + handle_ = convert(Tensor{Any}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_write_v3(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayWriteV3") + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + dense_to_dense_set_operation(set1, set2; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "DenseToDenseSetOperation") do + desc = tf.NodeDescription("DenseToDenseSetOperation") + set1_ = convert(Tensor{Any}, set1_) + set2_ = convert(Tensor{Any}, set2_) + (set1_, set2_) = tf.tf_promote(set1_, set2_) + tf.add_input(desc, set1_) + tf.add_input(desc, set2_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function dense_to_dense_set_operation(set1_::tf.TensorHandle, set2_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing) + desc = tf.EagerOp("DenseToDenseSetOperation") + tf.add_input(desc, set1_) + tf.add_input(desc, set2_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + desc["T"] = tf.data_type(set1_) + desc["T"] = tf.data_type(set2_) + tf.execute(desc) + end +end + + +""" + encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + local desc + tf.with_op_name(name, "EncodeJpeg") do + desc = tf.NodeDescription("EncodeJpeg") + image_ = convert(Tensor{UInt8}, image_) + tf.add_input(desc, image_) + if format !== nothing + desc["format"] = Base.String(format) + end + if 
quality !== nothing + desc["quality"] = Base.Int(quality) + end + if progressive !== nothing + desc["progressive"] = Base.Bool(progressive) + end + if optimize_size !== nothing + desc["optimize_size"] = Base.Bool(optimize_size) + end + if chroma_downsampling !== nothing + desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) + end + if density_unit !== nothing + desc["density_unit"] = Base.String(density_unit) + end + if x_density !== nothing + desc["x_density"] = Base.Int(x_density) + end + if y_density !== nothing + desc["y_density"] = Base.Int(y_density) + end + if xmp_metadata !== nothing + desc["xmp_metadata"] = Base.String(xmp_metadata) + end + end + tf.Tensor(tf.Operation(desc)) + end + function encode_jpeg(image_::tf.TensorHandle; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + desc = tf.EagerOp("EncodeJpeg") + tf.add_input(desc, image_) + if format !== nothing + desc["format"] = Base.String(format) + end + if quality !== nothing + desc["quality"] = Base.Int(quality) + end + if progressive !== nothing + desc["progressive"] = Base.Bool(progressive) + end + if optimize_size !== nothing + desc["optimize_size"] = Base.Bool(optimize_size) + end + if chroma_downsampling !== nothing + desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) + end + if density_unit !== nothing + desc["density_unit"] = Base.String(density_unit) + end + if x_density !== nothing + desc["x_density"] = Base.Int(x_density) + end + if y_density !== nothing + desc["y_density"] = Base.Int(y_density) + end + if xmp_metadata !== nothing + desc["xmp_metadata"] = Base.String(xmp_metadata) + end + (tf.execute(desc))[1] + end +end + + +""" + inplace_update(x, i, v) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_update(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceUpdate") do + desc = tf.NodeDescription("InplaceUpdate") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + end + tf.Tensor(tf.Operation(desc)) + end + function inplace_update(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InplaceUpdate") + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(v_) + (tf.execute(desc))[1] + end +end + + +""" + fused_pad_conv2d(input, paddings, filter) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedPadConv2D") do + desc = tf.NodeDescription("FusedPadConv2D") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
fused_pad_conv2d(input_::tf.TensorHandle, paddings_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, mode=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("FusedPadConv2D") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_relu(features, min_features, max_features; out_type=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu") do + desc = tf.NodeDescription("QuantizedRelu") + features_ = convert(Tensor{Any}, features_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_relu(features_::tf.TensorHandle, min_features_::tf.TensorHandle, max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedRelu") + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["Tinput"] = tf.data_type(features_) + tf.execute(desc) + end +end + + +""" + gather_nd(params, indices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather_nd(params_, indices_; name=nothing) + local desc + tf.with_op_name(name, "GatherNd") do + desc = tf.NodeDescription("GatherNd") + params_ = convert(Tensor{Any}, params_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (params_,) = tf.tf_promote(params_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + end + tf.Tensor(tf.Operation(desc)) + end + function gather_nd(params_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GatherNd") + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + desc["Tparams"] = tf.data_type(params_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + placeholder(; shape=?) 
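+
+A graph-mode sketch (a `Placeholder` must be fed at run time, so it is only
+meaningful when building a graph; the dtype and shape values are illustrative):
+
+    x = placeholder(; dtype=Float32, shape=[3, 4])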
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "Placeholder") do + desc = tf.NodeDescription("Placeholder") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function placeholder(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("Placeholder") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + (tf.execute(desc))[1] + end +end + + +""" + filter_by_last_component_dataset(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterByLastComponentDataset") do + desc = tf.NodeDescription("FilterByLastComponentDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function filter_by_last_component_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FilterByLastComponentDataset") + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + clip_by_value(t, clip_value_min, clip_value_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) + local desc + tf.with_op_name(name, "ClipByValue") do + desc = tf.NodeDescription("ClipByValue") + t_ = convert(Tensor{Any}, t_) + clip_value_min_ = convert(Tensor{Any}, clip_value_min_) + clip_value_max_ = convert(Tensor{Any}, clip_value_max_) + (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_) + tf.add_input(desc, t_) + tf.add_input(desc, clip_value_min_) + tf.add_input(desc, clip_value_max_) + end + tf.Tensor(tf.Operation(desc)) + end + function clip_by_value(t_::tf.TensorHandle, clip_value_min_::tf.TensorHandle, clip_value_max_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ClipByValue") + tf.add_input(desc, t_) + tf.add_input(desc, clip_value_min_) + tf.add_input(desc, clip_value_max_) + desc["T"] = tf.data_type(t_) + desc["T"] = tf.data_type(clip_value_min_) + desc["T"] = tf.data_type(clip_value_max_) + (tf.execute(desc))[1] + end +end + + +""" + image_summary(tag, tensor; max_images=3, bad_color=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + local desc + tf.with_op_name(name, "ImageSummary") do + desc = tf.NodeDescription("ImageSummary") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + if bad_color !== nothing + desc["bad_color"] = TensorFlow.RawTensor(bad_color) + end + end + tf.Tensor(tf.Operation(desc)) + end + function image_summary(tag_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, max_images=nothing, bad_color=nothing) + desc = tf.EagerOp("ImageSummary") + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + if bad_color !== nothing + desc["bad_color"] = TensorFlow.RawTensor(bad_color) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_adadelta_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + string_join(inputs; separator=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing) + local desc + tf.with_op_name(name, "StringJoin") do + desc = tf.NodeDescription("StringJoin") + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_join(inputs_::tf.TensorHandle; name=nothing, N=nothing, separator=nothing) + desc = tf.EagerOp("StringJoin") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if separator !== nothing + desc["separator"] = Base.String(separator) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_scatter_nd_add(ref, indices, updates; use_locking=true) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdAdd") do + desc = tf.NodeDescription("ResourceScatterNdAdd") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_nd_add(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceScatterNdAdd") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle, bucket_boundaries) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, bucket_boundaries_) + if num_streams !== nothing + desc["num_streams"] = Base.Int(num_streams) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_::tf.TensorHandle, bucket_boundaries_::tf.TensorHandle; name=nothing, num_streams=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize") + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, bucket_boundaries_) + if num_streams !== nothing + desc["num_streams"] = Base.Int(num_streams) + end + (tf.execute(desc))[1] + end +end + + +""" + left_shift(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function left_shift(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LeftShift") do + desc = tf.NodeDescription("LeftShift") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function left_shift(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LeftShift") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + requantize_per_channel(input, input_min, input_max, requested_output_min, requested_output_max; out_type=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "RequantizePerChannel") do + desc = tf.NodeDescription("RequantizePerChannel") + input_ = convert(Tensor{Float32}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) + requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, requested_output_min_) + tf.add_input(desc, requested_output_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function requantize_per_channel(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, requested_output_min_::tf.TensorHandle, requested_output_max_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("RequantizePerChannel") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, requested_output_min_) + tf.add_input(desc, requested_output_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + tensor_scatter_add(tensor, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterAdd") do + desc = tf.NodeDescription("TensorScatterAdd") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_scatter_add(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorScatterAdd") + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + desc["T"] = tf.data_type(tensor_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + _var_handles_op() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_VarHandlesOp") do + desc = tf.NodeDescription("_VarHandlesOp") + if containers !== nothing + desc["containers"] = map(Base.identity, containers) + end + if shared_names !== nothing + desc["shared_names"] = map(Base.identity, shared_names) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, 
tf.Tensor(op, out_idx)) + end + out + end + function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + desc = tf.EagerOp("_VarHandlesOp") + if containers !== nothing + desc["containers"] = map(Base.identity, containers) + end + if shared_names !== nothing + desc["shared_names"] = map(Base.identity, shared_names) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + tf.execute(desc) + end +end + + +""" + ifft3d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft3d(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT3D") do + desc = tf.NodeDescription("IFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function ifft3d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IFFT3D") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + euclidean_norm(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "EuclideanNorm") do + desc = tf.NodeDescription("EuclideanNorm") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function euclidean_norm(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("EuclideanNorm") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + ref_select(index, inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefSelect") do + desc = tf.NodeDescription("RefSelect") + index_ = convert(Tensor{Int32}, index_) + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, index_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ref_select(index_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("RefSelect") + tf.add_input(desc, index_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(inputs_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_tensor_slice_dataset(indices, values, dense_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorSliceDataset") do + desc = tf.NodeDescription("SparseTensorSliceDataset") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_tensor_slice_dataset(indices_::tf.TensorHandle, values_::tf.TensorHandle, dense_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseTensorSliceDataset") + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) + desc["Tvalues"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + batch_ifft2d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft2d(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT2D") do + desc = tf.NodeDescription("BatchIFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_ifft2d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchIFFT2D") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_gather(handle, indices, flow_in; element_shape=?) 
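+ + Gathers specific elements from the TensorArray referenced by handle into one output tensor; all elements selected by indices must have the same shape.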
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGather") do + desc = tf.NodeDescription("TensorArrayGather") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_gather(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGather") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_mean_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentMeanWithNumSegments") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + ensure_shape(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "EnsureShape") do + desc = tf.NodeDescription("EnsureShape") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ensure_shape(input_::tf.TensorHandle; name=nothing, shape=nothing) + desc = tf.EagerOp("EnsureShape") + tf.add_input(desc, input_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + 
apply_proximal_gradient_descent(var, alpha, l1, l2, delta; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalGradientDescent") do + desc = tf.NodeDescription("ApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + delta_ = convert(Tensor{Any}, delta_) + (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyProximalGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(delta_) + (tf.execute(desc))[1] + end +end + + +""" + collective_reduce(input; wait_for=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + local desc + tf.with_op_name(name, "CollectiveReduce") do + desc = tf.NodeDescription("CollectiveReduce") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if merge_op !== nothing + desc["merge_op"] = Base.String(merge_op) + end + if final_op !== nothing + desc["final_op"] = Base.String(final_op) + end + if subdiv_offsets !== nothing + desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) + end + if wait_for !== nothing + desc["wait_for"] = map(Base.identity, wait_for) + end + end + tf.Tensor(tf.Operation(desc)) + end + function collective_reduce(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + desc = tf.EagerOp("CollectiveReduce") + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if merge_op !== nothing + desc["merge_op"] = Base.String(merge_op) + end + if final_op !== nothing + desc["final_op"] = Base.String(final_op) + end + if subdiv_offsets !== nothing + desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) + end + if wait_for !== 
nothing + desc["wait_for"] = map(Base.identity, wait_for) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + is_nan(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_nan(x_; name=nothing) + local desc + tf.with_op_name(name, "IsNan") do + desc = tf.NodeDescription("IsNan") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_nan(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IsNan") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdaMax") do + desc = tf.NodeDescription("ApplyAdaMax") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_ada_max(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdaMax") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(v_) + desc["T"] = tf.data_type(beta1_power_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(beta1_) + desc["T"] = tf.data_type(beta2_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + decode_and_crop_jpeg(contents, crop_window; channels=0, ratio=1, fancy_upscaling=true, try_recover_truncated=false, acceptable_fraction=?, dct_method=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeAndCropJpeg") do + desc = tf.NodeDescription("DecodeAndCropJpeg") + contents_ = 
convert(Tensor{String}, contents_) + crop_window_ = convert(Tensor{Int32}, crop_window_) + tf.add_input(desc, contents_) + tf.add_input(desc, crop_window_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_and_crop_jpeg(contents_::tf.TensorHandle, crop_window_::tf.TensorHandle; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + desc = tf.EagerOp("DecodeAndCropJpeg") + tf.add_input(desc, contents_) + tf.add_input(desc, crop_window_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + (tf.execute(desc))[1] + end +end + + +""" + apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyCenteredRMSProp") do + desc = tf.NodeDescription("ApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyCenteredRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing 
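+ # use_locking maps straight onto the op's attr; when left as nothing, the kernel's registered default (false) applies.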
+ desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(mg_) + desc["T"] = tf.data_type(ms_) + desc["T"] = tf.data_type(mom_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilterV2") do + desc = tf.NodeDescription("Conv3DBackpropFilterV2") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv3d_backprop_filter_v2(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropFilterV2") + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_triangular_solve(matrix, rhs; lower=true, adjoint=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixTriangularSolve") do + desc = tf.NodeDescription("MatrixTriangularSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_triangular_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, lower=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixTriangularSolve") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = tf.data_type(rhs_) + 
(tf.execute(desc))[1] + end +end + + +""" + reader_num_work_units_completed(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_num_work_units_completed(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + write_audio_summary(writer, step, tag, tensor, sample_rate; max_outputs=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "WriteAudioSummary") do + desc = tf.NodeDescription("WriteAudioSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + tf.Tensor(tf.Operation(desc)) + end + function write_audio_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, tensor_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, max_outputs=nothing) + desc = tf.EagerOp("WriteAudioSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + (tf.execute(desc))[1] + end +end + + +""" + sharded_filespec(basename, num_shards) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilespec") do + desc = tf.NodeDescription("ShardedFilespec") + basename_ = convert(Tensor{String}, basename_) + num_shards_ = convert(Tensor{Int32}, num_shards_) + tf.add_input(desc, basename_) + tf.add_input(desc, num_shards_) + end + tf.Tensor(tf.Operation(desc)) + end + function sharded_filespec(basename_::tf.TensorHandle, num_shards_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ShardedFilespec") + tf.add_input(desc, basename_) + tf.add_input(desc, num_shards_) + (tf.execute(desc))[1] + end +end + + +""" + div_no_nan(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function div_no_nan(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "DivNoNan") do + desc = tf.NodeDescription("DivNoNan") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function div_no_nan(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DivNoNan") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = 
tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_accumulator_apply_gradient(handle, local_step, gradient_indices, gradient_values, gradient_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorApplyGradient") do + desc = tf.NodeDescription("SparseAccumulatorApplyGradient") + handle_ = convert(Tensor{String}, handle_) + local_step_ = convert(Tensor{Int64}, local_step_) + gradient_indices_ = convert(Tensor{Int64}, gradient_indices_) + gradient_values_ = convert(Tensor{Any}, gradient_values_) + gradient_shape_ = convert(Tensor{Int64}, gradient_shape_) + (gradient_values_,) = tf.tf_promote(gradient_values_) + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_indices_) + tf.add_input(desc, gradient_values_) + tf.add_input(desc, gradient_shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if has_known_shape !== nothing + desc["has_known_shape"] = Base.Bool(has_known_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_accumulator_apply_gradient(handle_::tf.TensorHandle, local_step_::tf.TensorHandle, gradient_indices_::tf.TensorHandle, gradient_values_::tf.TensorHandle, gradient_shape_::tf.TensorHandle; name=nothing, dtype=nothing, has_known_shape=nothing) + desc = tf.EagerOp("SparseAccumulatorApplyGradient") + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_indices_) + tf.add_input(desc, gradient_values_) + tf.add_input(desc, gradient_shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if has_known_shape !== nothing + desc["has_known_shape"] = Base.Bool(has_known_shape) + end + desc["dtype"] = tf.data_type(gradient_values_) + (tf.execute(desc))[1] + end +end + + +""" + ragged_tensor_to_sparse(rt_nested_splits, rt_dense_values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + local desc + tf.with_op_name(name, "RaggedTensorToSparse") do + desc = tf.NodeDescription("RaggedTensorToSparse") + rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_] + rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_) + (rt_dense_values_,) = tf.tf_promote(rt_dense_values_) + tf.add_input(desc, rt_nested_splits_) + tf.add_input(desc, rt_dense_values_) + if RAGGED_RANK !== nothing + desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ragged_tensor_to_sparse(rt_nested_splits_::tf.TensorHandle, rt_dense_values_::tf.TensorHandle; name=nothing, RAGGED_RANK=nothing) + desc = tf.EagerOp("RaggedTensorToSparse") + tf.add_input(desc, rt_nested_splits_) + tf.add_input(desc, rt_dense_values_) + if RAGGED_RANK !== nothing + desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) + end + desc["T"] = tf.data_type(rt_dense_values_) + tf.execute(desc) + end +end + + +""" + extract_volume_patches(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + 
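# Graph-mode builder: the NodeDescription is populated inside with_op_name and then wrapped in a Tensor; the eager twin below mirrors it with tf.EagerOp and executes immediately. +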
local desc + tf.with_op_name(name, "ExtractVolumePatches") do + desc = tf.NodeDescription("ExtractVolumePatches") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function extract_volume_patches(input_::tf.TensorHandle; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("ExtractVolumePatches") + tf.add_input(desc, input_) + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + barrier_insert_many(handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) + local desc + tf.with_op_name(name, "BarrierInsertMany") do + desc = tf.NodeDescription("BarrierInsertMany") + handle_ = convert(Tensor{String}, handle_) + keys_ = convert(Tensor{String}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + if component_index !== nothing + component_index = Base.Int(component_index) - 1 + end + if component_index !== nothing + desc["component_index"] = Base.Int(component_index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function barrier_insert_many(handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, component_index=nothing) + desc = tf.EagerOp("BarrierInsertMany") + tf.add_input(desc, handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + if component_index !== nothing + component_index = Base.Int(component_index) - 1 + end + if component_index !== nothing + desc["component_index"] = Base.Int(component_index) + end + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + const_() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function const_(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "Const") do + desc = tf.NodeDescription("Const") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function const_(; name=nothing, value=nothing, dtype=nothing) + desc = tf.EagerOp("Const") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + space_to_batch(input, paddings) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "SpaceToBatch") do + desc = tf.NodeDescription("SpaceToBatch") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + 
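# tf_promote converts plain Julia values into Tensors of a suitable element type so the op's type attrs ("T", "Tpaddings") line up with the inputs attached below. +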
tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + tf.Tensor(tf.Operation(desc)) + end + function space_to_batch(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, block_size=nothing) + desc = tf.EagerOp("SpaceToBatch") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + desc["T"] = tf.data_type(input_) + desc["Tpaddings"] = tf.data_type(paddings_) + (tf.execute(desc))[1] + end +end + + +""" + stage_size(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageSize") do + desc = tf.NodeDescription("StageSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StageSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + empty_tensor_list(element_shape, max_num_elements) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "EmptyTensorList") do + desc = tf.NodeDescription("EmptyTensorList") + element_shape_ = convert(Tensor{Any}, element_shape_) + max_num_elements_ = convert(Tensor{Int32}, max_num_elements_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, max_num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function empty_tensor_list(element_shape_::tf.TensorHandle, max_num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("EmptyTensorList") + tf.add_input(desc, element_shape_) + tf.add_input(desc, max_num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["shape_type"] = tf.data_type(element_shape_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; 
out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DAndRequantize") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + lu(input; output_idx_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing) + local desc + tf.with_op_name(name, "Lu") do + desc = tf.NodeDescription("Lu") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if output_idx_type !== nothing + desc["output_idx_type"] = Base.identity(output_idx_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function lu(input_::tf.TensorHandle; name=nothing, output_idx_type=nothing) + desc = 
tf.EagerOp("Lu") + tf.add_input(desc, input_) + if output_idx_type !== nothing + desc["output_idx_type"] = Base.identity(output_idx_type) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + decode_compressed(bytes; compression_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "DecodeCompressed") do + desc = tf.NodeDescription("DecodeCompressed") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_compressed(bytes_::tf.TensorHandle; name=nothing, compression_type=nothing) + desc = tf.EagerOp("DecodeCompressed") + tf.add_input(desc, bytes_) + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + (tf.execute(desc))[1] + end +end + + +""" + get_session_tensor(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "GetSessionTensor") do + desc = tf.NodeDescription("GetSessionTensor") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function get_session_tensor(handle_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("GetSessionTensor") + tf.add_input(desc, handle_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_gather_v3(handle, indices, flow_in; element_shape=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV3") do + desc = tf.NodeDescription("TensorArrayGatherV3") + handle_ = convert(Tensor{Any}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_gather_v3(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGatherV3") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters, accumulators, linears, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + linears_ = convert(Tensor{Float32}, linears_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, linears_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + destroy_resource_op(resource; ignore_lookup_error=true) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) + local desc + tf.with_op_name(name, "DestroyResourceOp") do + desc = tf.NodeDescription("DestroyResourceOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + end + tf.Tensor(tf.Operation(desc)) + end + function destroy_resource_op(resource_::tf.TensorHandle; name=nothing, ignore_lookup_error=nothing) + desc = tf.EagerOp("DestroyResourceOp") + tf.add_input(desc, resource_) + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + (tf.execute(desc))[1] + end +end + + +""" + text_line_reader(; skip_header_lines=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReader") do + desc = tf.NodeDescription("TextLineReader") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TextLineReader") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + create_summary_db_writer(writer, db_uri, experiment_name, run_name, user_name) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + local desc + tf.with_op_name(name, "CreateSummaryDbWriter") do + desc = tf.NodeDescription("CreateSummaryDbWriter") + writer_ = convert(Tensor{Any}, writer_) + db_uri_ = convert(Tensor{String}, db_uri_) + experiment_name_ = convert(Tensor{String}, experiment_name_) + run_name_ = convert(Tensor{String}, run_name_) + user_name_ = convert(Tensor{String}, user_name_) + tf.add_input(desc, writer_) + tf.add_input(desc, db_uri_) + tf.add_input(desc, experiment_name_) + tf.add_input(desc, run_name_) + tf.add_input(desc, user_name_) + end + tf.Tensor(tf.Operation(desc)) + end + function create_summary_db_writer(writer_::tf.TensorHandle, db_uri_::tf.TensorHandle, experiment_name_::tf.TensorHandle, run_name_::tf.TensorHandle, user_name_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CreateSummaryDbWriter") + tf.add_input(desc, writer_) + tf.add_input(desc, db_uri_) + tf.add_input(desc, experiment_name_) + tf.add_input(desc, run_name_) + tf.add_input(desc, user_name_) + (tf.execute(desc))[1] + end +end + + +""" + tanh_grad(y, dy) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tanh_grad(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "TanhGrad") do + desc = tf.NodeDescription("TanhGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = 
convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function tanh_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TanhGrad") + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + (tf.execute(desc))[1] + end +end + + +""" + decode_base64(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_base64(input_; name=nothing) + local desc + tf.with_op_name(name, "DecodeBase64") do + desc = tf.NodeDescription("DecodeBase64") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function decode_base64(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DecodeBase64") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradV2") do + desc = tf.NodeDescription("MaxPoolGradGradV2") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + grad_ = convert(Tensor{Any}, grad_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad_grad_v2(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle, ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradGradV2") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + audio_summary_v2(tag, tensor, sample_rate; max_outputs=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "AudioSummaryV2") do + desc = tf.NodeDescription("AudioSummaryV2") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
audio_summary_v2(tag_::tf.TensorHandle, tensor_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, max_outputs=nothing) + desc = tf.EagerOp("AudioSummaryV2") + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + (tf.execute(desc))[1] + end +end + + +""" + stateful_partitioned_call(args; config=, config_proto=, executor_type=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "StatefulPartitionedCall") do + desc = tf.NodeDescription("StatefulPartitionedCall") + args_ = [convert(Tensor{Any}, x) for x = args_] + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateful_partitioned_call(args_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + desc = tf.EagerOp("StatefulPartitionedCall") + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + (tf.execute(desc))[1] + end +end + + +""" + _scoped_allocator_concat(backing, inputs; reshape=false) + +Acts like a Concat Op that merges multple tensors into one, however it must +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorConcat") do + desc = tf.NodeDescription("_ScopedAllocatorConcat") + backing_ = convert(Tensor{Any}, backing_) + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (backing_, inputs_) = tf.tf_promote(backing_, inputs_) + tf.add_input(desc, backing_) + tf.add_input(desc, inputs_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if reshape !== nothing + desc["reshape"] = Base.Bool(reshape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _scoped_allocator_concat(backing_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + desc = tf.EagerOp("_ScopedAllocatorConcat") + tf.add_input(desc, backing_) + tf.add_input(desc, inputs_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if reshape !== 
nothing + desc["reshape"] = Base.Bool(reshape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(backing_) + desc["T"] = tf.data_type(inputs_) + (tf.execute(desc))[1] + end +end + + +""" + fake_quant_with_min_max_args_gradient(gradients, inputs; min=?, max=?, num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") + gradients_ = convert(Tensor{Float32}, gradients_) + inputs_ = convert(Tensor{Float32}, inputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fake_quant_with_min_max_args_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient") + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + (tf.execute(desc))[1] + end +end + + +""" + batch_svd(input; compute_uv=true, full_matrices=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "BatchSvd") do + desc = tf.NodeDescription("BatchSvd") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function batch_svd(input_::tf.TensorHandle; name=nothing, compute_uv=nothing, full_matrices=nothing) + desc = tf.EagerOp("BatchSvd") + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + map_stage(key, indices, values; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapStage") do + desc = tf.NodeDescription("MapStage") + key_ = convert(Tensor{Int64}, 
key_) + indices_ = convert(Tensor{Int32}, indices_) + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_stage(key_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapStage") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrl") do + desc = tf.NodeDescription("ResourceSparseApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyFtrl") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + 
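+            # `indices_` was shifted by -1 above: Julia indexing is 1-based,
+            # while the underlying TensorFlow kernel consumes 0-based indices.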
tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + resize_nearest_neighbor(images, size; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighbor") do + desc = tf.NodeDescription("ResizeNearestNeighbor") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_nearest_neighbor(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeNearestNeighbor") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_csv_dataset(filenames, compression_type, buffer_size, header, field_delim, use_quote_delim, na_value, select_cols, record_defaults) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalCSVDataset") do + desc = tf.NodeDescription("ExperimentalCSVDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + header_ = convert(Tensor{Bool}, header_) + field_delim_ = convert(Tensor{String}, field_delim_) + use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_) + na_value_ = convert(Tensor{String}, na_value_) + select_cols_ = convert(Tensor{Int64}, select_cols_) + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, header_) + tf.add_input(desc, field_delim_) + tf.add_input(desc, use_quote_delim_) + tf.add_input(desc, na_value_) + tf.add_input(desc, select_cols_) + tf.add_input(desc, record_defaults_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_csv_dataset(filenames_::tf.TensorHandle, compression_type_::tf.TensorHandle, buffer_size_::tf.TensorHandle, header_::tf.TensorHandle, field_delim_::tf.TensorHandle, use_quote_delim_::tf.TensorHandle, na_value_::tf.TensorHandle, select_cols_::tf.TensorHandle, record_defaults_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalCSVDataset") + tf.add_input(desc, filenames_) + tf.add_input(desc, 
compression_type_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, header_) + tf.add_input(desc, field_delim_) + tf.add_input(desc, use_quote_delim_) + tf.add_input(desc, na_value_) + tf.add_input(desc, select_cols_) + tf.add_input(desc, record_defaults_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + _mkl_mul(x, y, mkl_x, mkl_y) + +Returns x * y element-wise. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMul") do + desc = tf.NodeDescription("_MklMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _mkl_mul(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_MklMul") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + batch_matrix_diag(diagonal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDiag") do + desc = tf.NodeDescription("BatchMatrixDiag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_diag(diagonal_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchMatrixDiag") + tf.add_input(desc, diagonal_) + desc["T"] = tf.data_type(diagonal_) + (tf.execute(desc))[1] + end +end + + +""" + is_inf(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_inf(x_; name=nothing) + local desc + tf.with_op_name(name, "IsInf") do + desc = tf.NodeDescription("IsInf") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_inf(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IsInf") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + fixed_unigram_candidate_sampler(true_classes; vocab_file=, distortion=?, num_reserved_ids=0, num_shards=1, shard=0, unigrams=Int64[], seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FixedUnigramCandidateSampler") do + desc = tf.NodeDescription("FixedUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if 
num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if vocab_file !== nothing + desc["vocab_file"] = Base.String(vocab_file) + end + if distortion !== nothing + desc["distortion"] = Base.identity(distortion) + end + if num_reserved_ids !== nothing + desc["num_reserved_ids"] = Base.Int(num_reserved_ids) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard !== nothing + desc["shard"] = Base.Int(shard) + end + if unigrams !== nothing + desc["unigrams"] = map(Base.identity, unigrams) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fixed_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FixedUnigramCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if vocab_file !== nothing + desc["vocab_file"] = Base.String(vocab_file) + end + if distortion !== nothing + desc["distortion"] = Base.identity(distortion) + end + if num_reserved_ids !== nothing + desc["num_reserved_ids"] = Base.Int(num_reserved_ids) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard !== nothing + desc["shard"] = Base.Int(shard) + end + if unigrams !== nothing + desc["unigrams"] = map(Base.identity, unigrams) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) + end +end + + +""" + sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrlV2") do + desc = tf.NodeDescription("SparseApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + 
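+            # The tf_promote call above coerces var/accum/linear/grad and the
+            # scalar hyperparameters to one common element type, so the op's
+            # single "T" attribute is well-defined; `indices_` is promoted
+            # separately for the "Tindices" attribute.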
tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyFtrlV2") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(linear_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(l2_shrinkage_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + unravel_index(indices, dims) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unravel_index(indices_, dims_; name=nothing) + local desc + tf.with_op_name(name, "UnravelIndex") do + desc = tf.NodeDescription("UnravelIndex") + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + dims_ = convert(Tensor{Int32}, dims_) + dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) + (indices_, dims_) = tf.tf_promote(indices_, dims_) + tf.add_input(desc, indices_) + tf.add_input(desc, dims_) + end + tf.Tensor(tf.Operation(desc)) + end + function unravel_index(indices_::tf.TensorHandle, dims_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnravelIndex") + tf.add_input(desc, indices_) + tf.add_input(desc, dims_) + desc["Tidx"] = tf.data_type(indices_) + desc["Tidx"] = tf.data_type(dims_) + (tf.execute(desc))[1] + end +end + + +""" + max(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Max") do + desc = tf.NodeDescription("Max") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Max") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = 
tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + ifft2d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft2d(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT2D") do + desc = tf.NodeDescription("IFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function ifft2d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IFFT2D") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_concat(indices, values, shapes) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + local desc + tf.with_op_name(name, "SparseConcat") do + desc = tf.NodeDescription("SparseConcat") + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + values_ = [convert(Tensor{Any}, x) for x = values_] + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + if concat_dim !== nothing + concat_dim = Base.Int(concat_dim) - 1 + end + if concat_dim !== nothing + desc["concat_dim"] = Base.Int(concat_dim) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_concat(indices_::tf.TensorHandle, values_::tf.TensorHandle, shapes_::tf.TensorHandle; name=nothing, concat_dim=nothing, N=nothing) + desc = tf.EagerOp("SparseConcat") + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + if concat_dim !== nothing + concat_dim = Base.Int(concat_dim) - 1 + end + if concat_dim !== nothing + desc["concat_dim"] = Base.Int(concat_dim) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(values_) + tf.execute(desc) + end +end + + +""" + histogram_summary(tag, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function histogram_summary(tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "HistogramSummary") do + desc = tf.NodeDescription("HistogramSummary") + tag_ = convert(Tensor{String}, tag_) + values_ = convert(Tensor{Float32}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, tag_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function histogram_summary(tag_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("HistogramSummary") + tf.add_input(desc, tag_) + tf.add_input(desc, values_) + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + segment_sum(data, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentSum") do + desc = tf.NodeDescription("SegmentSum") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) 
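+            # `data_` and `segment_ids_` are promoted separately: in the eager
+            # method below they feed two distinct type attributes ("T" and
+            # "Tindices") rather than sharing one element type.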
+ end + tf.Tensor(tf.Operation(desc)) + end + function segment_sum(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SegmentSum") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + (tf.execute(desc))[1] + end +end + + +""" + exp(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function exp(x_; name=nothing) + local desc + tf.with_op_name(name, "Exp") do + desc = tf.NodeDescription("Exp") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function exp(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Exp") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + configure_distributed_tpu(; embedding_config=, tpu_embedding_config=, is_global_init=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + local desc + tf.with_op_name(name, "ConfigureDistributedTPU") do + desc = tf.NodeDescription("ConfigureDistributedTPU") + if embedding_config !== nothing + desc["embedding_config"] = Base.String(embedding_config) + end + if tpu_embedding_config !== nothing + desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) + end + if is_global_init !== nothing + desc["is_global_init"] = Base.Bool(is_global_init) + end + end + tf.Tensor(tf.Operation(desc)) + end + function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + desc = tf.EagerOp("ConfigureDistributedTPU") + if embedding_config !== nothing + desc["embedding_config"] = Base.String(embedding_config) + end + if tpu_embedding_config !== nothing + desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) + end + if is_global_init !== nothing + desc["is_global_init"] = Base.Bool(is_global_init) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_scatter_nd_sub(ref, indices, updates; use_locking=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdSub") do + desc = tf.NodeDescription("ResourceScatterNdSub") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_nd_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceScatterNdSub") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + 
_xla_send_from_host(inputs, dynamic_key) + +A placeholder op for multiple values that will be sent from TensorFlow to a +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaSendFromHost") do + desc = tf.NodeDescription("_XlaSendFromHost") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + dynamic_key_ = convert(Tensor{String}, dynamic_key_) + tf.add_input(desc, inputs_) + tf.add_input(desc, dynamic_key_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _xla_send_from_host(inputs_::tf.TensorHandle, dynamic_key_::tf.TensorHandle; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + desc = tf.EagerOp("_XlaSendFromHost") + tf.add_input(desc, inputs_) + tf.add_input(desc, dynamic_key_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + (tf.execute(desc))[1] + end +end + + +""" + get_session_handle_v2(value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_handle_v2(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandleV2") do + desc = tf.NodeDescription("GetSessionHandleV2") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + end + tf.Tensor(tf.Operation(desc)) + end + function get_session_handle_v2(value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GetSessionHandleV2") + tf.add_input(desc, value_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + relu_grad(gradients, features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu_grad(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "ReluGrad") do + desc = tf.NodeDescription("ReluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function relu_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReluGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + unsorted_segment_min(data, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMin") do + desc = tf.NodeDescription("UnsortedSegmentMin") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = 
tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function unsorted_segment_min(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnsortedSegmentMin") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + parse_example(serialized, names, sparse_keys, dense_keys, dense_defaults) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseExample") do + desc = tf.NodeDescription("ParseExample") + serialized_ = convert(Tensor{String}, serialized_) + names_ = convert(Tensor{String}, names_) + sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_] + dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_] + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, names_) + tf.add_input(desc, sparse_keys_) + tf.add_input(desc, dense_keys_) + tf.add_input(desc, dense_defaults_) + if Nsparse !== nothing + desc["Nsparse"] = Base.Int(Nsparse) + end + if Ndense !== nothing + desc["Ndense"] = Base.Int(Ndense) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function parse_example(serialized_::tf.TensorHandle, names_::tf.TensorHandle, sparse_keys_::tf.TensorHandle, dense_keys_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + desc = tf.EagerOp("ParseExample") + tf.add_input(desc, serialized_) + tf.add_input(desc, names_) + tf.add_input(desc, sparse_keys_) + tf.add_input(desc, dense_keys_) + tf.add_input(desc, dense_defaults_) + if Nsparse !== nothing + desc["Nsparse"] = Base.Int(Nsparse) + end + if Ndense !== nothing + desc["Ndense"] = Base.Int(Ndense) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + tf.execute(desc) + end +end + + +""" + queue_enqueue_v2(handle, components; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueV2") do + desc = tf.NodeDescription("QueueEnqueueV2") + handle_ = convert(Tensor{Any}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + 
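+            # `components_` is a list input: the comprehension above converts
+            # each element individually, and `add_input` then registers the
+            # whole vector as a single variadic input of the op.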
tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_enqueue_v2(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueueV2") + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + scatter_nd_add(ref, indices, updates; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdAdd") do + desc = tf.NodeDescription("ScatterNdAdd") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_nd_add(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdAdd") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + reader_num_records_produced_v2(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumRecordsProducedV2") do + desc = tf.NodeDescription("ReaderNumRecordsProducedV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_num_records_produced_v2(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderNumRecordsProducedV2") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_centered_rms_prop_parameters(parameters, ms, mom, mg; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + mg_ = convert(Tensor{Float32}, mg_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, mg_) + if table_id !== 
nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_centered_rms_prop_parameters(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, mg_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, mg_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + assign_sub(ref, value; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignSub") do + desc = tf.NodeDescription("AssignSub") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign_sub(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("AssignSub") + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + unsorted_segment_sum(data, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentSum") do + desc = tf.NodeDescription("UnsortedSegmentSum") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function unsorted_segment_sum(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnsortedSegmentSum") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormGrad") do + desc = tf.NodeDescription("FusedBatchNormGrad") + y_backprop_ = convert(Tensor{Any}, y_backprop_) + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) + reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) + (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) + tf.add_input(desc, y_backprop_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, reserve_space_1_) + tf.add_input(desc, reserve_space_2_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fused_batch_norm_grad(y_backprop_::tf.TensorHandle, x_::tf.TensorHandle, scale_::tf.TensorHandle, reserve_space_1_::tf.TensorHandle, reserve_space_2_::tf.TensorHandle; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormGrad") + tf.add_input(desc, y_backprop_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, reserve_space_1_) + tf.add_input(desc, reserve_space_2_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(y_backprop_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(scale_) + desc["T"] = tf.data_type(reserve_space_1_) + desc["T"] = tf.data_type(reserve_space_2_) + tf.execute(desc) + end +end + + +""" + max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradV2") do + desc = tf.NodeDescription("MaxPoolGradV2") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad_v2(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle, ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradV2") + 
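+        # Eager path: rather than building a NodeDescription for the graph,
+        # this method fills in an EagerOp and runs it immediately via
+        # tf.execute below.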
tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Float32}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasAndRelu") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCreateEnsemble") do + desc = 
tf.NodeDescription("BoostedTreesCreateEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_create_ensemble(tree_ensemble_handle_::tf.TensorHandle, stamp_token_::tf.TensorHandle, tree_ensemble_serialized_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BoostedTreesCreateEnsemble") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_incomplete_size(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapIncompleteSize") do + desc = tf.NodeDescription("OrderedMapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + skipgram(; window_size=5, min_count=5, subsample=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + local desc + tf.with_op_name(name, "Skipgram") do + desc = tf.NodeDescription("Skipgram") + if filename !== nothing + desc["filename"] = Base.String(filename) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + if min_count !== nothing + desc["min_count"] = Base.Int(min_count) + end + if subsample !== nothing + desc["subsample"] = Base.identity(subsample) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:7 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + desc = tf.EagerOp("Skipgram") + if filename !== nothing + desc["filename"] = Base.String(filename) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + if min_count !== nothing + desc["min_count"] = Base.Int(min_count) + end + if subsample !== nothing + desc["subsample"] = Base.identity(subsample) + end + tf.execute(desc) + end +end + + +""" + arg_min(input, dimension; output_type=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMin") do + desc = tf.NodeDescription("ArgMin") + input_ = convert(Tensor{Any}, input_) + dimension_ = convert(Tensor{Int32}, dimension_) + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + (input_,) = tf.tf_promote(input_) + (dimension_,) = tf.tf_promote(dimension_) + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function arg_min(input_::tf.TensorHandle, dimension_::tf.TensorHandle; name=nothing, output_type=nothing) + desc = tf.EagerOp("ArgMin") + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(dimension_) + (tf.execute(desc))[1] + end +end + + +""" + queue_dequeue_many(handle, n; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueMany") do + desc = tf.NodeDescription("QueueDequeueMany") + handle_ = convert(Tensor{String}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue_many(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueMany") + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + 
desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_serialize_ensemble(tree_ensemble_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_serialize_ensemble(tree_ensemble_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BoostedTreesSerializeEnsemble") + tf.add_input(desc, tree_ensemble_handle_) + tf.execute(desc) + end +end + + +""" + minimum(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function minimum(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Minimum") do + desc = tf.NodeDescription("Minimum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function minimum(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Minimum") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + substr(input, pos, len; unit=BYTE) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "Substr") do + desc = tf.NodeDescription("Substr") + input_ = convert(Tensor{String}, input_) + pos_ = convert(Tensor{Any}, pos_) + len_ = convert(Tensor{Any}, len_) + (pos_, len_) = tf.tf_promote(pos_, len_) + tf.add_input(desc, input_) + tf.add_input(desc, pos_) + tf.add_input(desc, len_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + tf.Tensor(tf.Operation(desc)) + end + function substr(input_::tf.TensorHandle, pos_::tf.TensorHandle, len_::tf.TensorHandle; name=nothing, unit=nothing) + desc = tf.EagerOp("Substr") + tf.add_input(desc, input_) + tf.add_input(desc, pos_) + tf.add_input(desc, len_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + desc["T"] = tf.data_type(pos_) + desc["T"] = tf.data_type(len_) + (tf.execute(desc))[1] + end +end + + +""" + queue_size(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_size(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueSize") do + desc = tf.NodeDescription("QueueSize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function queue_size(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QueueSize") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, 
l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrlV2") do + desc = tf.NodeDescription("ApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyFtrlV2") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(linear_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(l2_shrinkage_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_momentum_parameters(parameters, momenta; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_momentum_parameters(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end 
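+        # Like the other eager wrappers, this method finishes below with
+        # (tf.execute(desc))[1]: tf.execute returns a vector of output
+        # handles, and single-output wrappers keep only the first.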
+ if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_mean(data, indices, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMean") do + desc = tf.NodeDescription("SparseSegmentMean") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_mean(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentMean") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyProximalAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_gather_v2(handle, indices, flow_in; element_shape=?) 
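+
+Gather specific elements from a TensorArray into one stacked output tensor.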
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV2") do + desc = tf.NodeDescription("TensorArrayGatherV2") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_gather_v2(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGatherV2") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + less(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function less(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Less") do + desc = tf.NodeDescription("Less") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function less(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Less") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + host_const() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HostConst") do + desc = tf.NodeDescription("HostConst") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function host_const(; name=nothing, value=nothing, dtype=nothing) + desc = tf.EagerOp("HostConst") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + upper_bound(sorted_inputs, values; out_type=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "UpperBound") do + desc = tf.NodeDescription("UpperBound") + sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) + values_ = convert(Tensor{Any}, values_) + (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) + tf.add_input(desc, sorted_inputs_) + tf.add_input(desc, values_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function upper_bound(sorted_inputs_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("UpperBound") + tf.add_input(desc, sorted_inputs_) + tf.add_input(desc, 
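+ # second input: the queries. UpperBound is a batched searchsorted with
+ # side="right": for each query it returns the index of the first entry in
+ # the corresponding row of sorted_inputs_ that is strictly greater.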
values_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(sorted_inputs_) + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_get_item(input_handle, index, element_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGetItem") do + desc = tf.NodeDescription("TensorListGetItem") + input_handle_ = convert(Tensor{Any}, input_handle_) + index_ = convert(Tensor{Int32}, index_) + element_shape_ = convert(Tensor{Int32}, element_shape_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_get_item(input_handle_::tf.TensorHandle, index_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListGetItem") + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + fake_quant_with_min_max_vars(inputs, min, max; num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVars") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVars") + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fake_quant_with_min_max_vars(inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVars") + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + (tf.execute(desc))[1] + end +end + + +""" + is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do + desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + tf.add_input(desc, quantile_stream_resource_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_::tf.TensorHandle; name=nothing) + desc = 
tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") + tf.add_input(desc, quantile_stream_resource_handle_) + (tf.execute(desc))[1] + end +end + + +""" + reader_read_up_to_v2(reader_handle, queue_handle, num_records) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadUpToV2") do + desc = tf.NodeDescription("ReaderReadUpToV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + queue_handle_ = convert(Tensor{Any}, queue_handle_) + num_records_ = convert(Tensor{Int64}, num_records_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.add_input(desc, num_records_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function reader_read_up_to_v2(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle, num_records_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderReadUpToV2") + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.add_input(desc, num_records_) + tf.execute(desc) + end +end + + +""" + complex(real, imag) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function complex(real_, imag_; name=nothing) + local desc + tf.with_op_name(name, "Complex") do + desc = tf.NodeDescription("Complex") + real_ = convert(Tensor{Float32}, real_) + imag_ = convert(Tensor{Float32}, imag_) + (real_, imag_) = tf.tf_promote(real_, imag_) + tf.add_input(desc, real_) + tf.add_input(desc, imag_) + end + tf.Tensor(tf.Operation(desc)) + end + function complex(real_::tf.TensorHandle, imag_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Complex") + tf.add_input(desc, real_) + tf.add_input(desc, imag_) + desc["T"] = tf.data_type(real_) + desc["T"] = tf.data_type(imag_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_reserve(element_shape, num_elements) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListReserve") do + desc = tf.NodeDescription("TensorListReserve") + element_shape_ = convert(Tensor{Any}, element_shape_) + num_elements_ = convert(Tensor{Int32}, num_elements_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_reserve(element_shape_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListReserve") + tf.add_input(desc, element_shape_) + tf.add_input(desc, num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["shape_type"] = tf.data_type(element_shape_) + (tf.execute(desc))[1] + end +end + + +""" + bitcast(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitcast(input_; name=nothing, type_=nothing) + local desc + 
tf.with_op_name(name, "Bitcast") do + desc = tf.NodeDescription("Bitcast") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if type_ !== nothing + desc["type"] = Base.identity(type_) + end + end + tf.Tensor(tf.Operation(desc)) + end + function bitcast(input_::tf.TensorHandle; name=nothing, type_=nothing) + desc = tf.EagerOp("Bitcast") + tf.add_input(desc, input_) + if type_ !== nothing + desc["type"] = Base.identity(type_) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + priority_queue(; component_types=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueue") do + desc = tf.NodeDescription("PriorityQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PriorityQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_batch_norm_with_global_normalization(t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") + t_ = convert(Tensor{Any}, t_) + t_min_ = convert(Tensor{Float32}, t_min_) + t_max_ = convert(Tensor{Float32}, t_max_) + m_ = convert(Tensor{Any}, m_) + m_min_ = convert(Tensor{Float32}, m_min_) + m_max_ = convert(Tensor{Float32}, m_max_) + v_ = convert(Tensor{Any}, v_) + v_min_ = convert(Tensor{Float32}, v_min_) + v_max_ = convert(Tensor{Float32}, v_max_) + beta_ = convert(Tensor{Any}, beta_) + beta_min_ = convert(Tensor{Float32}, beta_min_) + beta_max_ = convert(Tensor{Float32}, beta_max_) + gamma_ = convert(Tensor{Any}, gamma_) + gamma_min_ = convert(Tensor{Float32}, gamma_min_) + gamma_max_ = convert(Tensor{Float32}, gamma_max_) + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + tf.add_input(desc, t_) + tf.add_input(desc, t_min_) + tf.add_input(desc, t_max_) + 
tf.add_input(desc, m_) + tf.add_input(desc, m_min_) + tf.add_input(desc, m_max_) + tf.add_input(desc, v_) + tf.add_input(desc, v_min_) + tf.add_input(desc, v_max_) + tf.add_input(desc, beta_) + tf.add_input(desc, beta_min_) + tf.add_input(desc, beta_max_) + tf.add_input(desc, gamma_) + tf.add_input(desc, gamma_min_) + tf.add_input(desc, gamma_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_batch_norm_with_global_normalization(t_::tf.TensorHandle, t_min_::tf.TensorHandle, t_max_::tf.TensorHandle, m_::tf.TensorHandle, m_min_::tf.TensorHandle, m_max_::tf.TensorHandle, v_::tf.TensorHandle, v_min_::tf.TensorHandle, v_max_::tf.TensorHandle, beta_::tf.TensorHandle, beta_min_::tf.TensorHandle, beta_max_::tf.TensorHandle, gamma_::tf.TensorHandle, gamma_min_::tf.TensorHandle, gamma_max_::tf.TensorHandle; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") + tf.add_input(desc, t_) + tf.add_input(desc, t_min_) + tf.add_input(desc, t_max_) + tf.add_input(desc, m_) + tf.add_input(desc, m_min_) + tf.add_input(desc, m_max_) + tf.add_input(desc, v_) + tf.add_input(desc, v_min_) + tf.add_input(desc, v_max_) + tf.add_input(desc, beta_) + tf.add_input(desc, beta_min_) + tf.add_input(desc, beta_max_) + tf.add_input(desc, gamma_) + tf.add_input(desc, gamma_min_) + tf.add_input(desc, gamma_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + desc["Tinput"] = tf.data_type(t_) + desc["Tinput"] = tf.data_type(m_) + desc["Tinput"] = tf.data_type(v_) + desc["Tinput"] = tf.data_type(beta_) + desc["Tinput"] = tf.data_type(gamma_) + tf.execute(desc) + end +end + + +""" + cos(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cos(x_; name=nothing) + local desc + tf.with_op_name(name, "Cos") do + desc = tf.NodeDescription("Cos") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function cos(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Cos") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + quantize_down_and_shrink_range(input, input_min, input_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizeDownAndShrinkRange") do + desc = tf.NodeDescription("QuantizeDownAndShrinkRange") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if out_type !== nothing + 
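+ # out_type picks the narrower quantized dtype to convert down to
+ # (commonly qint32 activations to quint8); the kernel inspects the actual
+ # value distribution to produce a tightened output_min/output_max, which
+ # is why three outputs are collected below.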
desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantize_down_and_shrink_range(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizeDownAndShrinkRange") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["Tinput"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + experimental_random_dataset(seed, seed2) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalRandomDataset") do + desc = tf.NodeDescription("ExperimentalRandomDataset") + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_random_dataset(seed_::tf.TensorHandle, seed2_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalRandomDataset") + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + rpc(address, method, request; protocol=, fail_fast=true, timeout_in_ms=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "Rpc") do + desc = tf.NodeDescription("Rpc") + address_ = convert(Tensor{String}, address_) + method_ = convert(Tensor{String}, method_) + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function rpc(address_::tf.TensorHandle, method_::tf.TensorHandle, request_::tf.TensorHandle; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + desc = tf.EagerOp("Rpc") + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand; out_type=Float32, dilations=[1, 
1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + summand_ = convert(Tensor{Any}, summand_) + min_summand_ = convert(Tensor{Float32}, min_summand_) + max_summand_ = convert(Tensor{Float32}, max_summand_) + (summand_,) = tf.tf_promote(summand_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + tf.add_input(desc, summand_) + tf.add_input(desc, min_summand_) + tf.add_input(desc, max_summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle, summand_::tf.TensorHandle, min_summand_::tf.TensorHandle, max_summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + tf.add_input(desc, summand_) + tf.add_input(desc, min_summand_) + tf.add_input(desc, max_summand_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = 
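+ # The eager branch reads the dtype attrs (Tinput, Tfilter, Tbias,
+ # Tsummand) straight off the live handles instead of relying on the
+ # tf_promote pass used in graph mode, so callers are expected to pass
+ # handles already of the op's expected dtypes.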
tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + desc["Tbias"] = tf.data_type(bias_) + desc["Tsummand"] = tf.data_type(summand_) + tf.execute(desc) + end +end + + +""" + tensor_list_length(input_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_length(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorListLength") do + desc = tf.NodeDescription("TensorListLength") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_length(input_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorListLength") + tf.add_input(desc, input_handle_) + (tf.execute(desc))[1] + end +end + + +""" + map_incomplete_size(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapIncompleteSize") do + desc = tf.NodeDescription("MapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + stateless_while(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "StatelessWhile") do + desc = tf.NodeDescription("StatelessWhile") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_while(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing) + desc = tf.EagerOp("StatelessWhile") + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_conditional_accumulator(; name=nothing, 
dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "SparseConditionalAccumulator") do + desc = tf.NodeDescription("SparseConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + desc = tf.EagerOp("SparseConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + (tf.execute(desc))[1] + end +end + + +""" + segment_min(data, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_min(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMin") do + desc = tf.NodeDescription("SegmentMin") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function segment_min(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SegmentMin") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + (tf.execute(desc))[1] + end +end + + +""" + write_graph_summary(writer, step, tensor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing) + local desc + tf.with_op_name(name, "WriteGraphSummary") do + desc = tf.NodeDescription("WriteGraphSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tensor_ = convert(Tensor{String}, tensor_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tensor_) + end + tf.Tensor(tf.Operation(desc)) + end + function write_graph_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WriteGraphSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tensor_) + (tf.execute(desc))[1] + end +end + + +""" + cholesky_grad(l, grad) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cholesky_grad(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "CholeskyGrad") do + desc = tf.NodeDescription("CholeskyGrad") + l_ = convert(Tensor{Any}, l_) + grad_ = convert(Tensor{Any}, grad_) + (l_, grad_) = tf.tf_promote(l_, grad_) + tf.add_input(desc, l_) + tf.add_input(desc, grad_) + end + 
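+ # CholeskyGrad backpropagates through a Cholesky factorization: given the
+ # factor l_ and the incoming gradient grad_, both promoted above to a
+ # common floating-point type, it returns the gradient with respect to the
+ # original matrix.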
tf.Tensor(tf.Operation(desc)) + end + function cholesky_grad(l_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CholeskyGrad") + tf.add_input(desc, l_) + tf.add_input(desc, grad_) + desc["T"] = tf.data_type(l_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + log_uniform_candidate_sampler(true_classes; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LogUniformCandidateSampler") do + desc = tf.NodeDescription("LogUniformCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function log_uniform_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("LogUniformCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) + end +end + + +""" + serialize_sparse(sparse_indices, sparse_values, sparse_shape; out_type=String) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "SerializeSparse") do + desc = tf.NodeDescription("SerializeSparse") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function serialize_sparse(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("SerializeSparse") + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(sparse_values_) + (tf.execute(desc))[1] + end +end + + +""" + scatter_nd_non_aliasing_add(input, indices, updates) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNdNonAliasingAdd") do + desc = tf.NodeDescription("ScatterNdNonAliasingAdd") + input_ = convert(Tensor{Any}, input_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (input_, updates_) = tf.tf_promote(input_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, input_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_nd_non_aliasing_add(input_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ScatterNdNonAliasingAdd") + tf.add_input(desc, input_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + desc["T"] = tf.data_type(input_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + ref_merge(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefMerge") do + desc = tf.NodeDescription("RefMerge") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ref_merge(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("RefMerge") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(inputs_) + tf.execute(desc) + end +end + + +""" + tensor_list_concat(input_handle; element_shape=?) 
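+
+Concatenate all tensors in the list along their first dimension; a second
+output reports the original leading-dimension length of each element.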
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorListConcat") do + desc = tf.NodeDescription("TensorListConcat") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_list_concat(input_handle_::tf.TensorHandle; name=nothing, element_dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorListConcat") + tf.add_input(desc, input_handle_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + tf.execute(desc) + end +end + + +""" + cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNCanonicalToParams") do + desc = tf.NodeDescription("CudnnRNNCanonicalToParams") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + weights_ = [convert(Tensor{Any}, x) for x = weights_] + biases_ = [convert(Tensor{Any}, x) for x = biases_] + (weights_, biases_) = tf.tf_promote(weights_, biases_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + tf.add_input(desc, weights_) + tf.add_input(desc, biases_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cudnn_rnn_canonical_to_params(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle, weights_::tf.TensorHandle, biases_::tf.TensorHandle; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNCanonicalToParams") + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + tf.add_input(desc, weights_) + tf.add_input(desc, biases_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end 
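+ # CudnnRNNCanonicalToParams packs the per-layer canonical weights and
+ # biases into the single opaque params buffer the CudnnRNN kernels
+ # consume; rnn_mode, input_mode and direction should match the values the
+ # RNN descriptor was created with.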
+ if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(weights_) + desc["T"] = tf.data_type(biases_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdadelta") do + desc = tf.NodeDescription("SparseApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyAdadelta") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(accum_update_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_close(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayClose") do + desc = tf.NodeDescription("TensorArrayClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_close(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayClose") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + selu_grad(gradients, outputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "SeluGrad") do + desc = 
tf.NodeDescription("SeluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + outputs_ = convert(Tensor{Any}, outputs_) + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) + end + tf.Tensor(tf.Operation(desc)) + end + function selu_grad(gradients_::tf.TensorHandle, outputs_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SeluGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(outputs_) + (tf.execute(desc))[1] + end +end + + +""" + crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=bilinear) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradImage") do + desc = tf.NodeDescription("CropAndResizeGradImage") + grads_ = convert(Tensor{Float32}, grads_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + image_size_ = convert(Tensor{Int32}, image_size_) + tf.add_input(desc, grads_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, image_size_) + if method !== nothing + desc["method"] = Base.String(method) + end + end + tf.Tensor(tf.Operation(desc)) + end + function crop_and_resize_grad_image(grads_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle, image_size_::tf.TensorHandle; name=nothing, method=nothing) + desc = tf.EagerOp("CropAndResizeGradImage") + tf.add_input(desc, grads_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, image_size_) + if method !== nothing + desc["method"] = Base.String(method) + end + (tf.execute(desc))[1] + end +end + + +""" + rfft(input, fft_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT") do + desc = tf.NodeDescription("RFFT") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function rfft(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RFFT") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_sql_dataset(driver_name, data_source_name, query) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSqlDataset") do + desc = tf.NodeDescription("ExperimentalSqlDataset") + driver_name_ = convert(Tensor{String}, driver_name_) + data_source_name_ = convert(Tensor{String}, data_source_name_) + query_ = convert(Tensor{String}, query_) + tf.add_input(desc, driver_name_) + tf.add_input(desc, data_source_name_) + tf.add_input(desc, query_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_sql_dataset(driver_name_::tf.TensorHandle, 
data_source_name_::tf.TensorHandle, query_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSqlDataset") + tf.add_input(desc, driver_name_) + tf.add_input(desc, data_source_name_) + tf.add_input(desc, query_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyPowerSign") do + desc = tf.NodeDescription("ResourceApplyPowerSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + logbase_ = convert(Tensor{Any}, logbase_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_power_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, logbase_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyPowerSign") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(logbase_) + desc["T"] = tf.data_type(sign_decay_) + desc["T"] = tf.data_type(beta_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + matrix_determinant(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_determinant(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDeterminant") do + desc = tf.NodeDescription("MatrixDeterminant") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_determinant(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatrixDeterminant") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + static_regex_replace(input; replace_global=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + local desc + tf.with_op_name(name, "StaticRegexReplace") do + desc = tf.NodeDescription("StaticRegexReplace") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = 
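+ # pattern and rewrite are baked in as compile-time string attrs, so the
+ # regex is fixed at graph-construction time; the non-static RegexReplace
+ # op takes them as tensor inputs instead.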
Base.String(pattern) + end + if rewrite !== nothing + desc["rewrite"] = Base.String(rewrite) + end + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + end + tf.Tensor(tf.Operation(desc)) + end + function static_regex_replace(input_::tf.TensorHandle; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + desc = tf.EagerOp("StaticRegexReplace") + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + if rewrite !== nothing + desc["rewrite"] = Base.String(rewrite) + end + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + (tf.execute(desc))[1] + end +end + + +""" + avg_pool(value; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool") do + desc = tf.NodeDescription("AvgPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function avg_pool(value_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool") + tf.add_input(desc, value_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_dense_cwise_add(sp_indices, sp_values, sp_shape, dense) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseAdd") do + desc = tf.NodeDescription("SparseDenseCwiseAdd") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_dense_cwise_add(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseAdd") + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) + desc["T"] = tf.data_type(sp_values_) + desc["T"] = tf.data_type(dense_) + (tf.execute(desc))[1] + end +end + + +""" + bias_add_v1(value, bias) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add_v1(value_, bias_; name=nothing) + local desc + tf.with_op_name(name, "BiasAddV1") do + desc = 
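+ # BiasAddV1 is the legacy form of BiasAdd: it broadcasts bias_ along the
+ # last dimension of value_ and, unlike BiasAdd, accepts no data_format
+ # attribute.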
tf.NodeDescription("BiasAddV1") + value_ = convert(Tensor{Any}, value_) + bias_ = convert(Tensor{Any}, bias_) + (value_, bias_) = tf.tf_promote(value_, bias_) + tf.add_input(desc, value_) + tf.add_input(desc, bias_) + end + tf.Tensor(tf.Operation(desc)) + end + function bias_add_v1(value_::tf.TensorHandle, bias_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BiasAddV1") + tf.add_input(desc, value_) + tf.add_input(desc, bias_) + desc["T"] = tf.data_type(value_) + desc["T"] = tf.data_type(bias_) + (tf.execute(desc))[1] + end +end + + +""" + invert_permutation(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function invert_permutation(x_; name=nothing) + local desc + tf.with_op_name(name, "InvertPermutation") do + desc = tf.NodeDescription("InvertPermutation") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function invert_permutation(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InvertPermutation") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + hash_table_v2(; container=, shared_name=, use_node_name_sharing=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTableV2") do + desc = tf.NodeDescription("HashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("HashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "SparseApplyMomentum") do + desc = tf.NodeDescription("SparseApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, 
momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("SparseApplyMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end + + +""" + infeed_enqueue(input; shape=?, layout=Int64[], device_ordinal=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "InfeedEnqueue") do + desc = tf.NodeDescription("InfeedEnqueue") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if layout !== nothing + desc["layout"] = map(Base.identity, layout) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function infeed_enqueue(input_::tf.TensorHandle; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + desc = tf.EagerOp("InfeedEnqueue") + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if layout !== nothing + desc["layout"] = map(Base.identity, layout) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + desc["dtype"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + stateless_random_uniform_int(shape, seed, minval, maxval) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniformInt") do + desc = tf.NodeDescription("StatelessRandomUniformInt") + shape_ = convert(Tensor{Any}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + minval_ = convert(Tensor{Any}, minval_) + maxval_ = convert(Tensor{Any}, maxval_) + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + tf.add_input(desc, minval_) + tf.add_input(desc, maxval_) + if dtype !== nothing + desc["dtype"] 
= Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_random_uniform_int(shape_::tf.TensorHandle, seed_::tf.TensorHandle, minval_::tf.TensorHandle, maxval_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomUniformInt") + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + tf.add_input(desc, minval_) + tf.add_input(desc, maxval_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + desc["Tseed"] = tf.data_type(seed_) + desc["dtype"] = tf.data_type(minval_) + desc["dtype"] = tf.data_type(maxval_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters, accumulators, updates, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + updates_ = convert(Tensor{Float32}, updates_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, updates_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, updates_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, updates_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + _send(tensor; client_terminated=false) + +Sends the named tensor from send_device to recv_device. 
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Send") do + desc = tf.NodeDescription("_Send") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _send(tensor_::tf.TensorHandle; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_Send") + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + map_peek(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapPeek") do + desc = tf.NodeDescription("MapPeek") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_peek(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapPeek") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + write_scalar_summary(writer, step, tag, value) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) + local desc + tf.with_op_name(name, "WriteScalarSummary") do + desc = tf.NodeDescription("WriteScalarSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, value_) + end + tf.Tensor(tf.Operation(desc)) + end + function write_scalar_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WriteScalarSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, value_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_unstage_no_key(indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstageNoKey") do + desc = tf.NodeDescription("OrderedMapUnstageNoKey") + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ordered_map_unstage_no_key(indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapUnstageNoKey") + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + tf.execute(desc) + end +end + + +""" + sparse_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("SparseApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + 
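+        # Julia callers pass 1-based indices; the next two lines convert
+        # `indices_` to a Tensor and shift it down by one, since the
+        # TensorFlow kernel expects 0-based row indices into `var`.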
indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyCenteredRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(mg_) + desc["T"] = tf.data_type(ms_) + desc["T"] = tf.data_type(mom_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_scatter_v2(tensor, indices, element_shape, num_elements) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListScatterV2") do + desc = tf.NodeDescription("TensorListScatterV2") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Int32}, indices_) + element_shape_ = convert(Tensor{Any}, element_shape_) + num_elements_ = convert(Tensor{Int32}, num_elements_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_scatter_v2(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListScatterV2") + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["element_dtype"] = tf.data_type(tensor_) + desc["shape_type"] = tf.data_type(element_shape_) + 
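+    # Eager wrappers fill the type attrs ("element_dtype", "shape_type") from
+    # the runtime dtypes of the input handles instead of relying on graph-mode
+    # `tf_promote`; `tf.execute(desc)[1]` then returns the single output handle.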
(tf.execute(desc))[1] + end +end + + +""" + conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInputV2") do + desc = tf.NodeDescription("Conv3DBackpropInputV2") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + (input_sizes_,) = tf.tf_promote(input_sizes_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv3d_backprop_input_v2(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropInputV2") + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tshape"] = tf.data_type(input_sizes_) + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_proximal_adagrad_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + 
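+    # This op has two outputs (the graph-mode wrapper above collects
+    # out_idx 1:2), so the eager wrapper returns the full result of
+    # `tf.execute` rather than indexing the first element.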
tf.execute(desc) + end +end + + +""" + random_shuffle(value; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomShuffle") do + desc = tf.NodeDescription("RandomShuffle") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_shuffle(value_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("RandomShuffle") + tf.add_input(desc, value_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + uniform_candidate_sampler(true_classes; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "UniformCandidateSampler") do + desc = tf.NodeDescription("UniformCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function uniform_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("UniformCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) + end +end + + +""" + tensor_array_split_v2(handle, value, lengths, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV2") do + desc = tf.NodeDescription("TensorArraySplitV2") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function 
tensor_array_split_v2(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySplitV2") + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + mutable_dense_hash_table_v2(empty_key, deleted_key; container=, shared_name=, use_node_name_sharing=false, value_shape=?, initial_num_buckets=131072, max_load_factor=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTableV2") do + desc = tf.NodeDescription("MutableDenseHashTableV2") + empty_key_ = convert(Tensor{Any}, empty_key_) + deleted_key_ = convert(Tensor{Any}, deleted_key_) + (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_) + tf.add_input(desc, empty_key_) + tf.add_input(desc, deleted_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_dense_hash_table_v2(empty_key_::tf.TensorHandle, deleted_key_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + desc = tf.EagerOp("MutableDenseHashTableV2") + tf.add_input(desc, empty_key_) + tf.add_input(desc, deleted_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + desc["key_dtype"] = tf.data_type(empty_key_) + desc["key_dtype"] = tf.data_type(deleted_key_) + (tf.execute(desc))[1] + end +end + + +""" + draw_bounding_boxes(images, boxes) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) + local desc + tf.with_op_name(name, 
"DrawBoundingBoxes") do + desc = tf.NodeDescription("DrawBoundingBoxes") + images_ = convert(Tensor{Float32}, images_) + boxes_ = convert(Tensor{Float32}, boxes_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, boxes_) + end + tf.Tensor(tf.Operation(desc)) + end + function draw_bounding_boxes(images_::tf.TensorHandle, boxes_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DrawBoundingBoxes") + tf.add_input(desc, images_) + tf.add_input(desc, boxes_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_proximal_adagrad(var, accum, lr, l1, l2, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalAdagrad") do + desc = tf.NodeDescription("SparseApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyProximalAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + range_dataset(start, stop, step) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RangeDataset") do + desc = tf.NodeDescription("RangeDataset") + start_ = convert(Tensor{Int64}, start_) + stop_ = convert(Tensor{Int64}, stop_) + step_ = convert(Tensor{Int64}, step_) + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, step_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function range_dataset(start_::tf.TensorHandle, stop_::tf.TensorHandle, step_::tf.TensorHandle; name=nothing, output_types=nothing, 
output_shapes=nothing) + desc = tf.EagerOp("RangeDataset") + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, step_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + reader_restore_state_v2(reader_handle, state) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreStateV2") do + desc = tf.NodeDescription("ReaderRestoreStateV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + state_ = convert(Tensor{String}, state_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_restore_state_v2(reader_handle_::tf.TensorHandle, state_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderRestoreStateV2") + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) + (tf.execute(desc))[1] + end +end + + +""" + top_kv2(input, k; sorted=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing) + local desc + tf.with_op_name(name, "TopKV2") do + desc = tf.NodeDescription("TopKV2") + input_ = convert(Tensor{Any}, input_) + k_ = convert(Tensor{Int32}, k_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, k_) + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function top_kv2(input_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing, sorted=nothing) + desc = tf.EagerOp("TopKV2") + tf.add_input(desc, input_) + tf.add_input(desc, k_) + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + atanh(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atanh(x_; name=nothing) + local desc + tf.with_op_name(name, "Atanh") do + desc = tf.NodeDescription("Atanh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function atanh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Atanh") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + debug_gradient_identity(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_gradient_identity(input_; name=nothing) + local desc + tf.with_op_name(name, "DebugGradientIdentity") do + desc = tf.NodeDescription("DebugGradientIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function debug_gradient_identity(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DebugGradientIdentity") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_add_grad(backprop_val_grad, a_indices, b_indices, sum_indices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_add_grad(backprop_val_grad_, a_indices_, 
b_indices_, sum_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseAddGrad") do + desc = tf.NodeDescription("SparseAddGrad") + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + a_indices_ = convert(Tensor{Int64}, a_indices_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + sum_indices_ = convert(Tensor{Int64}, sum_indices_) + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, sum_indices_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_add_grad(backprop_val_grad_::tf.TensorHandle, a_indices_::tf.TensorHandle, b_indices_::tf.TensorHandle, sum_indices_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseAddGrad") + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, sum_indices_) + desc["T"] = tf.data_type(backprop_val_grad_) + tf.execute(desc) + end +end + + +""" + resource_scatter_add(resource, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterAdd") do + desc = tf.NodeDescription("ResourceScatterAdd") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_add(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterAdd") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + ceil(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ceil(x_; name=nothing) + local desc + tf.with_op_name(name, "Ceil") do + desc = tf.NodeDescription("Ceil") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function ceil(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Ceil") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + save(filename, tensor_names, data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "Save") do + desc = tf.NodeDescription("Save") + filename_ = convert(Tensor{String}, filename_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + 
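+        # `data_` is a list input: each element was converted to a Tensor
+        # above, and the list-typed "T" attr carries one dtype per element.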
end + tf.Tensor(tf.Operation(desc)) + end + function save(filename_::tf.TensorHandle, tensor_names_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing) + desc = tf.EagerOp("Save") + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_centered_rms_prop_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + quantized_concat(concat_dim, values, input_mins, input_maxes) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "QuantizedConcat") do + desc = tf.NodeDescription("QuantizedConcat") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + values_ = [convert(Tensor{Any}, x) for x = values_] + input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_] + input_maxes_ = [convert(Tensor{Float32}, x) for x = input_maxes_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + tf.add_input(desc, input_mins_) + tf.add_input(desc, input_maxes_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_concat(concat_dim_::tf.TensorHandle, values_::tf.TensorHandle, input_mins_::tf.TensorHandle, input_maxes_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("QuantizedConcat") + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + tf.add_input(desc, input_mins_) + tf.add_input(desc, input_maxes_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(values_) + tf.execute(desc) + end +end + + +""" + zeros_like(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zeros_like(x_; name=nothing) + local desc + tf.with_op_name(name, "ZerosLike") do + desc = tf.NodeDescription("ZerosLike") + x_ = 
convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function zeros_like(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ZerosLike") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + fractional_avg_pool(value; pseudo_random=false, overlapping=false, deterministic=false, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPool") do + desc = tf.NodeDescription("FractionalAvgPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fractional_avg_pool(value_::tf.TensorHandle; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FractionalAvgPool") + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(value_) + tf.execute(desc) + end +end + + +""" + edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape; normalize=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + local desc + tf.with_op_name(name, "EditDistance") do + desc = tf.NodeDescription("EditDistance") + hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_) + hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_) + hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_) + truth_indices_ = convert(Tensor{Int64}, truth_indices_) + truth_values_ = convert(Tensor{Any}, truth_values_) + truth_shape_ = convert(Tensor{Int64}, truth_shape_) + (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_) + tf.add_input(desc, hypothesis_indices_) + tf.add_input(desc, hypothesis_values_) + tf.add_input(desc, hypothesis_shape_) + tf.add_input(desc, truth_indices_) + tf.add_input(desc, truth_values_) + tf.add_input(desc, truth_shape_) + if normalize !== nothing + desc["normalize"] = Base.Bool(normalize) + end 
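+        # Optional attrs are written only when the caller supplies them;
+        # otherwise the kernel default applies (normalize=true for this op,
+        # per the docstring above).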
+ end + tf.Tensor(tf.Operation(desc)) + end + function edit_distance(hypothesis_indices_::tf.TensorHandle, hypothesis_values_::tf.TensorHandle, hypothesis_shape_::tf.TensorHandle, truth_indices_::tf.TensorHandle, truth_values_::tf.TensorHandle, truth_shape_::tf.TensorHandle; name=nothing, normalize=nothing) + desc = tf.EagerOp("EditDistance") + tf.add_input(desc, hypothesis_indices_) + tf.add_input(desc, hypothesis_values_) + tf.add_input(desc, hypothesis_shape_) + tf.add_input(desc, truth_indices_) + tf.add_input(desc, truth_values_) + tf.add_input(desc, truth_shape_) + if normalize !== nothing + desc["normalize"] = Base.Bool(normalize) + end + desc["T"] = tf.data_type(hypothesis_values_) + desc["T"] = tf.data_type(truth_values_) + (tf.execute(desc))[1] + end +end + + +""" + unique_v2(x, axis; out_idx=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueV2") do + desc = tf.NodeDescription("UniqueV2") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int64}, axis_) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unique_v2(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueV2") + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + desc["T"] = tf.data_type(x_) + desc["Taxis"] = tf.data_type(axis_) + tf.execute(desc) + end +end + + +""" + quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=HALF_TO_EVEN) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV2") do + desc = tf.NodeDescription("QuantizeAndDequantizeV2") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Any}, input_min_) + input_max_ = convert(Tensor{Any}, input_max_) + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + tf.Tensor(tf.Operation(desc)) + end + function quantize_and_dequantize_v2(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + desc = tf.EagerOp("QuantizeAndDequantizeV2") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + 
desc["range_given"] = Base.Bool(range_given) + end + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_min_) + desc["T"] = tf.data_type(input_max_) + (tf.execute(desc))[1] + end +end + + +""" + quantize_and_dequantize(input; signed_input=true, num_bits=8, range_given=false, input_min=?, input_max=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantize") do + desc = tf.NodeDescription("QuantizeAndDequantize") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + if input_min !== nothing + desc["input_min"] = Base.identity(input_min) + end + if input_max !== nothing + desc["input_max"] = Base.identity(input_max) + end + end + tf.Tensor(tf.Operation(desc)) + end + function quantize_and_dequantize(input_::tf.TensorHandle; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + desc = tf.EagerOp("QuantizeAndDequantize") + tf.add_input(desc, input_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + if input_min !== nothing + desc["input_min"] = Base.identity(input_min) + end + if input_max !== nothing + desc["input_max"] = Base.identity(input_max) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_pop_back(input_handle, element_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPopBack") do + desc = tf.NodeDescription("TensorListPopBack") + input_handle_ = convert(Tensor{Any}, input_handle_) + element_shape_ = convert(Tensor{Int32}, element_shape_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_list_pop_back(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPopBack") + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + tf.execute(desc) + end +end + + +""" + debug_nan_count(input; device_name=, tensor_name=, debug_urls=Int64[], gated_grpc=false) + +Debug NaN Value Counter Op +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, 
"DebugNanCount") do + desc = tf.NodeDescription("DebugNanCount") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + tf.Tensor(tf.Operation(desc)) + end + function debug_nan_count(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + desc = tf.EagerOp("DebugNanCount") + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdagradDA") do + desc = tf.NodeDescription("ApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdagradDA") + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(gradient_accumulator_) + desc["T"] = tf.data_type(gradient_squared_accumulator_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = 
tf.data_type(l2_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    depthwise_conv2d_native(input, filter; data_format=NHWC, dilations=[1, 1, 1, 1])
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+        local desc
+        tf.with_op_name(name, "DepthwiseConv2dNative") do
+            desc = tf.NodeDescription("DepthwiseConv2dNative")
+            input_ = convert(Tensor{Any}, input_)
+            filter_ = convert(Tensor{Any}, filter_)
+            (input_, filter_) = tf.tf_promote(input_, filter_)
+            tf.add_input(desc, input_)
+            tf.add_input(desc, filter_)
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
+            end
+            if data_format !== nothing
+                desc["data_format"] = Base.String(data_format)
+            end
+            if dilations !== nothing
+                desc["dilations"] = map(Base.identity, dilations)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function depthwise_conv2d_native(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+        desc = tf.EagerOp("DepthwiseConv2dNative")
+        tf.add_input(desc, input_)
+        tf.add_input(desc, filter_)
+        if strides !== nothing
+            desc["strides"] = map(Base.identity, strides)
+        end
+        if padding !== nothing
+            desc["padding"] = Base.String(padding)
+        end
+        if data_format !== nothing
+            desc["data_format"] = Base.String(data_format)
+        end
+        if dilations !== nothing
+            desc["dilations"] = map(Base.identity, dilations)
+        end
+        desc["T"] = tf.data_type(input_)
+        desc["T"] = tf.data_type(filter_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    serialize_iterator(resource_handle)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_iterator(resource_handle_; name=nothing)
+        local desc
+        tf.with_op_name(name, "SerializeIterator") do
+            desc = tf.NodeDescription("SerializeIterator")
+            resource_handle_ = convert(Tensor{Any}, resource_handle_)
+            tf.add_input(desc, resource_handle_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function serialize_iterator(resource_handle_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("SerializeIterator")
+        tf.add_input(desc, resource_handle_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    dataset_to_graph(input_dataset)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing)
+        local desc
+        tf.with_op_name(name, "DatasetToGraph") do
+            desc = tf.NodeDescription("DatasetToGraph")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            tf.add_input(desc, input_dataset_)
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function dataset_to_graph(input_dataset_::tf.TensorHandle; name=nothing)
+        desc = tf.EagerOp("DatasetToGraph")
+        tf.add_input(desc, input_dataset_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
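Each of these generated blocks follows one pattern: a graph-mode method that fills a tf.NodeDescription inside tf.with_op_name and returns a tf.Tensor, plus an eager method, dispatching on tf.TensorHandle, that fills a tf.EagerOp and runs it on the spot with tf.execute. As a minimal sketch of calling one of the wrappers above in eager mode — assuming the EagerContext machinery from src/eager.jl is initialized, that the EagerOp attribute setters accept integer lists, and with `handle` as a hypothetical convenience not defined in this patch:

    # hypothetical helper: wrap a Julia array as an eager TensorHandle
    handle(x) = tf.TensorHandle(tf.RawTensor(x))

    img  = handle(randn(Float32, 1, 28, 28, 3))  # NHWC: one 28x28, 3-channel image
    filt = handle(randn(Float32, 3, 3, 3, 2))    # 3x3 kernel, 3 in-channels, multiplier 2
    # dispatches to the TensorHandle method of depthwise_conv2d_native above, so the
    # op executes immediately and returns a handle with 3*2 = 6 output channels
    out = depthwise_conv2d_native(img, filt; strides=[1, 1, 1, 1], padding="SAME")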
+"""
+    top_k(input; sorted=true)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing)
+        local desc
+        tf.with_op_name(name, "TopK") do
+            desc = tf.NodeDescription("TopK")
+            input_ = convert(Tensor{Any}, input_)
+            (input_,) = tf.tf_promote(input_)
+            tf.add_input(desc, input_)
+            if k !== nothing
+                desc["k"] = Base.Int(k)
+            end
+            if sorted !== nothing
+                desc["sorted"] = Base.Bool(sorted)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:2
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
+    function top_k(input_::tf.TensorHandle; name=nothing, k=nothing, sorted=nothing)
+        desc = tf.EagerOp("TopK")
+        tf.add_input(desc, input_)
+        if k !== nothing
+            desc["k"] = Base.Int(k)
+        end
+        if sorted !== nothing
+            desc["sorted"] = Base.Bool(sorted)
+        end
+        desc["T"] = tf.data_type(input_)
+        tf.execute(desc)
+    end
+end
+
+
+"""
+    resource_apply_ftrl_v2(var, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power; use_locking=false)
+
+
+"""
+begin
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+        local desc
+        tf.with_op_name(name, "ResourceApplyFtrlV2") do
+            desc = tf.NodeDescription("ResourceApplyFtrlV2")
+            var_ = convert(Tensor{Any}, var_)
+            accum_ = convert(Tensor{Any}, accum_)
+            linear_ = convert(Tensor{Any}, linear_)
+            grad_ = convert(Tensor{Any}, grad_)
+            lr_ = convert(Tensor{Any}, lr_)
+            l1_ = convert(Tensor{Any}, l1_)
+            l2_ = convert(Tensor{Any}, l2_)
+            l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_)
+            lr_power_ = convert(Tensor{Any}, lr_power_)
+            (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_)
+            tf.add_input(desc, var_)
+            tf.add_input(desc, accum_)
+            tf.add_input(desc, linear_)
+            tf.add_input(desc, grad_)
+            tf.add_input(desc, lr_)
+            tf.add_input(desc, l1_)
+            tf.add_input(desc, l2_)
+            tf.add_input(desc, l2_shrinkage_)
+            tf.add_input(desc, lr_power_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
+        end
+        tf.Tensor(tf.Operation(desc))
+    end
+    function resource_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing)
+        desc = tf.EagerOp("ResourceApplyFtrlV2")
+        tf.add_input(desc, var_)
+        tf.add_input(desc, accum_)
+        tf.add_input(desc, linear_)
+        tf.add_input(desc, grad_)
+        tf.add_input(desc, lr_)
+        tf.add_input(desc, l1_)
+        tf.add_input(desc, l2_)
+        tf.add_input(desc, l2_shrinkage_)
+        tf.add_input(desc, lr_power_)
+        if use_locking !== nothing
+            desc["use_locking"] = Base.Bool(use_locking)
+        end
+        desc["T"] = tf.data_type(grad_)
+        desc["T"] = tf.data_type(lr_)
+        desc["T"] = tf.data_type(l1_)
+        desc["T"] = tf.data_type(l2_)
+        desc["T"] = tf.data_type(l2_shrinkage_)
+        desc["T"] = tf.data_type(lr_power_)
+        (tf.execute(desc))[1]
+    end
+end
+
+
+"""
+    _nccl_broadcast_recv(shape)
+
+Replacement node for NcclBroadcast.
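+The num_devices and shared_name attributes presumably pair this receiver with a matching _NcclBroadcastSend node; being underscore-prefixed, it is an internal op rather than part of the public API.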
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastRecv") do + desc = tf.NodeDescription("_NcclBroadcastRecv") + shape_ = convert(Tensor{Int32}, shape_) + tf.add_input(desc, shape_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _nccl_broadcast_recv(shape_::tf.TensorHandle; name=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclBroadcastRecv") + tf.add_input(desc, shape_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + queue_is_closed(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_is_closed(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueIsClosed") do + desc = tf.NodeDescription("QueueIsClosed") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function queue_is_closed(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QueueIsClosed") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + shuffle_dataset(input_dataset, buffer_size, seed, seed2; reshuffle_each_iteration=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleDataset") do + desc = tf.NodeDescription("ShuffleDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if reshuffle_each_iteration !== nothing + desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function shuffle_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle, seed_::tf.TensorHandle, seed2_::tf.TensorHandle; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ShuffleDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if reshuffle_each_iteration !== nothing + desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + deserialize_sparse(serialized_sparse) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DeserializeSparse") do + desc = tf.NodeDescription("DeserializeSparse") + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + (serialized_sparse_,) = tf.tf_promote(serialized_sparse_) + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function deserialize_sparse(serialized_sparse_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("DeserializeSparse") + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tserialized"] = tf.data_type(serialized_sparse_) + tf.execute(desc) + end +end + + +""" + priority_queue_v2(; component_types=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueueV2") do + desc = tf.NodeDescription("PriorityQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PriorityQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + _device_arg() + +A graph node which represents an argument to a function. 
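+The index attribute gives the position of the argument this node stands for; like other underscore-prefixed ops, it is internal to the runtime rather than part of the public API.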
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _device_arg(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceArg") do + desc = tf.NodeDescription("_DeviceArg") + if index !== nothing + desc["index"] = Base.Int(index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _device_arg(; name=nothing, index=nothing) + desc = tf.EagerOp("_DeviceArg") + if index !== nothing + desc["index"] = Base.Int(index) + end + (tf.execute(desc))[1] + end +end + + +""" + truncated_normal(shape; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TruncatedNormal") do + desc = tf.NodeDescription("TruncatedNormal") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function truncated_normal(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("TruncatedNormal") + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_forest_tree_predict(tree_handle, dense_features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "TensorForestTreePredict") do + desc = tf.NodeDescription("TensorForestTreePredict") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + dense_features_ = convert(Tensor{Float32}, dense_features_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, dense_features_) + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_predict(tree_handle_::tf.TensorHandle, dense_features_::tf.TensorHandle; name=nothing, logits_dimension=nothing) + desc = tf.EagerOp("TensorForestTreePredict") + tf.add_input(desc, tree_handle_) + tf.add_input(desc, dense_features_) + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + (tf.execute(desc))[1] + end +end + + +""" + stack_v2(max_size; stack_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + local desc + tf.with_op_name(name, "StackV2") do + desc = tf.NodeDescription("StackV2") + max_size_ = convert(Tensor{Int32}, max_size_) + tf.add_input(desc, max_size_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack_v2(max_size_::tf.TensorHandle; name=nothing, elem_type=nothing, stack_name=nothing) + desc = tf.EagerOp("StackV2") + tf.add_input(desc, max_size_) + if elem_type !== nothing + 
desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + (tf.execute(desc))[1] + end +end + + +""" + accumulator_num_accumulated(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing) + local desc + tf.with_op_name(name, "AccumulatorNumAccumulated") do + desc = tf.NodeDescription("AccumulatorNumAccumulated") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function accumulator_num_accumulated(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AccumulatorNumAccumulated") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + reader_reset_v2(reader_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderResetV2") do + desc = tf.NodeDescription("ReaderResetV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_reset_v2(reader_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderResetV2") + tf.add_input(desc, reader_handle_) + (tf.execute(desc))[1] + end +end + + +""" + apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAddSign") do + desc = tf.NodeDescription("ApplyAddSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + alpha_ = convert(Tensor{Any}, alpha_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_add_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, alpha_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAddSign") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(sign_decay_) + desc["T"] = tf.data_type(beta_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + rint(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rint(x_; name=nothing) + local desc + tf.with_op_name(name, "Rint") do + desc = tf.NodeDescription("Rint") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function rint(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Rint") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=uniform) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + local desc + tf.with_op_name(name, "ExtractGlimpse") do + desc = tf.NodeDescription("ExtractGlimpse") + input_ = convert(Tensor{Float32}, input_) + size_ = convert(Tensor{Int32}, size_) + offsets_ = convert(Tensor{Float32}, offsets_) + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, offsets_) + if centered !== nothing + desc["centered"] = Base.Bool(centered) + end + if normalized !== nothing + desc["normalized"] = Base.Bool(normalized) + end + if uniform_noise !== nothing + desc["uniform_noise"] = Base.Bool(uniform_noise) + end + if noise !== nothing + desc["noise"] = Base.String(noise) + end + end + tf.Tensor(tf.Operation(desc)) + end + function extract_glimpse(input_::tf.TensorHandle, size_::tf.TensorHandle, offsets_::tf.TensorHandle; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + desc = tf.EagerOp("ExtractGlimpse") + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, offsets_) + if centered !== nothing + desc["centered"] = Base.Bool(centered) + end + if normalized !== nothing + desc["normalized"] = Base.Bool(normalized) + end + if uniform_noise !== nothing + desc["uniform_noise"] = Base.Bool(uniform_noise) + end + if noise !== nothing + desc["noise"] = Base.String(noise) + end + (tf.execute(desc))[1] + end +end + + +""" + string_to_hash_bucket_strong(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketStrong") do + desc = tf.NodeDescription("StringToHashBucketStrong") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if key !== nothing + desc["key"] = map(Base.identity, key) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_to_hash_bucket_strong(input_::tf.TensorHandle; name=nothing, num_buckets=nothing, key=nothing) + desc = tf.EagerOp("StringToHashBucketStrong") + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if key !== nothing + desc["key"] = map(Base.identity, key) + end + (tf.execute(desc))[1] + end +end + + +""" + one_shot_iterator(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OneShotIterator") do + desc = tf.NodeDescription("OneShotIterator") + if dataset_factory !== nothing + desc["dataset_factory"] = Base.identity(dataset_factory) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, 
output_shapes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OneShotIterator") + if dataset_factory !== nothing + desc["dataset_factory"] = Base.identity(dataset_factory) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceSparseApplyMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end + + +""" + save_slices(filename, tensor_names, shapes_and_slices, data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "SaveSlices") do + desc = tf.NodeDescription("SaveSlices") + filename_ = convert(Tensor{String}, filename_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shapes_and_slices_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
save_slices(filename_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shapes_and_slices_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing) + desc = tf.EagerOp("SaveSlices") + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shapes_and_slices_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_dataset_cardinality(input_dataset) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetCardinality") do + desc = tf.NodeDescription("ExperimentalDatasetCardinality") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_dataset_cardinality(input_dataset_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalDatasetCardinality") + tf.add_input(desc, input_dataset_) + (tf.execute(desc))[1] + end +end + + +""" + is_finite(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_finite(x_; name=nothing) + local desc + tf.with_op_name(name, "IsFinite") do + desc = tf.NodeDescription("IsFinite") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_finite(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IsFinite") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_numa_map_and_batch_dataset(input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder; preserve_cardinality=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + batch_size_ = convert(Tensor{Int64}, batch_size_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_numa_map_and_batch_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, batch_size_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; 
name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + (tf.execute(desc))[1] + end +end + + +""" + all_to_all(input, group_assignment) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + local desc + tf.with_op_name(name, "AllToAll") do + desc = tf.NodeDescription("AllToAll") + input_ = convert(Tensor{Any}, input_) + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) + if concat_dimension !== nothing + desc["concat_dimension"] = Base.Int(concat_dimension) + end + if split_dimension !== nothing + desc["split_dimension"] = Base.Int(split_dimension) + end + if split_count !== nothing + desc["split_count"] = Base.Int(split_count) + end + end + tf.Tensor(tf.Operation(desc)) + end + function all_to_all(input_::tf.TensorHandle, group_assignment_::tf.TensorHandle; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + desc = tf.EagerOp("AllToAll") + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) + if concat_dimension !== nothing + desc["concat_dimension"] = Base.Int(concat_dimension) + end + if split_dimension !== nothing + desc["split_dimension"] = Base.Int(split_dimension) + end + if split_count !== nothing + desc["split_count"] = Base.Int(split_count) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + take_many_sparse_from_tensors_map(sparse_handles; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TakeManySparseFromTensorsMap") do + desc = tf.NodeDescription("TakeManySparseFromTensorsMap") + sparse_handles_ = convert(Tensor{Int64}, sparse_handles_) + tf.add_input(desc, sparse_handles_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function take_many_sparse_from_tensors_map(sparse_handles_::tf.TensorHandle; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TakeManySparseFromTensorsMap") + tf.add_input(desc, sparse_handles_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if 
container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + tf.execute(desc) + end +end + + +""" + batch_matrix_diag_part(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_diag_part(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDiagPart") do + desc = tf.NodeDescription("BatchMatrixDiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_diag_part(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchMatrixDiagPart") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + fixed_length_record_dataset(filenames, header_bytes, record_bytes, footer_bytes, buffer_size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDataset") do + desc = tf.NodeDescription("FixedLengthRecordDataset") + filenames_ = convert(Tensor{String}, filenames_) + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) + end + tf.Tensor(tf.Operation(desc)) + end + function fixed_length_record_dataset(filenames_::tf.TensorHandle, header_bytes_::tf.TensorHandle, record_bytes_::tf.TensorHandle, footer_bytes_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FixedLengthRecordDataset") + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) + (tf.execute(desc))[1] + end +end + + +""" + stack_push(handle, elem; swap_memory=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPush") do + desc = tf.NodeDescription("StackPush") + handle_ = convert(Tensor{String}, handle_) + elem_ = convert(Tensor{Any}, elem_) + (elem_,) = tf.tf_promote(elem_) + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack_push(handle_::tf.TensorHandle, elem_::tf.TensorHandle; name=nothing, swap_memory=nothing) + desc = tf.EagerOp("StackPush") + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + desc["T"] = tf.data_type(elem_) + (tf.execute(desc))[1] + end +end + + +""" + placeholder_v2() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderV2") do + desc = tf.NodeDescription("PlaceholderV2") + if dtype !== nothing + desc["dtype"] = 
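+                # Base.identity stores the attr exactly as passed (a TF DataType), with no conversion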
Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("PlaceholderV2") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + (tf.execute(desc))[1] + end +end + + +""" + multi_device_iterator_init(dataset, multi_device_iterator, max_buffer_size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorInit") do + desc = tf.NodeDescription("MultiDeviceIteratorInit") + dataset_ = convert(Tensor{Any}, dataset_) + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_) + tf.add_input(desc, dataset_) + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, max_buffer_size_) + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator_init(dataset_::tf.TensorHandle, multi_device_iterator_::tf.TensorHandle, max_buffer_size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MultiDeviceIteratorInit") + tf.add_input(desc, dataset_) + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, max_buffer_size_) + (tf.execute(desc))[1] + end +end + + +""" + gcs_configure_block_cache(max_cache_size, block_size, max_staleness) + +Re-configures the GCS block cache with the new configuration values. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureBlockCache") do + desc = tf.NodeDescription("GcsConfigureBlockCache") + max_cache_size_ = convert(Tensor{Any}, max_cache_size_) + block_size_ = convert(Tensor{Any}, block_size_) + max_staleness_ = convert(Tensor{Any}, max_staleness_) + tf.add_input(desc, max_cache_size_) + tf.add_input(desc, block_size_) + tf.add_input(desc, max_staleness_) + end + tf.Tensor(tf.Operation(desc)) + end + function gcs_configure_block_cache(max_cache_size_::tf.TensorHandle, block_size_::tf.TensorHandle, max_staleness_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GcsConfigureBlockCache") + tf.add_input(desc, max_cache_size_) + tf.add_input(desc, block_size_) + tf.add_input(desc, max_staleness_) + (tf.execute(desc))[1] + end +end + + +""" + queue_dequeue_v2(handle; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueV2") do + desc = tf.NodeDescription("QueueDequeueV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue_v2(handle_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueV2") + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if 
timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_rms_prop_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + transpose(x, perm) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function transpose(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "Transpose") do + desc = tf.NodeDescription("Transpose") + x_ = convert(Tensor{Any}, x_) + perm_ = convert(Tensor{Int32}, perm_) + (perm_,) = tf.tf_promote(perm_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + end + tf.Tensor(tf.Operation(desc)) + end + function transpose(x_::tf.TensorHandle, perm_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Transpose") + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + desc["T"] = tf.data_type(x_) + desc["Tperm"] = tf.data_type(perm_) + (tf.execute(desc))[1] + end +end + + +""" + ifft(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT") do + desc = tf.NodeDescription("IFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function ifft(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IFFT") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = 
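+            # (indices_ was shifted down by one above: Julia indexes from 1, TensorFlow from 0)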
convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_sum_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentSumWithNumSegments") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + queue_is_closed_v2(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_is_closed_v2(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueIsClosedV2") do + desc = tf.NodeDescription("QueueIsClosedV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function queue_is_closed_v2(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("QueueIsClosedV2") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + parameterized_truncated_normal(shape, means, stdevs, minvals, maxvals; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ParameterizedTruncatedNormal") do + desc = tf.NodeDescription("ParameterizedTruncatedNormal") + shape_ = convert(Tensor{Any}, shape_) + means_ = convert(Tensor{Any}, means_) + stdevs_ = convert(Tensor{Any}, stdevs_) + minvals_ = convert(Tensor{Any}, minvals_) + maxvals_ = convert(Tensor{Any}, maxvals_) + (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + tf.add_input(desc, means_) + tf.add_input(desc, stdevs_) + tf.add_input(desc, minvals_) + tf.add_input(desc, maxvals_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function parameterized_truncated_normal(shape_::tf.TensorHandle, means_::tf.TensorHandle, stdevs_::tf.TensorHandle, minvals_::tf.TensorHandle, maxvals_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("ParameterizedTruncatedNormal") + tf.add_input(desc, shape_) + tf.add_input(desc, means_) + tf.add_input(desc, stdevs_) + tf.add_input(desc, minvals_) + tf.add_input(desc, maxvals_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + desc["dtype"] = tf.data_type(means_) + desc["dtype"] = tf.data_type(stdevs_) + desc["dtype"] = tf.data_type(minvals_) + desc["dtype"] = tf.data_type(maxvals_) + (tf.execute(desc))[1] + end +end + + +""" + diag_part(input) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function diag_part(input_; name=nothing) + local desc + tf.with_op_name(name, "DiagPart") do + desc = tf.NodeDescription("DiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function diag_part(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DiagPart") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) + local desc + tf.with_op_name(name, "KmeansPlusPlusInitialization") do + desc = tf.NodeDescription("KmeansPlusPlusInitialization") + points_ = convert(Tensor{Float32}, points_) + num_to_sample_ = convert(Tensor{Int64}, num_to_sample_) + seed_ = convert(Tensor{Int64}, seed_) + num_retries_per_sample_ = convert(Tensor{Int64}, num_retries_per_sample_) + tf.add_input(desc, points_) + tf.add_input(desc, num_to_sample_) + tf.add_input(desc, seed_) + tf.add_input(desc, num_retries_per_sample_) + end + tf.Tensor(tf.Operation(desc)) + end + function kmeans_plus_plus_initialization(points_::tf.TensorHandle, num_to_sample_::tf.TensorHandle, seed_::tf.TensorHandle, num_retries_per_sample_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("KmeansPlusPlusInitialization") + tf.add_input(desc, points_) + tf.add_input(desc, num_to_sample_) + tf.add_input(desc, seed_) + tf.add_input(desc, num_retries_per_sample_) + (tf.execute(desc))[1] + end +end + + +""" + regex_replace(input, pattern, rewrite; replace_global=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + local desc + tf.with_op_name(name, "RegexReplace") do + desc = tf.NodeDescription("RegexReplace") + input_ = convert(Tensor{String}, input_) + pattern_ = convert(Tensor{String}, pattern_) + rewrite_ = convert(Tensor{String}, rewrite_) + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) + tf.add_input(desc, rewrite_) + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + end + tf.Tensor(tf.Operation(desc)) + end + function regex_replace(input_::tf.TensorHandle, pattern_::tf.TensorHandle, rewrite_::tf.TensorHandle; name=nothing, replace_global=nothing) + desc = tf.EagerOp("RegexReplace") + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) + tf.add_input(desc, rewrite_) + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b; adjoint_a=false, adjoint_b=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + local desc + tf.with_op_name(name, "SparseTensorDenseMatMul") do + desc = tf.NodeDescription("SparseTensorDenseMatMul") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, 
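+            # a_indices_ was likewise shifted down by one above for TensorFlow's 0-based indexing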
a_shape_) + b_ = convert(Tensor{Any}, b_) + (a_values_, b_) = tf.tf_promote(a_values_, b_) + (a_indices_,) = tf.tf_promote(a_indices_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) + if adjoint_a !== nothing + desc["adjoint_a"] = Base.Bool(adjoint_a) + end + if adjoint_b !== nothing + desc["adjoint_b"] = Base.Bool(adjoint_b) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_tensor_dense_mat_mul(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + desc = tf.EagerOp("SparseTensorDenseMatMul") + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) + if adjoint_a !== nothing + desc["adjoint_a"] = Base.Bool(adjoint_a) + end + if adjoint_b !== nothing + desc["adjoint_b"] = Base.Bool(adjoint_b) + end + desc["Tindices"] = tf.data_type(a_indices_) + desc["T"] = tf.data_type(a_values_) + desc["T"] = tf.data_type(b_) + (tf.execute(desc))[1] + end +end + + +""" + map_defun(arguments, captured_inputs; Tcaptured=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + local desc + tf.with_op_name(name, "MapDefun") do + desc = tf.NodeDescription("MapDefun") + arguments_ = [convert(Tensor{Any}, x) for x = arguments_] + captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_] + tf.add_input(desc, arguments_) + tf.add_input(desc, captured_inputs_) + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + end + tf.Tensor(tf.Operation(desc)) + end + function map_defun(arguments_::tf.TensorHandle, captured_inputs_::tf.TensorHandle; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + desc = tf.EagerOp("MapDefun") + tf.add_input(desc, arguments_) + tf.add_input(desc, captured_inputs_) + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + (tf.execute(desc))[1] + end +end + + +""" + thread_unsafe_unigram_candidate_sampler(true_classes; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do + desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if 
num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function thread_unsafe_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler") + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + tf.execute(desc) + end +end + + +""" + retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + parallel_concat(values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "ParallelConcat") do + desc = tf.NodeDescription("ParallelConcat") + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function parallel_concat(values_::tf.TensorHandle; name=nothing, N=nothing, shape=nothing) + desc = tf.EagerOp("ParallelConcat") + tf.add_input(desc, values_) + if N !== 
nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_find_v2(table_handle, keys, default_value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFindV2") do + desc = tf.NodeDescription("LookupTableFindV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + default_value_ = convert(Tensor{Any}, default_value_) + (keys_,) = tf.tf_promote(keys_) + (default_value_,) = tf.tf_promote(default_value_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_find_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableFindV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(default_value_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_forest_tree_deserialize(tree_handle, tree_config) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeDeserialize") do + desc = tf.NodeDescription("TensorForestTreeDeserialize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tree_config_ = convert(Tensor{String}, tree_config_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_deserialize(tree_handle_::tf.TensorHandle, tree_config_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorForestTreeDeserialize") + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_momentum_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] 
= Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + fake_quant_with_min_max_args(inputs; min=?, max=?, num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do + desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") + inputs_ = convert(Tensor{Float32}, inputs_) + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fake_quant_with_min_max_args(inputs_::tf.TensorHandle; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxArgs") + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_gradient_descent(var, alpha, delta; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyGradientDescent") do + desc = tf.NodeDescription("ResourceApplyGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + delta_ = convert(Tensor{Any}, delta_) + (alpha_, delta_) = tf.tf_promote(alpha_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(delta_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_sliding_window_dataset(input_dataset, window_size, window_shift, window_stride) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do + desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + window_size_ = convert(Tensor{Int64}, window_size_) + window_shift_ = convert(Tensor{Int64}, window_shift_) + window_stride_ = convert(Tensor{Int64}, window_stride_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, window_size_) + tf.add_input(desc, window_shift_) + tf.add_input(desc, 
window_stride_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_sliding_window_dataset(input_dataset_::tf.TensorHandle, window_size_::tf.TensorHandle, window_shift_::tf.TensorHandle, window_stride_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSlidingWindowDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, window_size_) + tf.add_input(desc, window_shift_) + tf.add_input(desc, window_stride_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + decode_raw(bytes; little_endian=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + local desc + tf.with_op_name(name, "DecodeRaw") do + desc = tf.NodeDescription("DecodeRaw") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if little_endian !== nothing + desc["little_endian"] = Base.Bool(little_endian) + end + end + tf.Tensor(tf.Operation(desc)) + end + function decode_raw(bytes_::tf.TensorHandle; name=nothing, out_type=nothing, little_endian=nothing) + desc = tf.EagerOp("DecodeRaw") + tf.add_input(desc, bytes_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if little_endian !== nothing + desc["little_endian"] = Base.Bool(little_endian) + end + (tf.execute(desc))[1] + end +end + + +""" + fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max; num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") + gradients_ = convert(Tensor{Float32}, gradients_) + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fake_quant_with_min_max_vars_per_channel_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + tf.execute(desc) + end 
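+        # Note: FakeQuantWithMinMaxVarsPerChannelGradient has three outputs, so the
+        # eager path returns the full vector of handles from tf.execute (gradients
+        # w.r.t. inputs, min, and max) instead of indexing [1] as single-output ops do.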
+end + + +""" + unique_with_counts_v2(x, axis; out_idx=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueWithCountsV2") do + desc = tf.NodeDescription("UniqueWithCountsV2") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int64}, axis_) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unique_with_counts_v2(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueWithCountsV2") + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + desc["T"] = tf.data_type(x_) + desc["Taxis"] = tf.data_type(axis_) + tf.execute(desc) + end +end + + +""" + experimental_sleep_dataset(input_dataset, sleep_microseconds) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSleepDataset") do + desc = tf.NodeDescription("ExperimentalSleepDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, sleep_microseconds_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_sleep_dataset(input_dataset_::tf.TensorHandle, sleep_microseconds_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSleepDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, sleep_microseconds_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tpu_replicated_output(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) + local desc + tf.with_op_name(name, "TPUReplicatedOutput") do + desc = tf.NodeDescription("TPUReplicatedOutput") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_replicas + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tpu_replicated_output(input_::tf.TensorHandle; name=nothing, num_replicas=nothing) + desc = tf.EagerOp("TPUReplicatedOutput") + tf.add_input(desc, input_) + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + lower_bound(sorted_inputs, values; out_type=Int32) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "LowerBound") do + desc = tf.NodeDescription("LowerBound") + sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) + values_ = convert(Tensor{Any}, values_) + (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) + tf.add_input(desc, sorted_inputs_) + tf.add_input(desc, values_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function lower_bound(sorted_inputs_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("LowerBound") + tf.add_input(desc, sorted_inputs_) + tf.add_input(desc, values_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T"] = tf.data_type(sorted_inputs_) + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + tan(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tan(x_; name=nothing) + local desc + tf.with_op_name(name, "Tan") do + desc = tf.NodeDescription("Tan") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function tan(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Tan") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + enter(data; is_constant=false, parallel_iterations=10) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + local desc + tf.with_op_name(name, "Enter") do + desc = tf.NodeDescription("Enter") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function enter(data_::tf.TensorHandle; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + desc = tf.EagerOp("Enter") + tf.add_input(desc, data_) + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + infeed_enqueue_tuple(inputs; layouts=Int64[], device_ordinal=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "InfeedEnqueueTuple") do + desc = tf.NodeDescription("InfeedEnqueueTuple") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if layouts !== nothing + desc["layouts"] = map(Base.identity, layouts) + end + if device_ordinal !== 
nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function infeed_enqueue_tuple(inputs_::tf.TensorHandle; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + desc = tf.EagerOp("InfeedEnqueueTuple") + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if layouts !== nothing + desc["layouts"] = map(Base.identity, layouts) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + (tf.execute(desc))[1] + end +end + + +""" + _set_global_tpu_array(topology) + +An op that informs a host of the global ids of all the TPUs in the system. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _set_global_tpu_array(topology_; name=nothing) + local desc + tf.with_op_name(name, "_SetGlobalTPUArray") do + desc = tf.NodeDescription("_SetGlobalTPUArray") + topology_ = convert(Tensor{String}, topology_) + tf.add_input(desc, topology_) + end + tf.Tensor(tf.Operation(desc)) + end + function _set_global_tpu_array(topology_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_SetGlobalTPUArray") + tf.add_input(desc, topology_) + (tf.execute(desc))[1] + end +end + + +""" + square(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function square(x_; name=nothing) + local desc + tf.with_op_name(name, "Square") do + desc = tf.NodeDescription("Square") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function square(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Square") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + debug_gradient_ref_identity(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing) + local desc + tf.with_op_name(name, "DebugGradientRefIdentity") do + desc = tf.NodeDescription("DebugGradientRefIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function debug_gradient_ref_identity(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("DebugGradientRefIdentity") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdadelta") do + desc = tf.NodeDescription("ApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) +
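# inputs must be added in the order the op declares them: var, accum, accum_update, lr, rho, epsilon, grad +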
tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdadelta") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(accum_update_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_group_by_window_dataset(input_dataset, key_func_other_arguments, reduce_func_other_arguments, window_size_func_other_arguments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do + desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, window_size_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if window_size_func !== nothing + desc["window_size_func"] = Base.identity(window_size_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Twindow_size_func_other_arguments !== nothing + desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_group_by_window_dataset(input_dataset_::tf.TensorHandle, key_func_other_arguments_::tf.TensorHandle, reduce_func_other_arguments_::tf.TensorHandle, window_size_func_other_arguments_::tf.TensorHandle; name=nothing, 
key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalGroupByWindowDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, window_size_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if window_size_func !== nothing + desc["window_size_func"] = Base.identity(window_size_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Twindow_size_func_other_arguments !== nothing + desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + audio_summary(tag, tensor; max_outputs=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "AudioSummary") do + desc = tf.NodeDescription("AudioSummary") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + if sample_rate !== nothing + desc["sample_rate"] = Base.identity(sample_rate) + end + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + tf.Tensor(tf.Operation(desc)) + end + function audio_summary(tag_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, sample_rate=nothing, max_outputs=nothing) + desc = tf.EagerOp("AudioSummary") + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + if sample_rate !== nothing + desc["sample_rate"] = Base.identity(sample_rate) + end + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + (tf.execute(desc))[1] + end +end + + +""" + squared_difference(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function squared_difference(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "SquaredDifference") do + desc = tf.NodeDescription("SquaredDifference") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function squared_difference(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SquaredDifference") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_take_while_dataset(input_dataset, other_arguments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, 
Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalTakeWhileDataset") do + desc = tf.NodeDescription("ExperimentalTakeWhileDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_take_while_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalTakeWhileDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + scatter_nd_update(ref, indices, updates; use_locking=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdUpdate") do + desc = tf.NodeDescription("ScatterNdUpdate") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_nd_update(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdUpdate") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + dynamic_stitch(indices, data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "DynamicStitch") do + desc = tf.NodeDescription("DynamicStitch") + indices_ = [convert(Tensor{Int32}, x) for x = indices_] + data_ = [convert(Tensor{Any}, x) for x = data_] + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, indices_) + tf.add_input(desc, data_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function dynamic_stitch(indices_::tf.TensorHandle, 
data_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("DynamicStitch") + tf.add_input(desc, indices_) + tf.add_input(desc, data_) + if N !== nothing + desc["N"] = Base.Int(N) + end + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + ones_like(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ones_like(x_; name=nothing) + local desc + tf.with_op_name(name, "OnesLike") do + desc = tf.NodeDescription("OnesLike") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function ones_like(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("OnesLike") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + fractional_max_pool_grad(orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence; overlapping=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + local desc + tf.with_op_name(name, "FractionalMaxPoolGrad") do + desc = tf.NodeDescription("FractionalMaxPoolGrad") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) + col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) + (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, out_backprop_) + tf.add_input(desc, row_pooling_sequence_) + tf.add_input(desc, col_pooling_sequence_) + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fractional_max_pool_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, out_backprop_::tf.TensorHandle, row_pooling_sequence_::tf.TensorHandle, col_pooling_sequence_::tf.TensorHandle; name=nothing, overlapping=nothing) + desc = tf.EagerOp("FractionalMaxPoolGrad") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, out_backprop_) + tf.add_input(desc, row_pooling_sequence_) + tf.add_input(desc, col_pooling_sequence_) + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + remote_call(target, args) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + local desc + tf.with_op_name(name, "RemoteCall") do + desc = tf.NodeDescription("RemoteCall") + target_ = convert(Tensor{String}, target_) + args_ = [convert(Tensor{Any}, x) for x = args_] + tf.add_input(desc, target_) + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + end + tf.Tensor(tf.Operation(desc)) + end + function remote_call(target_::tf.TensorHandle, args_::tf.TensorHandle; 
name=nothing, Tin=nothing, Tout=nothing, f=nothing) + desc = tf.EagerOp("RemoteCall") + tf.add_input(desc, target_) + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + (tf.execute(desc))[1] + end +end + + +""" + gather(params, indices; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "Gather") do + desc = tf.NodeDescription("Gather") + params_ = convert(Tensor{Any}, params_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (params_,) = tf.tf_promote(params_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + tf.Tensor(tf.Operation(desc)) + end + function gather(params_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("Gather") + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + desc["Tparams"] = tf.data_type(params_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_mat_mul(a, b, min_a, max_a, min_b, max_b; transpose_a=false, transpose_b=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + local desc + tf.with_op_name(name, "QuantizedMatMul") do + desc = tf.NodeDescription("QuantizedMatMul") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + min_a_ = convert(Tensor{Float32}, min_a_) + max_a_ = convert(Tensor{Float32}, max_a_) + min_b_ = convert(Tensor{Float32}, min_b_) + max_b_ = convert(Tensor{Float32}, max_b_) + (a_,) = tf.tf_promote(a_) + (b_,) = tf.tf_promote(b_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + tf.add_input(desc, min_a_) + tf.add_input(desc, max_a_) + tf.add_input(desc, min_b_) + tf.add_input(desc, max_b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle, min_a_::tf.TensorHandle, max_a_::tf.TensorHandle, min_b_::tf.TensorHandle, max_b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing) + desc = tf.EagerOp("QuantizedMatMul") + tf.add_input(desc, a_) + tf.add_input(desc, b_) + tf.add_input(desc, min_a_) + tf.add_input(desc, max_a_) + tf.add_input(desc, min_b_) + tf.add_input(desc, max_b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + desc["T1"] = tf.data_type(a_) + desc["T2"] = tf.data_type(b_) + tf.execute(desc) + end +end + + +""" + unicode_decode_with_offsets(input; errors=replace, replacement_char=65533, replace_control_characters=false) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeDecodeWithOffsets") do + desc = tf.NodeDescription("UnicodeDecodeWithOffsets") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unicode_decode_with_offsets(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeDecodeWithOffsets") + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + tf.execute(desc) + end +end + + +""" + enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + if table_ids !== nothing + desc["table_ids"] = map(Base.identity, table_ids) + end + end + tf.Tensor(tf.Operation(desc)) + end + function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_::tf.TensorHandle, embedding_indices_::tf.TensorHandle, aggregation_weights_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing 
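+        # N, when provided, records the common length of the three input lists as an
+        # attribute (presumably one entry per embedding table), mirroring the graph
+        # builder above.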
+ desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + if table_ids !== nothing + desc["table_ids"] = map(Base.identity, table_ids) + end + (tf.execute(desc))[1] + end +end + + +""" + accumulator_apply_gradient(handle, local_step, gradient) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorApplyGradient") do + desc = tf.NodeDescription("AccumulatorApplyGradient") + handle_ = convert(Tensor{String}, handle_) + local_step_ = convert(Tensor{Int64}, local_step_) + gradient_ = convert(Tensor{Any}, gradient_) + (gradient_,) = tf.tf_promote(gradient_) + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function accumulator_apply_gradient(handle_::tf.TensorHandle, local_step_::tf.TensorHandle, gradient_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("AccumulatorApplyGradient") + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(gradient_) + (tf.execute(desc))[1] + end +end + + +""" + write_summary(writer, step, tensor, tag, summary_metadata) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + local desc + tf.with_op_name(name, "WriteSummary") do + desc = tf.NodeDescription("WriteSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tensor_ = convert(Tensor{Any}, tensor_) + tag_ = convert(Tensor{String}, tag_) + summary_metadata_ = convert(Tensor{String}, summary_metadata_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, summary_metadata_) + end + tf.Tensor(tf.Operation(desc)) + end + function write_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tensor_::tf.TensorHandle, tag_::tf.TensorHandle, summary_metadata_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WriteSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, summary_metadata_) + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d(input, filter, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2D") do + desc = tf.NodeDescription("QuantizedConv2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = 
convert(Tensor{Float32}, max_filter_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2D") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + resource_apply_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyMomentum") do + desc = tf.NodeDescription("ResourceApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end + + +""" + log1p(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
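+    # A minimal eager-call sketch (assuming `h` is a tf.TensorHandle wrapping a
+    # floating-point tensor): log1p(h) builds EagerOp("Log1p"), adds h as the sole
+    # input, records its dtype under attr "T", and returns the first output handle
+    # from tf.execute.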
tf.@op function log1p(x_; name=nothing) + local desc + tf.with_op_name(name, "Log1p") do + desc = tf.NodeDescription("Log1p") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function log1p(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Log1p") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_clear(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapClear") do + desc = tf.NodeDescription("OrderedMapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_scatter_update(resource, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterUpdate") do + desc = tf.NodeDescription("ResourceScatterUpdate") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_update(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterUpdate") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + barrier_take_many(handle, num_elements; allow_small_batch=false, wait_for_incomplete=false, timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, 
wait_for_incomplete=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "BarrierTakeMany") do + desc = tf.NodeDescription("BarrierTakeMany") + handle_ = convert(Tensor{String}, handle_) + num_elements_ = convert(Tensor{Int32}, num_elements_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_elements_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if allow_small_batch !== nothing + desc["allow_small_batch"] = Base.Bool(allow_small_batch) + end + if wait_for_incomplete !== nothing + desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function barrier_take_many(handle_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + desc = tf.EagerOp("BarrierTakeMany") + tf.add_input(desc, handle_) + tf.add_input(desc, num_elements_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if allow_small_batch !== nothing + desc["allow_small_batch"] = Base.Bool(allow_small_batch) + end + if wait_for_incomplete !== nothing + desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + tf.execute(desc) + end +end + + +""" + resource_apply_keras_momentum(var, accum, lr, grad, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceApplyKerasMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_keras_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyKerasMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end + + +""" + generate_big_query_reader_partitions(; test_end_point=) + +Generates serialized partition messages suitable for batch reads. 
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do + desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + tf.Tensor(tf.Operation(desc)) + end + function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + desc = tf.EagerOp("GenerateBigQueryReaderPartitions") + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + (tf.execute(desc))[1] + end +end + + +""" + _xla_recv_at_host(dynamic_key) + +A placeholder op for multiple values that will be sent to TensorFlow from a +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaRecvAtHost") do + desc = tf.NodeDescription("_XlaRecvAtHost") + dynamic_key_ = convert(Tensor{String}, dynamic_key_) + tf.add_input(desc, dynamic_key_) + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _xla_recv_at_host(dynamic_key_::tf.TensorHandle; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + desc = tf.EagerOp("_XlaRecvAtHost") + tf.add_input(desc, dynamic_key_) + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_avg_pool(input, min_input, max_input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, 
"QuantizedAvgPool") do + desc = tf.NodeDescription("QuantizedAvgPool") + input_ = convert(Tensor{Any}, input_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_avg_pool(input_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("QuantizedAvgPool") + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + resource_apply_adam_with_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do + desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + vhat_ = convert(Tensor{Any}, vhat_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, vhat_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_adam_with_amsgrad(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, vhat_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, vhat_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + 
tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(beta1_power_) + desc["T"] = tf.data_type(beta2_power_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(beta1_) + desc["T"] = tf.data_type(beta2_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_resize(input_handle, size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_resize(input_handle_, size_; name=nothing) + local desc + tf.with_op_name(name, "TensorListResize") do + desc = tf.NodeDescription("TensorListResize") + input_handle_ = convert(Tensor{Any}, input_handle_) + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, size_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_resize(input_handle_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorListResize") + tf.add_input(desc, input_handle_) + tf.add_input(desc, size_) + (tf.execute(desc))[1] + end +end + + +""" + _host_recv(; client_terminated=false) + +Receives the named tensor from send_device on recv_device. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostRecv") do + desc = tf.NodeDescription("_HostRecv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_HostRecv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_center_bias(tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCenterBias") do + desc = tf.NodeDescription("BoostedTreesCenterBias") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + mean_gradients_ = 
convert(Tensor{Float32}, mean_gradients_) + mean_hessians_ = convert(Tensor{Float32}, mean_hessians_) + l1_ = convert(Tensor{Float32}, l1_) + l2_ = convert(Tensor{Float32}, l2_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, mean_gradients_) + tf.add_input(desc, mean_hessians_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_center_bias(tree_ensemble_handle_::tf.TensorHandle, mean_gradients_::tf.TensorHandle, mean_hessians_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BoostedTreesCenterBias") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, mean_gradients_) + tf.add_input(desc, mean_hessians_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_size_v2(table_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableSizeV2") do + desc = tf.NodeDescription("LookupTableSizeV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + tf.add_input(desc, table_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_size_v2(table_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableSizeV2") + tf.add_input(desc, table_handle_) + (tf.execute(desc))[1] + end +end + + +""" + irfft(input, fft_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT") do + desc = tf.NodeDescription("IRFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function irfft(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IRFFT") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] + end +end + + +""" + inplace_add(x, i, v) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_add(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceAdd") do + desc = tf.NodeDescription("InplaceAdd") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + end + tf.Tensor(tf.Operation(desc)) + end + function inplace_add(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InplaceAdd") + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(v_) + (tf.execute(desc))[1] + end +end + + +""" + bias_add(value, bias; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAdd") do + desc = tf.NodeDescription("BiasAdd") + value_ = convert(Tensor{Any}, value_) + bias_ = convert(Tensor{Any}, bias_) + (value_, bias_) = tf.tf_promote(value_, bias_) + tf.add_input(desc, value_) + tf.add_input(desc, bias_) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) 
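+            # The attribute is only set when the caller supplies it; otherwise the
+            # kernel's registered default ("NHWC", per the docstring above) applies.
+            # A minimal usage sketch, assuming a default graph is active:
+            #   bias_add(rand(Float32, 2, 3), Float32[1, 2, 3])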
+ end + end + tf.Tensor(tf.Operation(desc)) + end + function bias_add(value_::tf.TensorHandle, bias_::tf.TensorHandle; name=nothing, data_format=nothing) + desc = tf.EagerOp("BiasAdd") + tf.add_input(desc, value_) + tf.add_input(desc, bias_) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(value_) + desc["T"] = tf.data_type(bias_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_adam_parameters_grad_accum_debug(parameters, momenta, velocities, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + velocities_ = convert(Tensor{Float32}, velocities_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, velocities_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, velocities_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, velocities_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + _disconnect_host_from_distributed_tpu_system() + +An op that disconnects the TPUs on a host from a running distributed +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing) + local desc + tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do + desc = tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") + end + tf.Tensor(tf.Operation(desc)) + end + function _disconnect_host_from_distributed_tpu_system(; name=nothing) + desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") + (tf.execute(desc))[1] + end +end + + +""" + ragged_range(starts, limits, deltas) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing) + local desc + tf.with_op_name(name, "RaggedRange") do + desc = tf.NodeDescription("RaggedRange") + 
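+            # tf_promote (below) unifies the element types of starts/limits/deltas
+            # so a single "T" attribute can describe all three inputs; the op has
+            # two outputs (nested splits and dense values), which the output loop
+            # that follows collects into a vector.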
starts_ = convert(Tensor{Int32}, starts_) + limits_ = convert(Tensor{Int32}, limits_) + deltas_ = convert(Tensor{Int32}, deltas_) + (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_) + tf.add_input(desc, starts_) + tf.add_input(desc, limits_) + tf.add_input(desc, deltas_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ragged_range(starts_::tf.TensorHandle, limits_::tf.TensorHandle, deltas_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RaggedRange") + tf.add_input(desc, starts_) + tf.add_input(desc, limits_) + tf.add_input(desc, deltas_) + desc["T"] = tf.data_type(starts_) + desc["T"] = tf.data_type(limits_) + desc["T"] = tf.data_type(deltas_) + tf.execute(desc) + end +end + + +""" + window_dataset(input_dataset, size, shift, stride, drop_remainder) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "WindowDataset") do + desc = tf.NodeDescription("WindowDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + size_ = convert(Tensor{Int64}, size_) + shift_ = convert(Tensor{Int64}, shift_) + stride_ = convert(Tensor{Int64}, stride_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, size_) + tf.add_input(desc, shift_) + tf.add_input(desc, stride_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function window_dataset(input_dataset_::tf.TensorHandle, size_::tf.TensorHandle, shift_::tf.TensorHandle, stride_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("WindowDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, size_) + tf.add_input(desc, shift_) + tf.add_input(desc, stride_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + diag(diagonal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function diag(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "Diag") do + desc = tf.NodeDescription("Diag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) + end + tf.Tensor(tf.Operation(desc)) + end + function diag(diagonal_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Diag") + tf.add_input(desc, diagonal_) + desc["T"] = tf.data_type(diagonal_) + (tf.execute(desc))[1] + end +end + + +""" + infeed_dequeue() + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "InfeedDequeue") do + desc = tf.NodeDescription("InfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + 
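+        # Materialize the accumulated NodeDescription as a graph Operation and
+        # wrap its single output as a Tensor.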
tf.Tensor(tf.Operation(desc)) + end + function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("InfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_latency_stats_dataset(input_dataset, tag) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do + desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tag_ = convert(Tensor{String}, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_latency_stats_dataset(input_dataset_::tf.TensorHandle, tag_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalLatencyStatsDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + add_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddSparseToTensorsMap") do + desc = tf.NodeDescription("AddSparseToTensorsMap") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function add_sparse_to_tensors_map(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("AddSparseToTensorsMap") + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(sparse_values_) + (tf.execute(desc))[1] + end +end + + +""" + ragged_gather(params_nested_splits, params_dense_values, indices) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + local desc + 
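+        # `desc` is declared local up front so the assignment made inside the
+        # with_op_name closure below remains visible after the block returns.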
tf.with_op_name(name, "RaggedGather") do + desc = tf.NodeDescription("RaggedGather") + params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_] + params_dense_values_ = convert(Tensor{Any}, params_dense_values_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (indices_,) = tf.tf_promote(indices_) + (params_dense_values_,) = tf.tf_promote(params_dense_values_) + tf.add_input(desc, params_nested_splits_) + tf.add_input(desc, params_dense_values_) + tf.add_input(desc, indices_) + if PARAMS_RAGGED_RANK !== nothing + desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) + end + if OUTPUT_RAGGED_RANK !== nothing + desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function ragged_gather(params_nested_splits_::tf.TensorHandle, params_dense_values_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + desc = tf.EagerOp("RaggedGather") + tf.add_input(desc, params_nested_splits_) + tf.add_input(desc, params_dense_values_) + tf.add_input(desc, indices_) + if PARAMS_RAGGED_RANK !== nothing + desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) + end + if OUTPUT_RAGGED_RANK !== nothing + desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) + end + desc["Tvalues"] = tf.data_type(params_dense_values_) + desc["Tindices"] = tf.data_type(indices_) + tf.execute(desc) + end +end + + +""" + rgb_to_hsv(images) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rgb_to_hsv(images_; name=nothing) + local desc + tf.with_op_name(name, "RGBToHSV") do + desc = tf.NodeDescription("RGBToHSV") + images_ = convert(Tensor{Float32}, images_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + end + tf.Tensor(tf.Operation(desc)) + end + function rgb_to_hsv(images_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RGBToHSV") + tf.add_input(desc, images_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + multi_device_iterator_to_string_handle(multi_device_iterator) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + tf.add_input(desc, multi_device_iterator_) + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator_to_string_handle(multi_device_iterator_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") + tf.add_input(desc, multi_device_iterator_) + (tf.execute(desc))[1] + end +end + + +""" + for_(start, limit, delta, input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + local desc + tf.with_op_name(name, "For") do + desc = tf.NodeDescription("For") + start_ = convert(Tensor{Int32}, start_) + limit_ = convert(Tensor{Int32}, limit_) + delta_ = convert(Tensor{Int32}, delta_) + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, 
delta_) + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + end + tf.Tensor(tf.Operation(desc)) + end + function for_(start_::tf.TensorHandle, limit_::tf.TensorHandle, delta_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, T=nothing, body=nothing) + desc = tf.EagerOp("For") + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, delta_) + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_reduce_max_sparse(input_indices, input_values, input_shape, reduction_axes; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMaxSparse") do + desc = tf.NodeDescription("SparseReduceMaxSparse") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_reduce_max_sparse(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceMaxSparse") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_values_) + tf.execute(desc) + end +end + + +""" + concat_offset(concat_dim, shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatOffset") do + desc = tf.NodeDescription("ConcatOffset") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + shape_ = [convert(Tensor{Int32}, x) for x = shape_] + tf.add_input(desc, concat_dim_) + tf.add_input(desc, shape_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function concat_offset(concat_dim_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("ConcatOffset") + tf.add_input(desc, concat_dim_) + tf.add_input(desc, shape_) + if N !== nothing + desc["N"] = Base.Int(N) + end + tf.execute(desc) + end +end + + +""" + stage(values; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + 
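+        # `values_` is a list input: each element is converted to a Tensor and the
+        # whole vector is added as one list input on the NodeDescription below.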
tf.with_op_name(name, "Stage") do + desc = tf.NodeDescription("Stage") + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stage(values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Stage") + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + switch(data, pred) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function switch(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "Switch") do + desc = tf.NodeDescription("Switch") + data_ = convert(Tensor{Any}, data_) + pred_ = convert(Tensor{Bool}, pred_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, pred_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function switch(data_::tf.TensorHandle, pred_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Switch") + tf.add_input(desc, data_) + tf.add_input(desc, pred_) + desc["T"] = tf.data_type(data_) + tf.execute(desc) + end +end + + +""" + queue_dequeue_many_v2(handle, n; timeout_ms=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueManyV2") do + desc = tf.NodeDescription("QueueDequeueManyV2") + handle_ = convert(Tensor{Any}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_dequeue_many_v2(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueManyV2") + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + (tf.execute(desc))[1] + end +end + + +""" + segment_prod(data, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentProd") do + desc = tf.NodeDescription("SegmentProd") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, 
segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function segment_prod(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SegmentProd") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + (tf.execute(desc))[1] + end +end + + +""" + approximate_equal(x, y; tolerance=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing) + local desc + tf.with_op_name(name, "ApproximateEqual") do + desc = tf.NodeDescription("ApproximateEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if tolerance !== nothing + desc["tolerance"] = Base.identity(tolerance) + end + end + tf.Tensor(tf.Operation(desc)) + end + function approximate_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, tolerance=nothing) + desc = tf.EagerOp("ApproximateEqual") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if tolerance !== nothing + desc["tolerance"] = Base.identity(tolerance) + end + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2D") do + desc = tf.NodeDescription("Conv2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2D") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== 
nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + (tf.execute(desc))[1] + end +end + + +""" + cross_replica_sum(input, group_assignment) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing) + local desc + tf.with_op_name(name, "CrossReplicaSum") do + desc = tf.NodeDescription("CrossReplicaSum") + input_ = convert(Tensor{Any}, input_) + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) + end + tf.Tensor(tf.Operation(desc)) + end + function cross_replica_sum(input_::tf.TensorHandle, group_assignment_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("CrossReplicaSum") + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_mat_mul(a, b; transpose_a=false, transpose_b=false, a_is_sparse=false, b_is_sparse=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + local desc + tf.with_op_name(name, "SparseMatMul") do + desc = tf.NodeDescription("SparseMatMul") + a_ = convert(Tensor{Float32}, a_) + b_ = convert(Tensor{Float32}, b_) + (b_,) = tf.tf_promote(b_) + (a_,) = tf.tf_promote(a_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + if a_is_sparse !== nothing + desc["a_is_sparse"] = Base.Bool(a_is_sparse) + end + if b_is_sparse !== nothing + desc["b_is_sparse"] = Base.Bool(b_is_sparse) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + desc = tf.EagerOp("SparseMatMul") + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + if a_is_sparse !== nothing + desc["a_is_sparse"] = Base.Bool(a_is_sparse) + end + if b_is_sparse !== nothing + desc["b_is_sparse"] = Base.Bool(b_is_sparse) + end + desc["Ta"] = tf.data_type(a_) + desc["Tb"] = tf.data_type(b_) + (tf.execute(desc))[1] + end +end + + +""" + _scoped_allocator_split(concat, split) + +Acts roughly like a SplitV Op that splits one tensor into multiple tensors +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorSplit") do + desc = tf.NodeDescription("_ScopedAllocatorSplit") + concat_ = convert(Tensor{Any}, concat_) + split_ = [convert(Tensor{Any}, x) for x = split_] + (concat_, split_) = tf.tf_promote(concat_, split_) + tf.add_input(desc, concat_) + tf.add_input(desc, split_) + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + if shapes !== nothing + 
desc["shapes"] = map(Base.identity, shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _scoped_allocator_split(concat_::tf.TensorHandle, split_::tf.TensorHandle; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + desc = tf.EagerOp("_ScopedAllocatorSplit") + tf.add_input(desc, concat_) + tf.add_input(desc, split_) + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + desc["T"] = tf.data_type(concat_) + desc["T"] = tf.data_type(split_) + tf.execute(desc) + end +end + + +""" + igammac(a, x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igammac(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igammac") do + desc = tf.NodeDescription("Igammac") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function igammac(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Igammac") + tf.add_input(desc, a_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + batch_mat_mul(x, y; adj_x=false, adj_y=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + local desc + tf.with_op_name(name, "BatchMatMul") do + desc = tf.NodeDescription("BatchMatMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if adj_x !== nothing + desc["adj_x"] = Base.Bool(adj_x) + end + if adj_y !== nothing + desc["adj_y"] = Base.Bool(adj_y) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_mat_mul(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, adj_x=nothing, adj_y=nothing) + desc = tf.EagerOp("BatchMatMul") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if adj_x !== nothing + desc["adj_x"] = Base.Bool(adj_x) + end + if adj_y !== nothing + desc["adj_y"] = Base.Bool(adj_y) + end + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, 
mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function enqueue_tpu_embedding_sparse_batch(sample_indices_::tf.TensorHandle, embedding_indices_::tf.TensorHandle, aggregation_weights_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + (tf.execute(desc))[1] + end +end + + +""" + queue_close_v2(handle; cancel_pending_enqueues=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueCloseV2") do + desc = tf.NodeDescription("QueueCloseV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + tf.Tensor(tf.Operation(desc)) + end + function queue_close_v2(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("QueueCloseV2") + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_pack(handle, flow_in; element_shape=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayPack") do + desc = tf.NodeDescription("TensorArrayPack") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_pack(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayPack") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + reader_restore_state(reader_handle, state) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreState") do + desc = tf.NodeDescription("ReaderRestoreState") + reader_handle_ = convert(Tensor{String}, reader_handle_) + state_ = convert(Tensor{String}, state_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) + end + tf.Tensor(tf.Operation(desc)) + end + function reader_restore_state(reader_handle_::tf.TensorHandle, state_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderRestoreState") + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) + (tf.execute(desc))[1] + end +end + + +""" + _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) + +*NOTE*: Do not invoke this operator directly in Python. 
Grappler is +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + local desc + tf.with_op_name(name, "_FusedConv2D") do + desc = tf.NodeDescription("_FusedConv2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + args_ = [convert(Tensor{Any}, x) for x = args_] + (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, args_) + if num_args !== nothing + desc["num_args"] = Base.Int(num_args) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if fused_ops !== nothing + desc["fused_ops"] = map(Base.identity, fused_ops) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _fused_conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle, args_::tf.TensorHandle; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + desc = tf.EagerOp("_FusedConv2D") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, args_) + if num_args !== nothing + desc["num_args"] = Base.Int(num_args) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if fused_ops !== nothing + desc["fused_ops"] = map(Base.identity, fused_ops) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(args_) + (tf.execute(desc))[1] + end +end + + +""" + _read_variables_op(resources) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "_ReadVariablesOp") do + desc = tf.NodeDescription("_ReadVariablesOp") + resources_ = [convert(Tensor{Any}, x) for x = resources_] + tf.add_input(desc, resources_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _read_variables_op(resources_::tf.TensorHandle; name=nothing, N=nothing, dtypes=nothing) + desc = tf.EagerOp("_ReadVariablesOp") + tf.add_input(desc, resources_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + (tf.execute(desc))[1] + end +end + + +""" + mutable_hash_table_of_tensors(; container=, 
shared_name=, use_node_name_sharing=false, value_shape=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensors") do + desc = tf.NodeDescription("MutableHashTableOfTensors") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + desc = tf.EagerOp("MutableHashTableOfTensors") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + read_file(filename) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function read_file(filename_; name=nothing) + local desc + tf.with_op_name(name, "ReadFile") do + desc = tf.NodeDescription("ReadFile") + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, filename_) + end + tf.Tensor(tf.Operation(desc)) + end + function read_file(filename_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReadFile") + tf.add_input(desc, filename_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_mdl_adagrad_light_parameters(parameters, accumulators, weights, benefits; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + weights_ = convert(Tensor{Float32}, weights_) + benefits_ = convert(Tensor{Float32}, benefits_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, weights_) + tf.add_input(desc, benefits_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing 
+ desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, weights_::tf.TensorHandle, benefits_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, weights_) + tf.add_input(desc, benefits_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + fractional_avg_pool_grad(orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence; overlapping=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPoolGrad") do + desc = tf.NodeDescription("FractionalAvgPoolGrad") + orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) + col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) + (out_backprop_,) = tf.tf_promote(out_backprop_) + tf.add_input(desc, orig_input_tensor_shape_) + tf.add_input(desc, out_backprop_) + tf.add_input(desc, row_pooling_sequence_) + tf.add_input(desc, col_pooling_sequence_) + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fractional_avg_pool_grad(orig_input_tensor_shape_::tf.TensorHandle, out_backprop_::tf.TensorHandle, row_pooling_sequence_::tf.TensorHandle, col_pooling_sequence_::tf.TensorHandle; name=nothing, overlapping=nothing) + desc = tf.EagerOp("FractionalAvgPoolGrad") + tf.add_input(desc, orig_input_tensor_shape_) + tf.add_input(desc, out_backprop_) + tf.add_input(desc, row_pooling_sequence_) + tf.add_input(desc, col_pooling_sequence_) + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== 
nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + stateful_standard_normal_v2(resource, algorithm, shape; dtype=Float32, shape_dtype=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + local desc + tf.with_op_name(name, "StatefulStandardNormalV2") do + desc = tf.NodeDescription("StatefulStandardNormalV2") + resource_ = convert(Tensor{Any}, resource_) + algorithm_ = convert(Tensor{Int64}, algorithm_) + shape_ = convert(Tensor{Int64}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, resource_) + tf.add_input(desc, algorithm_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape_dtype !== nothing + desc["shape_dtype"] = Base.identity(shape_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateful_standard_normal_v2(resource_::tf.TensorHandle, algorithm_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, dtype=nothing, shape_dtype=nothing) + desc = tf.EagerOp("StatefulStandardNormalV2") + tf.add_input(desc, resource_) + tf.add_input(desc, algorithm_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape_dtype !== nothing + desc["shape_dtype"] = Base.identity(shape_dtype) + end + desc["shape_dtype"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + bincount(arr, size, weights) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bincount(arr_, size_, weights_; name=nothing) + local desc + tf.with_op_name(name, "Bincount") do + desc = tf.NodeDescription("Bincount") + arr_ = convert(Tensor{Int32}, arr_) + size_ = convert(Tensor{Int32}, size_) + weights_ = convert(Tensor{Any}, weights_) + (weights_,) = tf.tf_promote(weights_) + tf.add_input(desc, arr_) + tf.add_input(desc, size_) + tf.add_input(desc, weights_) + end + tf.Tensor(tf.Operation(desc)) + end + function bincount(arr_::tf.TensorHandle, size_::tf.TensorHandle, weights_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Bincount") + tf.add_input(desc, arr_) + tf.add_input(desc, size_) + tf.add_input(desc, weights_) + desc["T"] = tf.data_type(weights_) + (tf.execute(desc))[1] + end +end + + +""" + inv(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
inv(x_; name=nothing) + local desc + tf.with_op_name(name, "Inv") do + desc = tf.NodeDescription("Inv") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function inv(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Inv") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + apply_proximal_adagrad(var, accum, lr, l1, l2, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalAdagrad") do + desc = tf.NodeDescription("ApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyProximalAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + gather_v2(params, indices, axis) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing) + local desc + tf.with_op_name(name, "GatherV2") do + desc = tf.NodeDescription("GatherV2") + params_ = convert(Tensor{Any}, params_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + axis_ = convert(Tensor{Any}, axis_) + (params_,) = tf.tf_promote(params_) + (indices_,) = tf.tf_promote(indices_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + tf.add_input(desc, axis_) + end + tf.Tensor(tf.Operation(desc)) + end + function gather_v2(params_::tf.TensorHandle, indices_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GatherV2") + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + tf.add_input(desc, axis_) + desc["Tparams"] = tf.data_type(params_) + desc["Tindices"] = tf.data_type(indices_) + desc["Taxis"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + write_file(filename, contents) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_file(filename_, contents_; name=nothing) + local desc + tf.with_op_name(name, "WriteFile") do + desc = tf.NodeDescription("WriteFile") + filename_ = convert(Tensor{String}, 
filename_) + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, filename_) + tf.add_input(desc, contents_) + end + tf.Tensor(tf.Operation(desc)) + end + function write_file(filename_::tf.TensorHandle, contents_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("WriteFile") + tf.add_input(desc, filename_) + tf.add_input(desc, contents_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_get_ensemble_states(tree_ensemble_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do + desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_get_ensemble_states(tree_ensemble_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BoostedTreesGetEnsembleStates") + tf.add_input(desc, tree_ensemble_handle_) + tf.execute(desc) + end +end + + +""" + resource_gather(resource, indices; validate_indices=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceGather") do + desc = tf.NodeDescription("ResourceGather") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_gather(resource_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, validate_indices=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceGather") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_proximal_gradient_descent(var, alpha, l1, l2, delta; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + delta_ = convert(Tensor{Any}, delta_) + (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
resource_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyProximalGradientDescent") + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(delta_) + (tf.execute(desc))[1] + end +end + + +""" + truncate_mod(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncate_mod(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateMod") do + desc = tf.NodeDescription("TruncateMod") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function truncate_mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TruncateMod") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + log_matrix_determinant(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_matrix_determinant(input_; name=nothing) + local desc + tf.with_op_name(name, "LogMatrixDeterminant") do + desc = tf.NodeDescription("LogMatrixDeterminant") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function log_matrix_determinant(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LogMatrixDeterminant") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + irfft2d(input, fft_length) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft2d(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT2D") do + desc = tf.NodeDescription("IRFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + end + tf.Tensor(tf.Operation(desc)) + end + function irfft2d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IRFFT2D") + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_training_predict(tree_ensemble_handle, cached_tree_ids, cached_node_ids, bucketized_features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesTrainingPredict") do + desc = tf.NodeDescription("BoostedTreesTrainingPredict") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_) + cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_) + 
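+ # bucketized_features is a list input; each element is converted to an Int32 tensor below before the list is added as a single input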
bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, cached_tree_ids_) + tf.add_input(desc, cached_node_ids_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_training_predict(tree_ensemble_handle_::tf.TensorHandle, cached_tree_ids_::tf.TensorHandle, cached_node_ids_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + desc = tf.EagerOp("BoostedTreesTrainingPredict") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, cached_tree_ids_) + tf.add_input(desc, cached_node_ids_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + tf.execute(desc) + end +end + + +""" + nearest_neighbors(points, centers, k) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nearest_neighbors(points_, centers_, k_; name=nothing) + local desc + tf.with_op_name(name, "NearestNeighbors") do + desc = tf.NodeDescription("NearestNeighbors") + points_ = convert(Tensor{Float32}, points_) + centers_ = convert(Tensor{Float32}, centers_) + k_ = convert(Tensor{Int64}, k_) + tf.add_input(desc, points_) + tf.add_input(desc, centers_) + tf.add_input(desc, k_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function nearest_neighbors(points_::tf.TensorHandle, centers_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NearestNeighbors") + tf.add_input(desc, points_) + tf.add_input(desc, centers_) + tf.add_input(desc, k_) + tf.execute(desc) + end +end + + +""" + floor(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor(x_; name=nothing) + local desc + tf.with_op_name(name, "Floor") do + desc = tf.NodeDescription("Floor") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function floor(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Floor") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + gradient_accumulators_ = 
convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + write_image_summary(writer, step, tag, tensor, bad_color; max_images=3) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + local desc + tf.with_op_name(name, "WriteImageSummary") do + desc = tf.NodeDescription("WriteImageSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + bad_color_ = convert(Tensor{UInt8}, bad_color_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, bad_color_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + end + tf.Tensor(tf.Operation(desc)) + end + function write_image_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, tensor_::tf.TensorHandle, bad_color_::tf.TensorHandle; name=nothing, max_images=nothing) + desc = tf.EagerOp("WriteImageSummary") + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, bad_color_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + desc["T"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + tile_grad(input, multiples) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tile_grad(input_, multiples_; name=nothing) + local desc + tf.with_op_name(name, "TileGrad") do + desc = tf.NodeDescription("TileGrad") + input_ = convert(Tensor{Any}, input_) + multiples_ = convert(Tensor{Int32}, multiples_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) + end + tf.Tensor(tf.Operation(desc)) + end + function tile_grad(input_::tf.TensorHandle, multiples_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TileGrad") + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + 
tensor_array_grad_v3(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradV3") do + desc = tf.NodeDescription("TensorArrayGradV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_grad_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradV3") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + tf.execute(desc) + end +end + + +""" + enqueue_tpu_embedding_integer_batch(batch, mode_override; device_ordinal=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") + batch_ = [convert(Tensor{Int32}, x) for x = batch_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, batch_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + tf.Tensor(tf.Operation(desc)) + end + function enqueue_tpu_embedding_integer_batch(batch_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") + tf.add_input(desc, batch_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + (tf.execute(desc))[1] + end +end + + +""" + fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNorm") do + desc = tf.NodeDescription("FusedBatchNorm") + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + offset_ = convert(Tensor{Any}, offset_) + mean_ = convert(Tensor{Any}, mean_) + variance_ = convert(Tensor{Any}, variance_) + (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function fused_batch_norm(x_::tf.TensorHandle, 
scale_::tf.TensorHandle, offset_::tf.TensorHandle, mean_::tf.TensorHandle, variance_::tf.TensorHandle; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNorm") + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(scale_) + desc["T"] = tf.data_type(offset_) + desc["T"] = tf.data_type(mean_) + desc["T"] = tf.data_type(variance_) + tf.execute(desc) + end +end + + +""" + logical_and(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_and(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LogicalAnd") do + desc = tf.NodeDescription("LogicalAnd") + x_ = convert(Tensor{Bool}, x_) + y_ = convert(Tensor{Bool}, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function logical_and(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LogicalAnd") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_scatter_update(tensor, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterUpdate") do + desc = tf.NodeDescription("TensorScatterUpdate") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_scatter_update(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorScatterUpdate") + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + desc["T"] = tf.data_type(tensor_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + text_line_reader_v2(; skip_header_lines=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReaderV2") do + desc = tf.NodeDescription("TextLineReaderV2") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TextLineReaderV2") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if 
container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_slice_dataset(components) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorSliceDataset") do + desc = tf.NodeDescription("TensorSliceDataset") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_slice_dataset(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TensorSliceDataset") + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_scatter_v3(handle, indices, value, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV3") do + desc = tf.NodeDescription("TensorArrayScatterV3") + handle_ = convert(Tensor{Any}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_scatter_v3(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayScatterV3") + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + resize_nearest_neighbor_grad(grads, size; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighborGrad") do + desc = tf.NodeDescription("ResizeNearestNeighborGrad") + grads_ = convert(Tensor{Any}, grads_) + size_ = convert(Tensor{Int32}, size_) + (grads_,) = tf.tf_promote(grads_) + tf.add_input(desc, grads_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_nearest_neighbor_grad(grads_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeNearestNeighborGrad") + tf.add_input(desc, grads_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(grads_) + (tf.execute(desc))[1] + end +end + + +""" + apply_power_sign(var, m, lr, 
logbase, sign_decay, beta, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyPowerSign") do + desc = tf.NodeDescription("ApplyPowerSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + logbase_ = convert(Tensor{Any}, logbase_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function apply_power_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, logbase_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyPowerSign") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(logbase_) + desc["T"] = tf.data_type(sign_decay_) + desc["T"] = tf.data_type(beta_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_rebatch_dataset(input_dataset, num_workers) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalRebatchDataset") do + desc = tf.NodeDescription("ExperimentalRebatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_workers_ = convert(Tensor{Int64}, num_workers_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_workers_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_rebatch_dataset(input_dataset_::tf.TensorHandle, num_workers_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalRebatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_workers_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + mirror_pad(input, paddings) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPad") do + desc = tf.NodeDescription("MirrorPad") + input_ 
= convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mirror_pad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, mode=nothing) + desc = tf.EagerOp("MirrorPad") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + desc["T"] = tf.data_type(input_) + desc["Tpaddings"] = tf.data_type(paddings_) + (tf.execute(desc))[1] + end +end + + +""" + logical_not(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_not(x_; name=nothing) + local desc + tf.with_op_name(name, "LogicalNot") do + desc = tf.NodeDescription("LogicalNot") + x_ = convert(Tensor{Bool}, x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function logical_not(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LogicalNot") + tf.add_input(desc, x_) + (tf.execute(desc))[1] + end +end + + +""" + batch_ifft(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT") do + desc = tf.NodeDescription("BatchIFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_ifft(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchIFFT") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_concat_v2(handle, flow_in; element_shape_except0=?) 
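+
+Returns the concatenated value together with a vector of the row sizes of the original elements. A hypothetical eager call, assuming `handle` and `flow_in` are already `tf.TensorHandle`s:
+
+    value, lengths = tensor_array_concat_v2(handle, flow_in)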
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV2") do + desc = tf.NodeDescription("TensorArrayConcatV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_concat_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcatV2") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + tf.execute(desc) + end +end + + +""" + sum(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Sum") do + desc = tf.NodeDescription("Sum") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sum(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Sum") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_predict(tree_ensemble_handle, bucketized_features) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesPredict") do + desc = tf.NodeDescription("BoostedTreesPredict") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_predict(tree_ensemble_handle_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) 
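+ # Eager path, following the pattern used throughout this file: wrap a TFE op,
+ # feed the input handles, forward any attrs the caller set explicitly, then
+ # execute immediately and keep the first output.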
+ desc = tf.EagerOp("BoostedTreesPredict") + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_with_bias_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DWithBiasAndReluAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = 
tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + desc["Tbias"] = tf.data_type(bias_) + tf.execute(desc) + end +end + + +""" + resource_sparse_apply_adagrad(var, accum, lr, grad, indices; use_locking=false, update_slots=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, grad_) = tf.tf_promote(lr_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("ResourceSparseApplyAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + leaky_relu_grad(gradients, features; alpha=?) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyReluGrad") do + desc = tf.NodeDescription("LeakyReluGrad") + gradients_ = convert(Tensor{Float32}, gradients_) + features_ = convert(Tensor{Float32}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + tf.Tensor(tf.Operation(desc)) + end + function leaky_relu_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing, alpha=nothing) + desc = tf.EagerOp("LeakyReluGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] + end +end + + +""" + _device_retval(input) + +A graph node which represents a return value of a function. 
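+The leading underscore marks it as an internal TensorFlow op, not normally invoked directly from user code.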
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _device_retval(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceRetval") do + desc = tf.NodeDescription("_DeviceRetval") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _device_retval(input_::tf.TensorHandle; name=nothing, index=nothing) + desc = tf.EagerOp("_DeviceRetval") + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + pad(input, paddings) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pad(input_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "Pad") do + desc = tf.NodeDescription("Pad") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + end + tf.Tensor(tf.Operation(desc)) + end + function pad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Pad") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + desc["T"] = tf.data_type(input_) + desc["Tpaddings"] = tf.data_type(paddings_) + (tf.execute(desc))[1] + end +end + + +""" + add_many_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddManySparseToTensorsMap") do + desc = tf.NodeDescription("AddManySparseToTensorsMap") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function add_many_sparse_to_tensors_map(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("AddManySparseToTensorsMap") + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(sparse_values_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_reorder(input_indices, input_values, input_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReorder") do + desc = tf.NodeDescription("SparseReorder") + input_indices_ = convert(Tensor{Int64}, 
input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_reorder(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseReorder") + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + desc["T"] = tf.data_type(input_values_) + tf.execute(desc) + end +end + + +""" + bitwise_xor(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_xor(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseXor") do + desc = tf.NodeDescription("BitwiseXor") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function bitwise_xor(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BitwiseXor") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + batch_matrix_set_diag(input, diagonal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSetDiag") do + desc = tf.NodeDescription("BatchMatrixSetDiag") + input_ = convert(Tensor{Any}, input_) + diagonal_ = convert(Tensor{Any}, diagonal_) + (input_, diagonal_) = tf.tf_promote(input_, diagonal_) + tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_matrix_set_diag(input_::tf.TensorHandle, diagonal_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchMatrixSetDiag") + tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(diagonal_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_insert_v2(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsertV2") do + desc = tf.NodeDescription("LookupTableInsertV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_insert_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableInsertV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_dense_to_sparse_batch_dataset(input_dataset, batch_size, row_shape) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do + desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + row_shape_ = convert(Tensor{Int64}, row_shape_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, row_shape_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_dense_to_sparse_batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, row_shape_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, row_shape_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_sparse_apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad, indices; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + 
end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + (tf.execute(desc))[1] + end +end + + +""" + random_crop(image, size; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomCrop") do + desc = tf.NodeDescription("RandomCrop") + image_ = convert(Tensor{Any}, image_) + size_ = convert(Tensor{Int64}, size_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + tf.add_input(desc, size_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_crop(image_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("RandomCrop") + tf.add_input(desc, image_) + tf.add_input(desc, size_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + desc["T"] = tf.data_type(image_) + (tf.execute(desc))[1] + end +end + + +""" + lookup_table_import_v2(table_handle, keys, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImportV2") do + desc = tf.NodeDescription("LookupTableImportV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function lookup_table_import_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableImportV2") + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + desc["Tin"] = tf.data_type(keys_) + desc["Tout"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + resource_scatter_nd_update(ref, indices, updates; use_locking=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdUpdate") do + desc = tf.NodeDescription("ResourceScatterNdUpdate") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_nd_update(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceScatterNdUpdate") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if 
use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + static_regex_full_match(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing) + local desc + tf.with_op_name(name, "StaticRegexFullMatch") do + desc = tf.NodeDescription("StaticRegexFullMatch") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + end + tf.Tensor(tf.Operation(desc)) + end + function static_regex_full_match(input_::tf.TensorHandle; name=nothing, pattern=nothing) + desc = tf.EagerOp("StaticRegexFullMatch") + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + (tf.execute(desc))[1] + end +end + + +""" + gcs_configure_credentials(json) + +Configures the credentials used by the GCS client of the local TF runtime. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gcs_configure_credentials(json_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureCredentials") do + desc = tf.NodeDescription("GcsConfigureCredentials") + json_ = convert(Tensor{String}, json_) + tf.add_input(desc, json_) + end + tf.Tensor(tf.Operation(desc)) + end + function gcs_configure_credentials(json_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GcsConfigureCredentials") + tf.add_input(desc, json_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_size_v3(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV3") do + desc = tf.NodeDescription("TensorArraySizeV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_size_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySizeV3") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + (tf.execute(desc))[1] + end +end + + +""" + sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_sqrt_n_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, 
num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_group_by_reducer_dataset(input_dataset, key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do + desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, init_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, finalize_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_group_by_reducer_dataset(input_dataset_::tf.TensorHandle, key_func_other_arguments_::tf.TensorHandle, init_func_other_arguments_::tf.TensorHandle, reduce_func_other_arguments_::tf.TensorHandle, finalize_func_other_arguments_::tf.TensorHandle; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, 
Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalGroupByReducerDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, init_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, finalize_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2DBackpropFilter") do + desc = tf.NodeDescription("Conv2DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv2d_backprop_filter(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2DBackpropFilter") + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + 
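+        # Optional keyword attributes are forwarded to the op description only
+        # when the caller supplies them; anything left as `nothing` falls back
+        # to the kernel's registered default (e.g. `use_cudnn_on_gpu=true`).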
if padding !== nothing + desc["padding"] = Base.String(padding) + end + if explicit_paddings !== nothing + desc["explicit_paddings"] = map(Base.identity, explicit_paddings) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool_grad(orig_input, orig_output, grad; data_format=NHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGrad") do + desc = tf.NodeDescription("MaxPoolGrad") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGrad") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + _initialize_host_for_distributed_tpu(input) + +An op that connects each chip on the host to a centralized UberDriver to allow +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing) + local desc + tf.with_op_name(name, "_InitializeHostForDistributedTPU") do + desc = tf.NodeDescription("_InitializeHostForDistributedTPU") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function _initialize_host_for_distributed_tpu(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_InitializeHostForDistributedTPU") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + stage_peek(index; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StagePeek") do + desc = tf.NodeDescription("StagePeek") + index_ = convert(Tensor{Int32}, index_) + 
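+            # The index is first coerced to a Tensor{Int32}, the dtype the
+            # StagePeek kernel expects, before being wired into the node.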
tf.add_input(desc, index_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stage_peek(index_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StagePeek") + tf.add_input(desc, index_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + pad_v2(input, paddings, constant_values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing) + local desc + tf.with_op_name(name, "PadV2") do + desc = tf.NodeDescription("PadV2") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + constant_values_ = convert(Tensor{Any}, constant_values_) + (input_, constant_values_) = tf.tf_promote(input_, constant_values_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + tf.add_input(desc, constant_values_) + end + tf.Tensor(tf.Operation(desc)) + end + function pad_v2(input_::tf.TensorHandle, paddings_::tf.TensorHandle, constant_values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("PadV2") + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + tf.add_input(desc, constant_values_) + desc["T"] = tf.data_type(input_) + desc["Tpaddings"] = tf.data_type(paddings_) + desc["T"] = tf.data_type(constant_values_) + (tf.execute(desc))[1] + end +end + + +""" + _parallel_concat_start() + +Creates an empty Tensor with shape `shape` and type `dtype`. 
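+
+A minimal usage sketch (hypothetical call, assuming the eager path below and
+that the `shape`/`dtype` attributes accept plain Julia values):
+
+    h = _parallel_concat_start(shape=[2, 3], dtype=Float32)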
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatStart") do + desc = tf.NodeDescription("_ParallelConcatStart") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + desc = tf.EagerOp("_ParallelConcatStart") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + print_v2(input; output_stream=stderr) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing) + local desc + tf.with_op_name(name, "PrintV2") do + desc = tf.NodeDescription("PrintV2") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if output_stream !== nothing + desc["output_stream"] = Base.String(output_stream) + end + end + tf.Tensor(tf.Operation(desc)) + end + function print_v2(input_::tf.TensorHandle; name=nothing, output_stream=nothing) + desc = tf.EagerOp("PrintV2") + tf.add_input(desc, input_) + if output_stream !== nothing + desc["output_stream"] = Base.String(output_stream) + end + (tf.execute(desc))[1] + end +end + + +""" + optional_get_value(optional) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptionalGetValue") do + desc = tf.NodeDescription("OptionalGetValue") + optional_ = convert(Tensor{Any}, optional_) + tf.add_input(desc, optional_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function optional_get_value(optional_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("OptionalGetValue") + tf.add_input(desc, optional_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + load_tpu_embedding_ftrl_parameters(parameters, accumulators, linears; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + linears_ = convert(Tensor{Float32}, linears_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + 
desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_ftrl_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, linears_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_slice(indices, values, shape, start, size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) + local desc + tf.with_op_name(name, "SparseSlice") do + desc = tf.NodeDescription("SparseSlice") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + shape_ = convert(Tensor{Int64}, shape_) + start_ = convert(Tensor{Int64}, start_) + size_ = convert(Tensor{Int64}, size_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + tf.add_input(desc, start_) + tf.add_input(desc, size_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_slice(indices_::tf.TensorHandle, values_::tf.TensorHandle, shape_::tf.TensorHandle, start_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSlice") + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + tf.add_input(desc, start_) + tf.add_input(desc, size_) + desc["T"] = tf.data_type(values_) + tf.execute(desc) + end +end + + +""" + boosted_trees_make_quantile_summaries(float_values, example_weights, epsilon) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do + desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + epsilon_ = convert(Tensor{Float32}, epsilon_) + tf.add_input(desc, float_values_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, epsilon_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function boosted_trees_make_quantile_summaries(float_values_::tf.TensorHandle, example_weights_::tf.TensorHandle, epsilon_::tf.TensorHandle; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") + tf.add_input(desc, float_values_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, epsilon_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + tf.execute(desc) + 
end +end + + +""" + matrix_solve(matrix, rhs; adjoint=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixSolve") do + desc = tf.NodeDescription("MatrixSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixSolve") + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + desc["T"] = tf.data_type(matrix_) + desc["T"] = tf.data_type(rhs_) + (tf.execute(desc))[1] + end +end + + +""" + _configure_distributed_tpu(inputs) + +An op that sets up the centralized structures for a distributed TPU +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ConfigureDistributedTPU") do + desc = tf.NodeDescription("_ConfigureDistributedTPU") + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _configure_distributed_tpu(inputs_::tf.TensorHandle; name=nothing, N=nothing) + desc = tf.EagerOp("_ConfigureDistributedTPU") + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + (tf.execute(desc))[1] + end +end + + +""" + adjust_contrastv2(images, contrast_factor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing) + local desc + tf.with_op_name(name, "AdjustContrastv2") do + desc = tf.NodeDescription("AdjustContrastv2") + images_ = convert(Tensor{Float32}, images_) + contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, contrast_factor_) + end + tf.Tensor(tf.Operation(desc)) + end + function adjust_contrastv2(images_::tf.TensorHandle, contrast_factor_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AdjustContrastv2") + tf.add_input(desc, images_) + tf.add_input(desc, contrast_factor_) + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + _mkl_maximum(x, y, mkl_x, mkl_y) + +Returns the max of x and y (i.e. x > y ? x : y) element-wise. 
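+
+The `mkl_x`/`mkl_y` inputs appear to carry MKL tensor-layout metadata for `x`
+and `y`; the op correspondingly produces a value/metadata pair, which is why
+the graph wrapper below collects two outputs.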
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMaximum") do + desc = tf.NodeDescription("_MklMaximum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _mkl_maximum(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_MklMaximum") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsSize") do + desc = tf.NodeDescription("CudnnRNNParamsSize") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + if S !== nothing + desc["S"] = Base.identity(S) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cudnn_rnn_params_size(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNParamsSize") + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + if S !== nothing + desc["S"] = Base.identity(S) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + (tf.execute(desc))[1] + end +end + + +""" + boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle, summaries) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + summaries_ = [convert(Tensor{Float32}, x) for x = summaries_] + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, summaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_::tf.TensorHandle, summaries_::tf.TensorHandle; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, summaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + (tf.execute(desc))[1] + end +end + + +""" + batch_ifft3d(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft3d(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT3D") do + desc = tf.NodeDescription("BatchIFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_ifft3d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchIFFT3D") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + sigmoid(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sigmoid(x_; name=nothing) + local desc + tf.with_op_name(name, "Sigmoid") do + desc = tf.NodeDescription("Sigmoid") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function sigmoid(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sigmoid") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + segment_mean(data, segment_ids) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMean") do + desc = tf.NodeDescription("SegmentMean") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function segment_mean(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SegmentMean") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + (tf.execute(desc))[1] + end +end + + +""" + is_boosted_trees_ensemble_initialized(tree_ensemble_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do + desc = 
tf.NodeDescription("IsBoostedTreesEnsembleInitialized") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") + tf.add_input(desc, tree_ensemble_handle_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_size_v2(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV2") do + desc = tf.NodeDescription("TensorArraySizeV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_size_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArraySizeV2") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + (tf.execute(desc))[1] + end +end + + +""" + _mkl_sub(x, y, mkl_x, mkl_y) + +Returns x - y element-wise. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSub") do + desc = tf.NodeDescription("_MklSub") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _mkl_sub(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_MklSub") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + send_tpu_embedding_gradients(inputs, learning_rates; NN=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + local desc + tf.with_op_name(name, "SendTPUEmbeddingGradients") do + desc = tf.NodeDescription("SendTPUEmbeddingGradients") + inputs_ = [convert(Tensor{Float32}, x) for x = inputs_] + learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_] + tf.add_input(desc, inputs_) + tf.add_input(desc, learning_rates_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if NN !== nothing + desc["NN"] = Base.Int(NN) + end + if config !== nothing + desc["config"] = Base.String(config) + end + end + tf.Tensor(tf.Operation(desc)) + end + function send_tpu_embedding_gradients(inputs_::tf.TensorHandle, learning_rates_::tf.TensorHandle; name=nothing, N=nothing, NN=nothing, config=nothing) + desc = tf.EagerOp("SendTPUEmbeddingGradients") + tf.add_input(desc, inputs_) + tf.add_input(desc, learning_rates_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if NN !== nothing + desc["NN"] = Base.Int(NN) + end + if config !== nothing + desc["config"] = Base.String(config) + end + 
(tf.execute(desc))[1] + end +end + + +""" + max_pool3d(input; data_format=NDHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3D") do + desc = tf.NodeDescription("MaxPool3D") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool3d(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3D") + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end + + +""" + prod(input, reduction_indices; keep_dims=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Prod") do + desc = tf.NodeDescription("Prod") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + tf.Tensor(tf.Operation(desc)) + end + function prod(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Prod") + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + desc["T"] = tf.data_type(input_) + desc["Tidx"] = tf.data_type(reduction_indices_) + (tf.execute(desc))[1] + end +end + + +""" + experimental_identity_indexed_dataset(size) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do + desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") + size_ = convert(Tensor{Any}, size_) + tf.add_input(desc, size_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_identity_indexed_dataset(size_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") + tf.add_input(desc, size_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_push_back(input_handle, tensor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) + local desc + 
tf.with_op_name(name, "TensorListPushBack") do + desc = tf.NodeDescription("TensorListPushBack") + input_handle_ = convert(Tensor{Any}, input_handle_) + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_push_back(input_handle_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPushBack") + tf.add_input(desc, input_handle_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + desc["element_dtype"] = tf.data_type(tensor_) + (tf.execute(desc))[1] + end +end + + +""" + batch_function(in_tensors, captured_tensors; max_enqueued_batches=10, allowed_batch_sizes=Int64[], container=, shared_name=, batching_queue=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "BatchFunction") do + desc = tf.NodeDescription("BatchFunction") + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_] + tf.add_input(desc, in_tensors_) + tf.add_input(desc, captured_tensors_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_function(in_tensors_::tf.TensorHandle, captured_tensors_::tf.TensorHandle; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + desc = tf.EagerOp("BatchFunction") + tf.add_input(desc, in_tensors_) + tf.add_input(desc, captured_tensors_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + 
if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_fill_empty_rows(indices, values, dense_shape, default_value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRows") do + desc = tf.NodeDescription("SparseFillEmptyRows") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + default_value_ = convert(Tensor{Any}, default_value_) + (values_, default_value_) = tf.tf_promote(values_, default_value_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) + tf.add_input(desc, default_value_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_fill_empty_rows(indices_::tf.TensorHandle, values_::tf.TensorHandle, dense_shape_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseFillEmptyRows") + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) + tf.add_input(desc, default_value_) + desc["T"] = tf.data_type(values_) + desc["T"] = tf.data_type(default_value_) + tf.execute(desc) + end +end + + +""" + self_adjoint_eig_v2(input; compute_v=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEigV2") do + desc = tf.NodeDescription("SelfAdjointEigV2") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function self_adjoint_eig_v2(input_::tf.TensorHandle; name=nothing, compute_v=nothing) + desc = tf.EagerOp("SelfAdjointEigV2") + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + desc["T"] = tf.data_type(input_) + tf.execute(desc) + end +end + + +""" + retrieve_tpu_embedding_ftrl_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do + desc = 
tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + resource_sparse_apply_adagrad_da(var, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do + desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyAdagradDA") + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + 
(tf.execute(desc))[1] + end +end + + +""" + temporary_variable(; var_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "TemporaryVariable") do + desc = tf.NodeDescription("TemporaryVariable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + desc = tf.EagerOp("TemporaryVariable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + (tf.execute(desc))[1] + end +end + + +""" + resource_apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAddSign") do + desc = tf.NodeDescription("ResourceApplyAddSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + alpha_ = convert(Tensor{Any}, alpha_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_add_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, alpha_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAddSign") + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(sign_decay_) + desc["T"] = tf.data_type(beta_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + roll(input, shift, axis) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function roll(input_, shift_, axis_; name=nothing) + local desc + tf.with_op_name(name, "Roll") do + desc = tf.NodeDescription("Roll") + input_ = convert(Tensor{Any}, input_) + shift_ = convert(Tensor{Any}, shift_) + axis_ = convert(Tensor{Any}, axis_) + (input_,) = tf.tf_promote(input_) + (shift_,) = tf.tf_promote(shift_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, input_) + tf.add_input(desc, shift_) + tf.add_input(desc, axis_) + end + tf.Tensor(tf.Operation(desc)) + end + function roll(input_::tf.TensorHandle, 
shift_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Roll") + tf.add_input(desc, input_) + tf.add_input(desc, shift_) + tf.add_input(desc, axis_) + desc["T"] = tf.data_type(input_) + desc["Tshift"] = tf.data_type(shift_) + desc["Taxis"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + xdivy(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function xdivy(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xdivy") do + desc = tf.NodeDescription("Xdivy") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function xdivy(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Xdivy") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=NDHWC) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGradGrad") do + desc = tf.NodeDescription("MaxPool3DGradGrad") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + grad_ = convert(Tensor{Any}, grad_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool3d_grad_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3DGradGrad") + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + desc["T"] = tf.data_type(orig_input_) + desc["T"] = tf.data_type(orig_output_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end + + +""" + crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=?) 
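A minimal usage sketch (illustrative only; the argument names are hypothetical placeholders, and `boxes` holds normalized `[y1, x1, y2, x2]` coordinates as in the underlying TensorFlow op):

    # Crop one box per entry of box_ind out of `images`, resizing each to 24x24.
    crops = crop_and_resize(images, boxes, box_ind, [24, 24]; method="bilinear")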
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + local desc + tf.with_op_name(name, "CropAndResize") do + desc = tf.NodeDescription("CropAndResize") + image_ = convert(Tensor{Any}, image_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + crop_size_ = convert(Tensor{Int32}, crop_size_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, crop_size_) + if method !== nothing + desc["method"] = Base.String(method) + end + if extrapolation_value !== nothing + desc["extrapolation_value"] = Base.identity(extrapolation_value) + end + end + tf.Tensor(tf.Operation(desc)) + end + function crop_and_resize(image_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle, crop_size_::tf.TensorHandle; name=nothing, method=nothing, extrapolation_value=nothing) + desc = tf.EagerOp("CropAndResize") + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, crop_size_) + if method !== nothing + desc["method"] = Base.String(method) + end + if extrapolation_value !== nothing + desc["extrapolation_value"] = Base.identity(extrapolation_value) + end + desc["T"] = tf.data_type(image_) + (tf.execute(desc))[1] + end +end + + +""" + quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedBiasAdd") do + desc = tf.NodeDescription("QuantizedBiasAdd") + input_ = convert(Tensor{Any}, input_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_bias_ = convert(Tensor{Float32}, min_bias_) + max_bias_ = convert(Tensor{Float32}, max_bias_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_bias_) + tf.add_input(desc, max_bias_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_bias_add(input_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_bias_::tf.TensorHandle, max_bias_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedBiasAdd") + tf.add_input(desc, input_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_bias_) + tf.add_input(desc, max_bias_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T1"] = tf.data_type(input_) + desc["T2"] = tf.data_type(bias_) + tf.execute(desc) + end +end + + +""" + kmc2chain_initialization(distances, seed) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function kmc2chain_initialization(distances_, seed_; name=nothing) + local desc + tf.with_op_name(name, "KMC2ChainInitialization") do + desc = 
tf.NodeDescription("KMC2ChainInitialization") + distances_ = convert(Tensor{Float32}, distances_) + seed_ = convert(Tensor{Int64}, seed_) + tf.add_input(desc, distances_) + tf.add_input(desc, seed_) + end + tf.Tensor(tf.Operation(desc)) + end + function kmc2chain_initialization(distances_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("KMC2ChainInitialization") + tf.add_input(desc, distances_) + tf.add_input(desc, seed_) + (tf.execute(desc))[1] + end +end + + +""" + map_unstage_no_key(indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapUnstageNoKey") do + desc = tf.NodeDescription("MapUnstageNoKey") + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function map_unstage_no_key(indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapUnstageNoKey") + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + tf.execute(desc) + end +end + + +""" + scatter_nd_sub(ref, indices, updates; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdSub") do + desc = tf.NodeDescription("ScatterNdSub") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_nd_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdSub") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + resize_bilinear(images, 
size; align_corners=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinear") do + desc = tf.NodeDescription("ResizeBilinear") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_bilinear(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBilinear") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] + end +end + + +""" + ordered_map_peek(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapPeek") do + desc = tf.NodeDescription("OrderedMapPeek") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function ordered_map_peek(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapPeek") + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array(size; dynamic_size=false, clear_after_read=true, tensor_array_name=, element_shape=?) 
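A minimal usage sketch (illustrative; `dtype` is a required attribute of the underlying op even though it defaults to `nothing` in this wrapper):

    # A fixed-size TensorArray holding 5 Float32 elements.
    ta = tensor_array(5; dtype=Float32, dynamic_size=false)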
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArray") do + desc = tf.NodeDescription("TensorArray") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array(size_::tf.TensorHandle; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArray") + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + inplace_sub(x, i, v) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceSub") do + desc = tf.NodeDescription("InplaceSub") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + end + tf.Tensor(tf.Operation(desc)) + end + function inplace_sub(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InplaceSub") + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(v_) + (tf.execute(desc))[1] + end +end + + +""" + pow(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pow(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Pow") do + desc = tf.NodeDescription("Pow") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function pow(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Pow") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + stateful_standard_normal(resource, shape; dtype=Float32, shape_dtype=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + local desc + tf.with_op_name(name, "StatefulStandardNormal") do + desc = tf.NodeDescription("StatefulStandardNormal") + resource_ = convert(Tensor{Any}, 
resource_) + shape_ = convert(Tensor{Int64}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, resource_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape_dtype !== nothing + desc["shape_dtype"] = Base.identity(shape_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateful_standard_normal(resource_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, dtype=nothing, shape_dtype=nothing) + desc = tf.EagerOp("StatefulStandardNormal") + tf.add_input(desc, resource_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape_dtype !== nothing + desc["shape_dtype"] = Base.identity(shape_dtype) + end + desc["shape_dtype"] = tf.data_type(shape_) + (tf.execute(desc))[1] + end +end + + +""" + ref_next_iteration(data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_next_iteration(data_; name=nothing) + local desc + tf.with_op_name(name, "RefNextIteration") do + desc = tf.NodeDescription("RefNextIteration") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + end + tf.Tensor(tf.Operation(desc)) + end + function ref_next_iteration(data_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RefNextIteration") + tf.add_input(desc, data_) + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + scalar_summary(tags, values) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scalar_summary(tags_, values_; name=nothing) + local desc + tf.with_op_name(name, "ScalarSummary") do + desc = tf.NodeDescription("ScalarSummary") + tags_ = convert(Tensor{String}, tags_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, tags_) + tf.add_input(desc, values_) + end + tf.Tensor(tf.Operation(desc)) + end + function scalar_summary(tags_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ScalarSummary") + tf.add_input(desc, tags_) + tf.add_input(desc, values_) + desc["T"] = tf.data_type(values_) + (tf.execute(desc))[1] + end +end + + +""" + string_split_v2(input, sep; maxsplit=-1) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) + local desc + tf.with_op_name(name, "StringSplitV2") do + desc = tf.NodeDescription("StringSplitV2") + input_ = convert(Tensor{String}, input_) + sep_ = convert(Tensor{String}, sep_) + tf.add_input(desc, input_) + tf.add_input(desc, sep_) + if maxsplit !== nothing + desc["maxsplit"] = Base.Int(maxsplit) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function string_split_v2(input_::tf.TensorHandle, sep_::tf.TensorHandle; name=nothing, maxsplit=nothing) + desc = tf.EagerOp("StringSplitV2") + tf.add_input(desc, input_) + tf.add_input(desc, sep_) + if maxsplit !== nothing + desc["maxsplit"] = Base.Int(maxsplit) + end + tf.execute(desc) + end +end + + +""" + bessel_i0e(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bessel_i0e(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI0e") do + desc = tf.NodeDescription("BesselI0e") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + 
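# Materialize the accumulated NodeDescription as a graph Operation and wrap
# its single output as a Tensor; multi-output ops instead push
# `tf.Tensor(op, out_idx)` for each output into a vector (see e.g. `unique`
# just below).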
tf.Tensor(tf.Operation(desc)) + end + function bessel_i0e(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BesselI0e") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + unique(x; out_idx=Int32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "Unique") do + desc = tf.NodeDescription("Unique") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function unique(x_::tf.TensorHandle; name=nothing, out_idx=nothing) + desc = tf.EagerOp("Unique") + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + desc["T"] = tf.data_type(x_) + tf.execute(desc) + end +end + + +""" + load_tpu_embedding_rms_prop_parameters(parameters, ms, mom; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_rms_prop_parameters(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + (tf.execute(desc))[1] + end +end + + +""" + whole_file_reader_v2(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReaderV2") do + desc = tf.NodeDescription("WholeFileReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("WholeFileReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if 
shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + eager_py_func(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "EagerPyFunc") do + desc = tf.NodeDescription("EagerPyFunc") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + tf.Tensor(tf.Operation(desc)) + end + function eager_py_func(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("EagerPyFunc") + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + (tf.execute(desc))[1] + end +end + + +""" + next_iteration(data) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function next_iteration(data_; name=nothing) + local desc + tf.with_op_name(name, "NextIteration") do + desc = tf.NodeDescription("NextIteration") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + end + tf.Tensor(tf.Operation(desc)) + end + function next_iteration(data_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NextIteration") + tf.add_input(desc, data_) + desc["T"] = tf.data_type(data_) + (tf.execute(desc))[1] + end +end + + +""" + case(branch_index, input; output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "Case") do + desc = tf.NodeDescription("Case") + branch_index_ = convert(Tensor{Int32}, branch_index_) + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, branch_index_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if branches !== nothing + desc["branches"] = map(Base.identity, branches) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function case(branch_index_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + desc = tf.EagerOp("Case") + tf.add_input(desc, branch_index_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if branches !== nothing + desc["branches"] = map(Base.identity, branches) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_scatter_sub(tensor, indices, updates) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterSub") do + desc = 
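# Like the other scatter ops in this file, the graph method below shifts
# `indices_` down by one (`indices_ - 1`) to map Julia's 1-based indices onto
# TensorFlow's 0-based convention. Note that the TensorHandle (eager) methods
# in this patch do not apply the same shift to their inputs.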
tf.NodeDescription("TensorScatterSub") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_scatter_sub(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorScatterSub") + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + desc["T"] = tf.data_type(tensor_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + scatter_max(ref, indices, updates; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMax") do + desc = tf.NodeDescription("ScatterMax") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_max(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMax") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end + + +""" + sqrt(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sqrt(x_; name=nothing) + local desc + tf.with_op_name(name, "Sqrt") do + desc = tf.NodeDescription("Sqrt") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function sqrt(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Sqrt") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + accumulator_take_gradient(handle, num_required) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorTakeGradient") do + desc = tf.NodeDescription("AccumulatorTakeGradient") + handle_ = convert(Tensor{String}, handle_) + num_required_ = convert(Tensor{Int32}, num_required_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function accumulator_take_gradient(handle_::tf.TensorHandle, num_required_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("AccumulatorTakeGradient") + tf.add_input(desc, 
handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + _mkl_add(x, y, mkl_x, mkl_y) + +Returns x + y element-wise. +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklAdd") do + desc = tf.NodeDescription("_MklAdd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _mkl_add(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_MklAdd") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + reciprocal(x) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reciprocal(x_; name=nothing) + local desc + tf.with_op_name(name, "Reciprocal") do + desc = tf.NodeDescription("Reciprocal") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function reciprocal(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Reciprocal") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] + end +end + + +""" + outfeed_enqueue_tuple(inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueueTuple") do + desc = tf.NodeDescription("OutfeedEnqueueTuple") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function outfeed_enqueue_tuple(inputs_::tf.TensorHandle; name=nothing, dtypes=nothing) + desc = tf.EagerOp("OutfeedEnqueueTuple") + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + (tf.execute(desc))[1] + end +end + + +""" + string_strip(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_strip(input_; name=nothing) + local desc + tf.with_op_name(name, "StringStrip") do + desc = tf.NodeDescription("StringStrip") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function string_strip(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("StringStrip") + tf.add_input(desc, input_) + (tf.execute(desc))[1] + end +end + + +""" + fake_quant_with_min_max_vars_per_channel(inputs, min, max; num_bits=8, narrow_range=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do + desc = 
tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + tf.Tensor(tf.Operation(desc)) + end + function fake_quant_with_min_max_vars_per_channel(inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + (tf.execute(desc))[1] + end +end + + +""" + barrier_ready_size(handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_ready_size(handle_; name=nothing) + local desc + tf.with_op_name(name, "BarrierReadySize") do + desc = tf.NodeDescription("BarrierReadySize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function barrier_ready_size(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BarrierReadySize") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] + end +end + + +""" + string_to_hash_bucket(string_tensor) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucket") do + desc = tf.NodeDescription("StringToHashBucket") + string_tensor_ = convert(Tensor{String}, string_tensor_) + tf.add_input(desc, string_tensor_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_to_hash_bucket(string_tensor_::tf.TensorHandle; name=nothing, num_buckets=nothing) + desc = tf.EagerOp("StringToHashBucket") + tf.add_input(desc, string_tensor_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_concat(handle, flow_in; element_shape_except0=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcat") do + desc = tf.NodeDescription("TensorArrayConcat") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_concat(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcat") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + tf.execute(desc) + end +end + + +""" + sharded_filename(basename, shard, num_shards) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilename") do + desc = tf.NodeDescription("ShardedFilename") + basename_ = convert(Tensor{String}, basename_) + shard_ = convert(Tensor{Int32}, shard_) + num_shards_ = convert(Tensor{Int32}, num_shards_) + tf.add_input(desc, basename_) + tf.add_input(desc, shard_) + tf.add_input(desc, num_shards_) + end + tf.Tensor(tf.Operation(desc)) + end + function sharded_filename(basename_::tf.TensorHandle, shard_::tf.TensorHandle, num_shards_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ShardedFilename") + tf.add_input(desc, basename_) + tf.add_input(desc, shard_) + tf.add_input(desc, num_shards_) + (tf.execute(desc))[1] + end +end + + +""" + py_func(input) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFunc") do + desc = tf.NodeDescription("PyFunc") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + tf.Tensor(tf.Operation(desc)) + end + function py_func(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("PyFunc") + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + (tf.execute(desc))[1] + end +end + + +""" + unsorted_segment_prod(data, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentProd") do + desc = tf.NodeDescription("UnsortedSegmentProd") + data_ = convert(Tensor{Any}, data_) + 
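# `segment_ids_` is shifted down by one below so Julia-style 1-based segment
# labels select TensorFlow's 0-based segments.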
segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function unsorted_segment_prod(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnsortedSegmentProd") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + count_up_to(ref) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "CountUpTo") do + desc = tf.NodeDescription("CountUpTo") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + tf.Tensor(tf.Operation(desc)) + end + function count_up_to(ref_::tf.TensorHandle; name=nothing, limit=nothing) + desc = tf.EagerOp("CountUpTo") + tf.add_input(desc, ref_) + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + desc["T"] = tf.data_type(ref_) + (tf.execute(desc))[1] + end +end + + +""" + random_gamma(shape, alpha; seed=0, seed2=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + local desc + tf.with_op_name(name, "RandomGamma") do + desc = tf.NodeDescription("RandomGamma") + shape_ = convert(Tensor{Any}, shape_) + alpha_ = convert(Tensor{Any}, alpha_) + (shape_,) = tf.tf_promote(shape_) + (alpha_,) = tf.tf_promote(alpha_) + tf.add_input(desc, shape_) + tf.add_input(desc, alpha_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_gamma(shape_::tf.TensorHandle, alpha_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing) + desc = tf.EagerOp("RandomGamma") + tf.add_input(desc, shape_) + tf.add_input(desc, alpha_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + desc["S"] = tf.data_type(shape_) + desc["T"] = tf.data_type(alpha_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_grad(handle, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGrad") do + desc = tf.NodeDescription("TensorArrayGrad") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + end + tf.Tensor(tf.Operation(desc)) + end + function 
tensor_array_grad(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGrad") + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) + end + (tf.execute(desc))[1] + end +end + + +""" + dilation2d(input, filter) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2D") do + desc = tf.NodeDescription("Dilation2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function dilation2d(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("Dilation2D") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + (tf.execute(desc))[1] + end +end + + +""" + unbatch(batched_tensor, batch_index, id; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unbatch") do + desc = tf.NodeDescription("Unbatch") + batched_tensor_ = convert(Tensor{Any}, batched_tensor_) + batch_index_ = convert(Tensor{Int64}, batch_index_) + id_ = convert(Tensor{Int64}, id_) + (batched_tensor_,) = tf.tf_promote(batched_tensor_) + tf.add_input(desc, batched_tensor_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, id_) + if timeout_micros !== nothing + desc["timeout_micros"] = Base.Int(timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function unbatch(batched_tensor_::tf.TensorHandle, batch_index_::tf.TensorHandle, id_::tf.TensorHandle; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Unbatch") + tf.add_input(desc, batched_tensor_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, id_) + if timeout_micros !== nothing + desc["timeout_micros"] = Base.Int(timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + desc["T"] = tf.data_type(batched_tensor_) + (tf.execute(desc))[1] + end +end + + +""" + get_session_handle(value) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_handle(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandle") do + desc = 
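# `tf.tf_promote` below normalizes its arguments to TensorFlow tensors with a
# common concrete element type, from which the shared "T" attribute is later
# inferred in the eager method.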
tf.NodeDescription("GetSessionHandle") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + end + tf.Tensor(tf.Operation(desc)) + end + function get_session_handle(value_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("GetSessionHandle") + tf.add_input(desc, value_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end + + +""" + retrieve_tpu_embedding_adam_parameters(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) + end +end + + +""" + mutable_hash_table_of_tensors_v2(; container=, shared_name=, use_node_name_sharing=false, value_shape=?) 
+ + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensorsV2") do + desc = tf.NodeDescription("MutableHashTableOfTensorsV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + tf.Tensor(tf.Operation(desc)) + end + function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + desc = tf.EagerOp("MutableHashTableOfTensorsV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_apply_ftrl(var, accum, linear, grad, indices, lr, l1, l2, lr_power; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrl") do + desc = tf.NodeDescription("SparseApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyFtrl") + tf.add_input(desc, var_) + tf.add_input(desc, 
accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(var_) + desc["T"] = tf.data_type(accum_) + desc["T"] = tf.data_type(linear_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(l1_) + desc["T"] = tf.data_type(l2_) + desc["T"] = tf.data_type(lr_power_) + (tf.execute(desc))[1] + end +end + + +""" + batch_dataset_v2(input_dataset, batch_size, drop_remainder) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDatasetV2") do + desc = tf.NodeDescription("BatchDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_dataset_v2(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("BatchDatasetV2") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + sparse_sparse_minimum(a_indices, a_values, a_shape, b_indices, b_values, b_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSparseMinimum") do + desc = tf.NodeDescription("SparseSparseMinimum") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_sparse_minimum(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSparseMinimum") + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + 
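# Inputs must be added in exactly the order the op declares them:
# (a_indices, a_values, a_shape, b_indices, b_values, b_shape).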
tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + desc["T"] = tf.data_type(a_values_) + desc["T"] = tf.data_type(b_values_) + tf.execute(desc) + end +end + + +""" + reverse_v2(tensor, axis) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing) + local desc + tf.with_op_name(name, "ReverseV2") do + desc = tf.NodeDescription("ReverseV2") + tensor_ = convert(Tensor{Any}, tensor_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (tensor_,) = tf.tf_promote(tensor_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, tensor_) + tf.add_input(desc, axis_) + end + tf.Tensor(tf.Operation(desc)) + end + function reverse_v2(tensor_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReverseV2") + tf.add_input(desc, tensor_) + tf.add_input(desc, axis_) + desc["T"] = tf.data_type(tensor_) + desc["Tidx"] = tf.data_type(axis_) + (tf.execute(desc))[1] + end +end + + +""" + strided_slice(input, begin, end, strides; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSlice") do + desc = tf.NodeDescription("StridedSlice") + input_ = convert(Tensor{Any}, input_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + (input_,) = tf.tf_promote(input_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + tf.Tensor(tf.Operation(desc)) + end + function strided_slice(input_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("StridedSlice") + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + 
end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + desc["T"] = tf.data_type(input_) + desc["Index"] = tf.data_type(begin_) + desc["Index"] = tf.data_type(end_) + desc["Index"] = tf.data_type(strides_) + (tf.execute(desc))[1] + end +end + + +""" + matching_files(pattern) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matching_files(pattern_; name=nothing) + local desc + tf.with_op_name(name, "MatchingFiles") do + desc = tf.NodeDescription("MatchingFiles") + pattern_ = convert(Tensor{String}, pattern_) + tf.add_input(desc, pattern_) + end + tf.Tensor(tf.Operation(desc)) + end + function matching_files(pattern_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("MatchingFiles") + tf.add_input(desc, pattern_) + (tf.execute(desc))[1] + end +end + + +""" + encode_base64(input; pad=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing) + local desc + tf.with_op_name(name, "EncodeBase64") do + desc = tf.NodeDescription("EncodeBase64") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pad !== nothing + desc["pad"] = Base.Bool(pad) + end + end + tf.Tensor(tf.Operation(desc)) + end + function encode_base64(input_::tf.TensorHandle; name=nothing, pad=nothing) + desc = tf.EagerOp("EncodeBase64") + tf.add_input(desc, input_) + if pad !== nothing + desc["pad"] = Base.Bool(pad) + end + (tf.execute(desc))[1] + end +end + + +""" + iterator_get_next_as_optional(iterator) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextAsOptional") do + desc = tf.NodeDescription("IteratorGetNextAsOptional") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_get_next_as_optional(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNextAsOptional") + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + padding_fifo_queue(; shapes=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueue") do + desc = tf.NodeDescription("PaddingFIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PaddingFIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] + end +end + + +""" + iterator_to_string_handle(resource_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IteratorToStringHandle") do + desc = tf.NodeDescription("IteratorToStringHandle") + resource_handle_ = convert(Tensor{Any}, resource_handle_) + tf.add_input(desc, resource_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_to_string_handle(resource_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IteratorToStringHandle") + tf.add_input(desc, resource_handle_) + (tf.execute(desc))[1] + end +end + + +""" + max_pool_grad_grad_with_argmax(input, grad, argmax) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") + input_ = convert(Tensor{Any}, input_) + grad_ = convert(Tensor{Any}, grad_) + argmax_ = convert(Tensor{Any}, argmax_) + (argmax_,) = tf.tf_promote(argmax_) + (input_, grad_) = tf.tf_promote(input_, grad_) + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + tf.Tensor(tf.Operation(desc)) + end + function max_pool_grad_grad_with_argmax(input_::tf.TensorHandle, grad_::tf.TensorHandle, argmax_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolGradGradWithArgmax") + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== 
nothing + desc["padding"] = Base.String(padding) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(grad_) + desc["Targmax"] = tf.data_type(argmax_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_list_gather(input_handle, indices, element_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGather") do + desc = tf.NodeDescription("TensorListGather") + input_handle_ = convert(Tensor{Any}, input_handle_) + indices_ = convert(Tensor{Int32}, indices_) + element_shape_ = convert(Tensor{Int32}, element_shape_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_gather(input_handle_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListGather") + tf.add_input(desc, input_handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + multinomial(logits, num_samples; seed=0, seed2=0, output_dtype=Int64) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + local desc + tf.with_op_name(name, "Multinomial") do + desc = tf.NodeDescription("Multinomial") + logits_ = convert(Tensor{Any}, logits_) + num_samples_ = convert(Tensor{Int32}, num_samples_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) + tf.add_input(desc, num_samples_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function multinomial(logits_::tf.TensorHandle, num_samples_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + desc = tf.EagerOp("Multinomial") + tf.add_input(desc, logits_) + tf.add_input(desc, num_samples_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + desc["T"] = tf.data_type(logits_) + (tf.execute(desc))[1] + end +end + + +""" + tensor_array_read(handle, index, flow_in) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayRead") do + desc = tf.NodeDescription("TensorArrayRead") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_read(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; 
name=nothing, dtype=nothing) + desc = tf.EagerOp("TensorArrayRead") + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + (tf.execute(desc))[1] + end +end + + +""" + experimental_indexed_dataset_get(materialized, index) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") + materialized_ = convert(Tensor{Any}, materialized_) + index_ = convert(Tensor{Any}, index_) + tf.add_input(desc, materialized_) + tf.add_input(desc, index_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_indexed_dataset_get(materialized_::tf.TensorHandle, index_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalIndexedDatasetGet") + tf.add_input(desc, materialized_) + tf.add_input(desc, index_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + tpu_partitioned_call(args, device_ordinal) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + local desc + tf.with_op_name(name, "TPUPartitionedCall") do + desc = tf.NodeDescription("TPUPartitionedCall") + args_ = [convert(Tensor{Any}, x) for x = args_] + device_ordinal_ = convert(Tensor{Int32}, device_ordinal_) + tf.add_input(desc, args_) + tf.add_input(desc, device_ordinal_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tpu_partitioned_call(args_::tf.TensorHandle, device_ordinal_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + desc = tf.EagerOp("TPUPartitionedCall") + tf.add_input(desc, args_) + tf.add_input(desc, device_ordinal_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + (tf.execute(desc))[1] + end +end + + +""" + quantized_conv2d_and_relu_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do + desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") + input_ = convert(Tensor{Any}, input_) + 
filter_ = convert(Tensor{Any}, filter_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) + max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DAndReluAndRequantize") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + tf.add_input(desc, min_freezed_output_) + tf.add_input(desc, max_freezed_output_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) + end +end + + +""" + iterator_from_string_handle_v2(string_handle; output_types=Int64[], output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandleV2") do + desc = tf.NodeDescription("IteratorFromStringHandleV2") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_from_string_handle_v2(string_handle_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorFromStringHandleV2") + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end + + +""" + 
bitwise_or(x, y) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_or(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseOr") do + desc = tf.NodeDescription("BitwiseOr") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function bitwise_or(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BitwiseOr") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] + end +end + + +""" + unsorted_segment_max(data, segment_ids, num_segments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMax") do + desc = tf.NodeDescription("UnsortedSegmentMax") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + end + tf.Tensor(tf.Operation(desc)) + end + function unsorted_segment_max(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("UnsortedSegmentMax") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + desc["Tnumsegments"] = tf.data_type(num_segments_) + (tf.execute(desc))[1] + end +end + + +""" + _mkl_squared_difference(x, y, mkl_x, mkl_y) + +Returns (x - y)(x - y) element-wise. 
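+
+For example (illustrative only): with `x = [1, 2]` and `y = [3, 4]` the
+element-wise result is `[4, 4]`, since `(1 - 3)^2 == 4` and `(2 - 4)^2 == 4`.
+The extra `mkl_x`/`mkl_y` inputs presumably carry MKL tensor-layout metadata;
+note they are converted as `Tensor{UInt8}` below.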
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSquaredDifference") do + desc = tf.NodeDescription("_MklSquaredDifference") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function _mkl_squared_difference(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("_MklSquaredDifference") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + tf.execute(desc) + end +end + + +""" + conv3d_backprop_filter(input, filter, out_backprop; dilations=[1, 1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilter") do + desc = tf.NodeDescription("Conv3DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conv3d_backprop_filter(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropFilter") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(filter_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] + end +end + + +""" + if_(cond, input; output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "If") do + desc = tf.NodeDescription("If") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = 
Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function if_(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + desc = tf.EagerOp("If") + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + desc["Tcond"] = tf.data_type(cond_) + (tf.execute(desc))[1] end +end + """ - merge_summary(inputs) + flat_map_dataset(input_dataset, other_arguments) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FlatMapDataset") do + desc = tf.NodeDescription("FlatMapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function flat_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FlatMapDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] + end +end """ -tf.@op function merge_summary(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MergeSummary") - inputs_ = [convert(TensorFlow.Tensor{String}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end), name, "MergeSummary") - tf.Tensor(tf.Operation(desc)) + tensor_list_scatter(tensor, indices, element_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListScatter") do + desc = tf.NodeDescription("TensorListScatter") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Int32}, indices_) + element_shape_ = convert(Tensor{Any}, 
element_shape_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_scatter(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListScatter") + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + desc["element_dtype"] = tf.data_type(tensor_) + desc["shape_type"] = tf.data_type(element_shape_) + (tf.execute(desc))[1] end +end + """ - scalar_summary(tags, values) + softsign_grad(gradients, features) """ -tf.@op function scalar_summary(tags_, values_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ScalarSummary") - tags_ = convert(TensorFlow.Tensor{String}, tags_) - values_ = convert(TensorFlow.Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tags_) - tf.add_input(desc, values_) - end), name, "ScalarSummary") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softsign_grad(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftsignGrad") do + desc = tf.NodeDescription("SoftsignGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function softsign_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SoftsignGrad") + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] end +end -""" - audio_summary_v2(tag, tensor, sample_rate; max_outputs=3) +""" + copy_host(input; tensor_name=, debug_ops_spec=Int64[]) +Copy Host Op. 
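+
+(Presumably part of TensorFlow's debugger (tfdbg) support: it deep-copies the
+input tensor on the host CPU, with `debug_ops_spec` describing any debug ops
+attached to the copy.)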
""" -tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("AudioSummaryV2") - tag_ = convert(TensorFlow.Tensor{String}, tag_) - tensor_ = convert(TensorFlow.Tensor{Float32}, tensor_) - sample_rate_ = convert(TensorFlow.Tensor{Float32}, sample_rate_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end - end), name, "AudioSummaryV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "CopyHost") do + desc = tf.NodeDescription("CopyHost") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + tf.Tensor(tf.Operation(desc)) + end + function copy_host(input_::tf.TensorHandle; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + desc = tf.EagerOp("CopyHost") + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - histogram_summary(tag, values) + lin_space(start, stop, num) """ -tf.@op function histogram_summary(tag_, values_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("HistogramSummary") - tag_ = convert(TensorFlow.Tensor{String}, tag_) - values_ = convert(TensorFlow.Tensor{Float32}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - end), name, "HistogramSummary") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lin_space(start_, stop_, num_; name=nothing) + local desc + tf.with_op_name(name, "LinSpace") do + desc = tf.NodeDescription("LinSpace") + start_ = convert(Tensor{Any}, start_) + stop_ = convert(Tensor{Any}, stop_) + num_ = convert(Tensor{Int32}, num_) + num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) + (start_, stop_) = tf.tf_promote(start_, stop_) + (num_,) = tf.tf_promote(num_) + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, num_) + end + tf.Tensor(tf.Operation(desc)) + end + function lin_space(start_::tf.TensorHandle, stop_::tf.TensorHandle, num_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LinSpace") + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, num_) + desc["T"] = tf.data_type(start_) + desc["T"] = tf.data_type(stop_) + desc["Tidx"] = tf.data_type(num_) + (tf.execute(desc))[1] end +end -""" - image_summary(tag, tensor; max_images=3, bad_color=?) +""" + _parallel_concat_update(value, update) +Updates input `value` at `loc` with `update`. 
""" -tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ImageSummary") - tag_ = convert(TensorFlow.Tensor{String}, tag_) - tensor_ = convert(TensorFlow.Tensor{Float32}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end - if bad_color !== nothing - desc["bad_color"] = TensorFlow.RawTensor(bad_color) - end - end), name, "ImageSummary") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatUpdate") do + desc = tf.NodeDescription("_ParallelConcatUpdate") + value_ = convert(Tensor{Any}, value_) + update_ = convert(Tensor{Any}, update_) + (value_, update_) = tf.tf_promote(value_, update_) + tf.add_input(desc, value_) + tf.add_input(desc, update_) + if loc !== nothing + desc["loc"] = Base.Int(loc) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _parallel_concat_update(value_::tf.TensorHandle, update_::tf.TensorHandle; name=nothing, loc=nothing) + desc = tf.EagerOp("_ParallelConcatUpdate") + tf.add_input(desc, value_) + tf.add_input(desc, update_) + if loc !== nothing + desc["loc"] = Base.Int(loc) + end + desc["T"] = tf.data_type(value_) + desc["T"] = tf.data_type(update_) + (tf.execute(desc))[1] end +end + """ - decode_png(contents; channels=0, dtype=UInt8) + stack(; stack_name=) """ -tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("DecodePng") - contents_ = convert(TensorFlow.Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end), name, "DecodePng") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + local desc + tf.with_op_name(name, "Stack") do + desc = tf.NodeDescription("Stack") + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + desc = tf.EagerOp("Stack") + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + (tf.execute(desc))[1] end +end + """ - where(input) + stack_push_v2(handle, elem; swap_memory=false) """ -tf.@op function where(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Where") - input_ = convert(TensorFlow.Tensor{Bool}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Where") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPushV2") do + desc = tf.NodeDescription("StackPushV2") + handle_ = convert(Tensor{Any}, handle_) + elem_ = convert(Tensor{Any}, elem_) 
+ (elem_,) = tf.tf_promote(elem_) + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stack_push_v2(handle_::tf.TensorHandle, elem_::tf.TensorHandle; name=nothing, swap_memory=nothing) + desc = tf.EagerOp("StackPushV2") + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + desc["T"] = tf.data_type(elem_) + (tf.execute(desc))[1] end +end + """ - const_() + assign_variable_op(resource, value) """ -tf.@op function const_(; name=nothing, value=nothing, dtype=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Const") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end), name, "Const") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignVariableOp") do + desc = tf.NodeDescription("AssignVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function assign_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignVariableOp") + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(value_) + (tf.execute(desc))[1] end - -""" - variable_v2(; container=, shared_name=) +end """ -tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("VariableV2") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end), name, "VariableV2") - tf.Tensor(tf.Operation(desc)) + sparse_split(split_dim, indices, values, shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SparseSplit") do + desc = tf.NodeDescription("SparseSplit") + split_dim_ = convert(Tensor{Int64}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + shape_ = convert(Tensor{Int64}, shape_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, split_dim_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_split(split_dim_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle, 
shape_::tf.TensorHandle; name=nothing, num_split=nothing) + desc = tf.EagerOp("SparseSplit") + tf.add_input(desc, split_dim_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + desc["T"] = tf.data_type(values_) + tf.execute(desc) end +end + """ - log(x) + tensor_array_unpack(handle, value, flow_in) """ -tf.@op function log(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Log") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Log") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayUnpack") do + desc = tf.NodeDescription("TensorArrayUnpack") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_unpack(handle_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayUnpack") + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] end +end + """ - exp(x) + tensor_list_stack(input_handle, element_shape; num_elements=-1) """ -tf.@op function exp(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Exp") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Exp") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + local desc + tf.with_op_name(name, "TensorListStack") do + desc = tf.NodeDescription("TensorListStack") + input_handle_ = convert(Tensor{Any}, input_handle_) + element_shape_ = convert(Tensor{Int32}, element_shape_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if num_elements !== nothing + desc["num_elements"] = Base.Int(num_elements) + end + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_list_stack(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, num_elements=nothing) + desc = tf.EagerOp("TensorListStack") + tf.add_input(desc, input_handle_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if num_elements !== nothing + desc["num_elements"] = Base.Int(num_elements) + end + (tf.execute(desc))[1] end +end + """ - ceil(x) + barrier_incomplete_size(handle) """ -tf.@op function ceil(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Ceil") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Ceil") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_incomplete_size(handle_; 
name=nothing) + local desc + tf.with_op_name(name, "BarrierIncompleteSize") do + desc = tf.NodeDescription("BarrierIncompleteSize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function barrier_incomplete_size(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BarrierIncompleteSize") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] end +end + """ - floor(x) + restore(file_pattern, tensor_name; preferred_shard=-1) """ -tf.@op function floor(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Floor") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Floor") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "Restore") do + desc = tf.NodeDescription("Restore") + file_pattern_ = convert(Tensor{String}, file_pattern_) + tensor_name_ = convert(Tensor{String}, tensor_name_) + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + tf.Tensor(tf.Operation(desc)) + end + function restore(file_pattern_::tf.TensorHandle, tensor_name_::tf.TensorHandle; name=nothing, dt=nothing, preferred_shard=nothing) + desc = tf.EagerOp("Restore") + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + (tf.execute(desc))[1] end - -""" - sqrt(x) +end """ -tf.@op function sqrt(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Sqrt") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Sqrt") - tf.Tensor(tf.Operation(desc)) + tensor_array_v3(size; element_shape=?, dynamic_size=false, clear_after_read=true, identical_element_shapes=false, tensor_array_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV3") do + desc = tf.NodeDescription("TensorArrayV3") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if identical_element_shapes !== nothing + desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_v3(size_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, 
clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + desc = tf.EagerOp("TensorArrayV3") + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if identical_element_shapes !== nothing + desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + tf.execute(desc) end +end + """ - abs(x) + experimental_assert_next_dataset(input_dataset, transformations) """ -tf.@op function abs(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Abs") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Abs") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalAssertNextDataset") do + desc = tf.NodeDescription("ExperimentalAssertNextDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + transformations_ = convert(Tensor{String}, transformations_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, transformations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_assert_next_dataset(input_dataset_::tf.TensorHandle, transformations_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalAssertNextDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, transformations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - cos(x) + in_top_k(predictions, targets) """ -tf.@op function cos(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Cos") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Cos") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) + local desc + tf.with_op_name(name, "InTopK") do + desc = tf.NodeDescription("InTopK") + predictions_ = convert(Tensor{Float32}, predictions_) + targets_ = convert(Tensor{Int32}, targets_) + (targets_,) = tf.tf_promote(targets_) + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + if k !== nothing + desc["k"] = Base.Int(k) + end + end + tf.Tensor(tf.Operation(desc)) + end + function in_top_k(predictions_::tf.TensorHandle, targets_::tf.TensorHandle; name=nothing, k=nothing) + desc = tf.EagerOp("InTopK") + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + if k !== nothing + desc["k"] = Base.Int(k) + end + desc["T"] = tf.data_type(targets_) + 
(tf.execute(desc))[1] end +end + """ - sin(x) + scatter_sub(ref, indices, updates; use_locking=false) """ -tf.@op function sin(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Sin") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Sin") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterSub") do + desc = tf.NodeDescription("ScatterSub") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterSub") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] end +end + """ - tan(x) + acosh(x) """ -tf.@op function tan(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Tan") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Tan") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function acosh(x_; name=nothing) + local desc + tf.with_op_name(name, "Acosh") do + desc = tf.NodeDescription("Acosh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function acosh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Acosh") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end - -""" - atan(x) +end """ -tf.@op function atan(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Atan") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Atan") - tf.Tensor(tf.Operation(desc)) + depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if 
strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + tf.Tensor(tf.Operation(desc)) + end + function depthwise_conv2d_native_backprop_filter(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(out_backprop_) + (tf.execute(desc))[1] end +end + """ - asin(x) + cast(x; Truncate=false) """ -tf.@op function asin(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Asin") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Asin") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "Cast") do + desc = tf.NodeDescription("Cast") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cast(x_::tf.TensorHandle; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + desc = tf.EagerOp("Cast") + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + desc["SrcT"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - acos(x) + quantize_v2(input, min_range, max_range; mode=MIN_COMBINED, round_mode=HALF_AWAY_FROM_ZERO) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeV2") do + desc = tf.NodeDescription("QuantizeV2") + input_ = convert(Tensor{Float32}, input_) + min_range_ = convert(Tensor{Float32}, min_range_) + max_range_ = convert(Tensor{Float32}, max_range_) + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantize_v2(input_::tf.TensorHandle, min_range_::tf.TensorHandle, max_range_::tf.TensorHandle; name=nothing, mode=nothing, round_mode=nothing) 
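+        # Eager counterpart of the graph builder above: same inputs and attrs,
+        # but executed immediately. QuantizeV2 has three outputs
+        # (output, output_min, output_max), so the full result vector from
+        # tf.execute is returned below.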
+ desc = tf.EagerOp("QuantizeV2") + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + tf.execute(desc) + end +end """ -tf.@op function acos(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Acos") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Acos") - tf.Tensor(tf.Operation(desc)) + generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "GeneratorDataset") do + desc = tf.NodeDescription("GeneratorDataset") + init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_] + next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_] + finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_] + tf.add_input(desc, init_func_other_args_) + tf.add_input(desc, next_func_other_args_) + tf.add_input(desc, finalize_func_other_args_) + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if next_func !== nothing + desc["next_func"] = Base.identity(next_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tinit_func_args !== nothing + desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) + end + if Tnext_func_args !== nothing + desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) + end + if Tfinalize_func_args !== nothing + desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function generator_dataset(init_func_other_args_::tf.TensorHandle, next_func_other_args_::tf.TensorHandle, finalize_func_other_args_::tf.TensorHandle; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("GeneratorDataset") + tf.add_input(desc, init_func_other_args_) + tf.add_input(desc, next_func_other_args_) + tf.add_input(desc, finalize_func_other_args_) + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if next_func !== nothing + desc["next_func"] = Base.identity(next_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tinit_func_args !== nothing + desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) + end + if Tnext_func_args !== nothing + desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) + end + if Tfinalize_func_args !== nothing + desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + 
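+            # list-valued attrs are recorded element-wise; Base.identity leaves
+            # the caller-supplied dtypes untouched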
end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - tanh(x) + tensor_forest_tree_serialize(tree_handle) """ -tf.@op function tanh(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Tanh") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Tanh") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSerialize") do + desc = tf.NodeDescription("TensorForestTreeSerialize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_forest_tree_serialize(tree_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorForestTreeSerialize") + tf.add_input(desc, tree_handle_) + (tf.execute(desc))[1] end +end + """ - lgamma(x) + next_after(x1, x2) """ -tf.@op function lgamma(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Lgamma") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Lgamma") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function next_after(x1_, x2_; name=nothing) + local desc + tf.with_op_name(name, "NextAfter") do + desc = tf.NodeDescription("NextAfter") + x1_ = convert(Tensor{Float32}, x1_) + x2_ = convert(Tensor{Float32}, x2_) + (x1_, x2_) = tf.tf_promote(x1_, x2_) + tf.add_input(desc, x1_) + tf.add_input(desc, x2_) + end + tf.Tensor(tf.Operation(desc)) + end + function next_after(x1_::tf.TensorHandle, x2_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("NextAfter") + tf.add_input(desc, x1_) + tf.add_input(desc, x2_) + desc["T"] = tf.data_type(x1_) + desc["T"] = tf.data_type(x2_) + (tf.execute(desc))[1] end +end + """ - erf(x) + tensor_array_close_v2(handle) """ -tf.@op function erf(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Erf") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Erf") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close_v2(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV2") do + desc = tf.NodeDescription("TensorArrayCloseV2") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_close_v2(handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("TensorArrayCloseV2") + tf.add_input(desc, handle_) + (tf.execute(desc))[1] end +end + """ - erfc(x) + big_query_reader(; container=, shared_name=, test_end_point=) + +A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
+""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "BigQueryReader") do + desc = tf.NodeDescription("BigQueryReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + tf.Tensor(tf.Operation(desc)) + end + function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + desc = tf.EagerOp("BigQueryReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + (tf.execute(desc))[1] + end +end """ -tf.@op function erfc(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Erfc") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Erfc") - tf.Tensor(tf.Operation(desc)) + reader_read_v2(reader_handle, queue_handle) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadV2") do + desc = tf.NodeDescription("ReaderReadV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + queue_handle_ = convert(Tensor{Any}, queue_handle_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function reader_read_v2(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ReaderReadV2") + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.execute(desc) end +end + """ - real(input) + mod(x, y) """ -tf.@op function real(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Real") - input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Real") - tf.Tensor(tf.Operation(desc)) +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mod(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mod") do + desc = tf.NodeDescription("Mod") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Mod") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] end +end + """ - imag(input) + add_v2(x, y) """ -tf.@op function imag(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Imag") - input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Imag") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_v2(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "AddV2") do + desc = tf.NodeDescription("AddV2") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + end + tf.Tensor(tf.Operation(desc)) + end + function add_v2(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("AddV2") + tf.add_input(desc, x_) + tf.add_input(desc, y_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(y_) + (tf.execute(desc))[1] end +end + """ - sign(x) + stateless_random_normal(shape, seed; dtype=Float32) """ -tf.@op function sign(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Sign") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Sign") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomNormal") do + desc = tf.NodeDescription("StatelessRandomNormal") + shape_ = convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function stateless_random_normal(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomNormal") + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + desc["Tseed"] = tf.data_type(seed_) + (tf.execute(desc))[1] end +end + """ - conj(input) + strided_slice_assign(ref, begin, end, strides, value; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSliceAssign") do + desc = tf.NodeDescription("StridedSliceAssign") + ref_ = convert(Tensor{Any}, ref_) + 
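+            # begin/end/strides are shifted down by one below, translating
+            # Julia's 1-based indexing into TensorFlow's 0-based slicing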
begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + tf.Tensor(tf.Operation(desc)) + end + function strided_slice_assign(ref_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("StridedSliceAssign") + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + desc["T"] = tf.data_type(ref_) + desc["Index"] = tf.data_type(begin_) + desc["Index"] = tf.data_type(end_) + desc["Index"] = tf.data_type(strides_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] + end +end """ -tf.@op function conj(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Conj") - input_ = convert(TensorFlow.Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Conj") - tf.Tensor(tf.Operation(desc)) - end + scatter_min(ref, indices, updates; use_locking=false) + """ - round(x) +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMin") do + desc = tf.NodeDescription("ScatterMin") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scatter_min(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMin") + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(ref_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(updates_) + (tf.execute(desc))[1] + end +end """ -tf.@op function round(x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Round") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end), name, "Round") - tf.Tensor(tf.Operation(desc)) + resource_strided_slice_assign(ref, begin, end, strides, value; begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=0) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "ResourceStridedSliceAssign") do + desc = tf.NodeDescription("ResourceStridedSliceAssign") + ref_ = convert(Tensor{Any}, ref_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + 
desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_strided_slice_assign(ref_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("ResourceStridedSliceAssign") + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + desc["Index"] = tf.data_type(begin_) + desc["Index"] = tf.data_type(end_) + desc["Index"] = tf.data_type(strides_) + desc["T"] = tf.data_type(value_) + (tf.execute(desc))[1] end +end + """ - polygamma(a, x) + random_gamma_grad(alpha, sample) """ -tf.@op function polygamma(a_, x_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Polygamma") - a_ = convert(TensorFlow.Tensor{Any}, a_) - x_ = convert(TensorFlow.Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - end), name, "Polygamma") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing) + local desc + tf.with_op_name(name, "RandomGammaGrad") do + desc = tf.NodeDescription("RandomGammaGrad") + alpha_ = convert(Tensor{Any}, alpha_) + sample_ = convert(Tensor{Any}, sample_) + (alpha_, sample_) = tf.tf_promote(alpha_, sample_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sample_) + end + tf.Tensor(tf.Operation(desc)) + end + function random_gamma_grad(alpha_::tf.TensorHandle, sample_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("RandomGammaGrad") + tf.add_input(desc, alpha_) + tf.add_input(desc, sample_) + desc["T"] = tf.data_type(alpha_) + desc["T"] = tf.data_type(sample_) + (tf.execute(desc))[1] end +end + """ - zeta(x, q) + resource_sparse_apply_keras_momentum(var, accum, lr, grad, indices, momentum; use_locking=false, use_nesterov=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + 
indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_sparse_apply_keras_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + desc["Tindices"] = tf.data_type(indices_) + desc["T"] = tf.data_type(momentum_) + (tf.execute(desc))[1] + end +end """ -tf.@op function zeta(x_, q_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Zeta") - x_ = convert(TensorFlow.Tensor{Any}, x_) - q_ = convert(TensorFlow.Tensor{Any}, q_) - (x_, q_) = tf.tf_promote(x_, q_) - tf.add_input(desc, x_) - tf.add_input(desc, q_) - end), name, "Zeta") - tf.Tensor(tf.Operation(desc)) - end + boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle, epsilon, num_streams; max_elements=1099511627776) + """ - matrix_inverse(input; adjoint=false) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do + desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + epsilon_ = convert(Tensor{Float32}, epsilon_) + num_streams_ = convert(Tensor{Int64}, num_streams_) + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, num_streams_) + if max_elements !== nothing + desc["max_elements"] = Base.Int(max_elements) + end + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_::tf.TensorHandle, epsilon_::tf.TensorHandle, num_streams_::tf.TensorHandle; name=nothing, max_elements=nothing) + desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, num_streams_) + if max_elements !== nothing + desc["max_elements"] = Base.Int(max_elements) + end + (tf.execute(desc))[1] + end +end """ -tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatrixInverse") - input_ = convert(TensorFlow.Tensor{Any}, 
input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - end), name, "MatrixInverse") - tf.Tensor(tf.Operation(desc)) + quantized_relu6(features, min_features, max_features; out_type=Float32) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu6") do + desc = tf.NodeDescription("QuantizedRelu6") + features_ = convert(Tensor{Any}, features_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_relu6(features_::tf.TensorHandle, min_features_::tf.TensorHandle, max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedRelu6") + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["Tinput"] = tf.data_type(features_) + tf.execute(desc) end +end + """ - matrix_determinant(input) + sparse_sparse_maximum(a_indices, a_values, a_shape, b_indices, b_values, b_shape) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSparseMaximum") do + desc = tf.NodeDescription("SparseSparseMaximum") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_sparse_maximum(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSparseMaximum") + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + desc["T"] = tf.data_type(a_values_) + desc["T"] = tf.data_type(b_values_) + tf.execute(desc) + end +end """ -tf.@op function matrix_determinant(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatrixDeterminant") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "MatrixDeterminant") - tf.Tensor(tf.Operation(desc)) + 
batch_norm_with_global_normalization(t, m, v, beta, gamma) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalization") + t_ = convert(Tensor{Any}, t_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta_ = convert(Tensor{Any}, beta_) + gamma_ = convert(Tensor{Any}, gamma_) + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta_) + tf.add_input(desc, gamma_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + tf.Tensor(tf.Operation(desc)) + end + function batch_norm_with_global_normalization(t_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta_::tf.TensorHandle, gamma_::tf.TensorHandle; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("BatchNormWithGlobalNormalization") + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta_) + tf.add_input(desc, gamma_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + desc["T"] = tf.data_type(t_) + desc["T"] = tf.data_type(m_) + desc["T"] = tf.data_type(v_) + desc["T"] = tf.data_type(beta_) + desc["T"] = tf.data_type(gamma_) + (tf.execute(desc))[1] end +end + """ - diag(diagonal) + in_top_kv2(predictions, targets, k) """ -tf.@op function diag(diagonal_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Diag") - diagonal_ = convert(TensorFlow.Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) - end), name, "Diag") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing) + local desc + tf.with_op_name(name, "InTopKV2") do + desc = tf.NodeDescription("InTopKV2") + predictions_ = convert(Tensor{Float32}, predictions_) + targets_ = convert(Tensor{Int32}, targets_) + k_ = convert(Tensor{Int32}, k_) + (targets_, k_) = tf.tf_promote(targets_, k_) + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + tf.add_input(desc, k_) + end + tf.Tensor(tf.Operation(desc)) + end + function in_top_kv2(predictions_::tf.TensorHandle, targets_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InTopKV2") + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + tf.add_input(desc, k_) + desc["T"] = tf.data_type(targets_) + desc["T"] = tf.data_type(k_) + (tf.execute(desc))[1] end +end + """ - matrix_diag_part(input) + cholesky(input) """ -tf.@op function matrix_diag_part(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("MatrixDiagPart") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "MatrixDiagPart") - 
tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cholesky(input_; name=nothing) + local desc + tf.with_op_name(name, "Cholesky") do + desc = tf.NodeDescription("Cholesky") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function cholesky(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Cholesky") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end - -""" - cast(x) +end """ -tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Cast") - x_ = convert(TensorFlow.Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - end), name, "Cast") - tf.Tensor(tf.Operation(desc)) + resource_apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad; use_locking=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyCenteredRMSProp") + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(momentum_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] end +end + """ - one_hot(indices, depth, on_value, off_value; axis=-1) + resource_apply_adagrad(var, accum, lr, grad; use_locking=false, update_slots=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + local 
desc + tf.with_op_name(name, "ResourceApplyAdagrad") do + desc = tf.NodeDescription("ResourceApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, grad_) = tf.tf_promote(lr_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("ResourceApplyAdagrad") + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(grad_) + (tf.execute(desc))[1] + end +end """ -tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("OneHot") - indices_ = convert(TensorFlow.Tensor{Int64}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - depth_ = convert(TensorFlow.Tensor{Int32}, depth_) - on_value_ = convert(TensorFlow.Tensor{Any}, on_value_) - off_value_ = convert(TensorFlow.Tensor{Any}, off_value_) - (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, indices_) - tf.add_input(desc, depth_) - tf.add_input(desc, on_value_) - tf.add_input(desc, off_value_) - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end), name, "OneHot") - tf.Tensor(tf.Operation(desc)) + experimental_parallel_interleave_dataset(input_dataset, other_arguments, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + sloppy_ = convert(Tensor{Bool}, sloppy_) + buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_) + prefetch_input_elements_ = convert(Tensor{Int64}, prefetch_input_elements_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, sloppy_) + tf.add_input(desc, buffer_output_elements_) + tf.add_input(desc, prefetch_input_elements_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing 
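+                # Targuments mirrors the dtypes of other_arguments as declared
+                # by the caller and is passed through unchanged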
+ desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_parallel_interleave_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle, sloppy_::tf.TensorHandle, buffer_output_elements_::tf.TensorHandle, prefetch_input_elements_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, sloppy_) + tf.add_input(desc, buffer_output_elements_) + tf.add_input(desc, prefetch_input_elements_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - reshape(tensor, shape) + resize_bicubic_grad(grads, original_image; align_corners=false) """ -tf.@op function reshape(tensor_, shape_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Reshape") - tensor_ = convert(TensorFlow.Tensor{Any}, tensor_) - shape_ = convert(TensorFlow.Tensor{Int32}, shape_) - (tensor_,) = tf.tf_promote(tensor_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - end), name, "Reshape") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubicGrad") do + desc = tf.NodeDescription("ResizeBicubicGrad") + grads_ = convert(Tensor{Float32}, grads_) + original_image_ = convert(Tensor{Any}, original_image_) + (original_image_,) = tf.tf_promote(original_image_) + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resize_bicubic_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBicubicGrad") + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(original_image_) + (tf.execute(desc))[1] end +end + """ - split(split_dim, value) + batch_self_adjoint_eig(input) """ -tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Split") - split_dim_ = convert(TensorFlow.Tensor{Int32}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - value_ = convert(TensorFlow.Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, value_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end - end), name, "Split") 
- out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_split - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchSelfAdjointEig") do + desc = tf.NodeDescription("BatchSelfAdjointEig") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) end - out + function batch_self_adjoint_eig(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchSelfAdjointEig") + tf.add_input(desc, input_) + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - div(x, y) + sparse_softmax(sp_indices, sp_values, sp_shape) """ -tf.@op function div(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Div") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Div") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSoftmax") do + desc = tf.NodeDescription("SparseSoftmax") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + (sp_values_,) = tf.tf_promote(sp_values_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_softmax(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseSoftmax") + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + desc["T"] = tf.data_type(sp_values_) + (tf.execute(desc))[1] end +end + """ - minimum(x, y) + asinh(x) """ -tf.@op function minimum(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Minimum") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Minimum") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function asinh(x_; name=nothing) + local desc + tf.with_op_name(name, "Asinh") do + desc = tf.NodeDescription("Asinh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function asinh(x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Asinh") + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end - -""" - maximum(x, y) +end """ -tf.@op function maximum(x_, y_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Maximum") - x_ = convert(TensorFlow.Tensor{Any}, x_) - y_ = convert(TensorFlow.Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end), name, "Maximum") - tf.Tensor(tf.Operation(desc)) + quantized_conv2d_and_relu(input, filter, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) + + +""" +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2DAndRelu") do + desc = tf.NodeDescription("QuantizedConv2DAndRelu") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_filter_ = convert(Tensor{Float32}, min_filter_) + max_filter_ = convert(Tensor{Float32}, max_filter_) + (filter_,) = tf.tf_promote(filter_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_conv2d_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("QuantizedConv2DAndRelu") + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_filter_) + tf.add_input(desc, max_filter_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + desc["Tinput"] = tf.data_type(input_) + desc["Tfilter"] = tf.data_type(filter_) + tf.execute(desc) end +end + """ - select(condition, t, e) + matrix_inverse(input; adjoint=false) """ -tf.@op function select(condition_, t_, e_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Select") - condition_ = convert(TensorFlow.Tensor{Bool}, condition_) - t_ = convert(TensorFlow.Tensor{Any}, t_) - e_ = convert(TensorFlow.Tensor{Any}, e_) - (t_, e_) = tf.tf_promote(t_, e_) - tf.add_input(desc, condition_) - tf.add_input(desc, t_) - tf.add_input(desc, e_) - end), name, "Select") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixInverse") do + desc = tf.NodeDescription("MatrixInverse") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + tf.Tensor(tf.Operation(desc)) + end + function matrix_inverse(input_::tf.TensorHandle; name=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixInverse") + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + 
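+        # as in the other eager wrappers, the "T" attr is filled from the input
+        # handle's runtime dtype immediately before execution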
desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - switch(data, pred) + tensor_list_concat_lists(input_a, input_b) """ -tf.@op function switch(data_, pred_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Switch") - data_ = convert(TensorFlow.Tensor{Any}, data_) - pred_ = convert(TensorFlow.Tensor{Bool}, pred_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - end), name, "Switch") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListConcatLists") do + desc = tf.NodeDescription("TensorListConcatLists") + input_a_ = convert(Tensor{Any}, input_a_) + input_b_ = convert(Tensor{Any}, input_b_) + tf.add_input(desc, input_a_) + tf.add_input(desc, input_b_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + tf.Tensor(tf.Operation(desc)) end - out + function tensor_list_concat_lists(input_a_::tf.TensorHandle, input_b_::tf.TensorHandle; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListConcatLists") + tf.add_input(desc, input_a_) + tf.add_input(desc, input_b_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + (tf.execute(desc))[1] end - -""" - identity(input) +end """ -tf.@op function identity(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Identity") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Identity") - tf.Tensor(tf.Operation(desc)) + requantize(input, input_min, input_max, requested_output_min, requested_output_max) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Requantize") do + desc = tf.NodeDescription("Requantize") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) + requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, requested_output_min_) + tf.add_input(desc, requested_output_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function requantize(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, requested_output_min_::tf.TensorHandle, requested_output_max_::tf.TensorHandle; name=nothing, out_type=nothing) + desc = tf.EagerOp("Requantize") + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, requested_output_min_) + tf.add_input(desc, requested_output_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["Tinput"] = tf.data_type(input_) + tf.execute(desc) 
end +end + """ - merge(inputs) + fft(input) """ -tf.@op function merge(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Merge") - inputs_ = [convert(TensorFlow.Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end), name, "Merge") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT") do + desc = tf.NodeDescription("FFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) end - out + function fft(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FFT") + tf.add_input(desc, input_) + desc["Tcomplex"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - enter(data; is_constant=false, parallel_iterations=10) + conjugate_transpose(x, perm) """ -tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Enter") - data_ = convert(TensorFlow.Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) - end - if is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) - end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end - end), name, "Enter") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "ConjugateTranspose") do + desc = tf.NodeDescription("ConjugateTranspose") + x_ = convert(Tensor{Any}, x_) + perm_ = convert(Tensor{Int32}, perm_) + (perm_,) = tf.tf_promote(perm_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + end + tf.Tensor(tf.Operation(desc)) + end + function conjugate_transpose(x_::tf.TensorHandle, perm_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ConjugateTranspose") + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + desc["T"] = tf.data_type(x_) + desc["Tperm"] = tf.data_type(perm_) + (tf.execute(desc))[1] end - -""" - loop_cond(input) +end """ -tf.@op function loop_cond(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LoopCond") - input_ = convert(TensorFlow.Tensor{Bool}, input_) - tf.add_input(desc, input_) - end), name, "LoopCond") - tf.Tensor(tf.Operation(desc)) + unstage(; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unstage") do + desc = tf.NodeDescription("Unstage") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + 
desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Unstage") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + (tf.execute(desc))[1] end +end + """ - exit(data) + relu6grad(gradients, features) """ -tf.@op function exit(data_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Exit") - data_ = convert(TensorFlow.Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - end), name, "Exit") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu6grad(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6Grad") do + desc = tf.NodeDescription("Relu6Grad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + end + tf.Tensor(tf.Operation(desc)) + end + function relu6grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Relu6Grad") + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + desc["T"] = tf.data_type(gradients_) + desc["T"] = tf.data_type(features_) + (tf.execute(desc))[1] end +end + """ - next_iteration(data) + scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=lanczos3) """ -tf.@op function next_iteration(data_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("NextIteration") - data_ = convert(TensorFlow.Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - end), name, "NextIteration") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + local desc + tf.with_op_name(name, "ScaleAndTranslateGrad") do + desc = tf.NodeDescription("ScaleAndTranslateGrad") + grads_ = convert(Tensor{Any}, grads_) + original_image_ = convert(Tensor{Any}, original_image_) + scale_ = convert(Tensor{Float32}, scale_) + translation_ = convert(Tensor{Float32}, translation_) + (grads_, original_image_) = tf.tf_promote(grads_, original_image_) + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + tf.add_input(desc, scale_) + tf.add_input(desc, translation_) + if kernel_type !== nothing + desc["kernel_type"] = Base.String(kernel_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function scale_and_translate_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle, scale_::tf.TensorHandle, translation_::tf.TensorHandle; name=nothing, kernel_type=nothing) + desc = tf.EagerOp("ScaleAndTranslateGrad") + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + tf.add_input(desc, scale_) + tf.add_input(desc, translation_) + if kernel_type !== nothing + desc["kernel_type"] = Base.String(kernel_type) + 
end + desc["T"] = tf.data_type(grads_) + desc["T"] = tf.data_type(original_image_) + (tf.execute(desc))[1] end - -""" - complex(real, imag) +end """ -tf.@op function complex(real_, imag_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Complex") - real_ = convert(TensorFlow.Tensor{Float32}, real_) - imag_ = convert(TensorFlow.Tensor{Float32}, imag_) - (real_, imag_) = tf.tf_promote(real_, imag_) - tf.add_input(desc, real_) - tf.add_input(desc, imag_) - end), name, "Complex") - tf.Tensor(tf.Operation(desc)) - end + _array_to_list(input) +Converts an array of tensors to a list of tensors. """ - print(input, data; message=, first_n=-1, summarize=3) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) + local desc + tf.with_op_name(name, "_ArrayToList") do + desc = tf.NodeDescription("_ArrayToList") + input_ = [convert(Tensor{Any}, x) for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if out_types !== nothing + desc["out_types"] = map(Base.identity, out_types) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _array_to_list(input_::tf.TensorHandle; name=nothing, N=nothing, out_types=nothing) + desc = tf.EagerOp("_ArrayToList") + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if out_types !== nothing + desc["out_types"] = map(Base.identity, out_types) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] + end +end """ -tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Print") - input_ = convert(TensorFlow.Tensor{Any}, input_) - data_ = [convert(TensorFlow.Tensor{Any}, x) for x = data_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, data_) - if U !== nothing - desc["U"] = map(Base.identity, U) - end - if message !== nothing - desc["message"] = Base.String(message) - end - if first_n !== nothing - desc["first_n"] = Base.Int(first_n) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - end), name, "Print") - tf.Tensor(tf.Operation(desc)) + cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNNV3") do + desc = tf.NodeDescription("CudnnRNNV3") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + sequence_lengths_ = convert(Tensor{Int32}, sequence_lengths_) + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, sequence_lengths_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = 
Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function cudnn_rnnv3(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, sequence_lengths_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + desc = tf.EagerOp("CudnnRNNV3") + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, sequence_lengths_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + desc["T"] = tf.data_type(input_) + desc["T"] = tf.data_type(input_h_) + desc["T"] = tf.data_type(input_c_) + desc["T"] = tf.data_type(params_) + tf.execute(desc) end +end + """ - reverse_v2(tensor, axis) + expand_dims(input, dim) """ -tf.@op function reverse_v2(tensor_, axis_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("ReverseV2") - tensor_ = convert(TensorFlow.Tensor{Any}, tensor_) - axis_ = convert(TensorFlow.Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (tensor_,) = tf.tf_promote(tensor_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, tensor_) - tf.add_input(desc, axis_) - end), name, "ReverseV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function expand_dims(input_, dim_; name=nothing) + local desc + tf.with_op_name(name, "ExpandDims") do + desc = tf.NodeDescription("ExpandDims") + input_ = convert(Tensor{Any}, input_) + dim_ = convert(Tensor{Int32}, dim_) + dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) + (input_,) = tf.tf_promote(input_) + (dim_,) = tf.tf_promote(dim_) + tf.add_input(desc, input_) + tf.add_input(desc, dim_) + end + tf.Tensor(tf.Operation(desc)) + end + function expand_dims(input_::tf.TensorHandle, dim_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("ExpandDims") + tf.add_input(desc, input_) + tf.add_input(desc, dim_) + desc["T"] = tf.data_type(input_) + desc["Tdim"] = tf.data_type(dim_) + (tf.execute(desc))[1] end +end + """ - size(input; out_type=Int32) + inv_grad(y, dy) """ -tf.@op function size(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Size") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end), name, "Size") - tf.Tensor(tf.Operation(desc)) +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inv_grad(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "InvGrad") do + desc = tf.NodeDescription("InvGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function inv_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("InvGrad") + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + (tf.execute(desc))[1] end +end + """ - softmax_cross_entropy_with_logits(features, labels) + non_max_suppression(boxes, scores, max_output_size; iou_threshold=?) """ -tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") - features_ = convert(TensorFlow.Tensor{Any}, features_) - labels_ = convert(TensorFlow.Tensor{Any}, labels_) - (features_, labels_) = tf.tf_promote(features_, labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end), name, "SoftmaxCrossEntropyWithLogits") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppression") do + desc = tf.NodeDescription("NonMaxSuppression") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + if iou_threshold !== nothing + desc["iou_threshold"] = Base.identity(iou_threshold) + end + end + tf.Tensor(tf.Operation(desc)) end - out + function non_max_suppression(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle; name=nothing, iou_threshold=nothing) + desc = tf.EagerOp("NonMaxSuppression") + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + if iou_threshold !== nothing + desc["iou_threshold"] = Base.identity(iou_threshold) + end + (tf.execute(desc))[1] end +end + """ - sparse_softmax_cross_entropy_with_logits(features, labels) + l2loss(t) """ -tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") - features_ = convert(TensorFlow.Tensor{Any}, features_) - labels_ = convert(TensorFlow.Tensor{Int64}, labels_) - (features_,) = tf.tf_promote(features_) - (labels_,) = tf.tf_promote(labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end), name, "SparseSoftmaxCrossEntropyWithLogits") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function l2loss(t_; name=nothing) + local desc + tf.with_op_name(name, "L2Loss") do + desc = tf.NodeDescription("L2Loss") + t_ = convert(Tensor{Any}, t_) + (t_,) = tf.tf_promote(t_) + tf.add_input(desc, t_) + end + tf.Tensor(tf.Operation(desc)) end - out + function l2loss(t_::tf.TensorHandle; name=nothing) + desc = 
tf.EagerOp("L2Loss") + tf.add_input(desc, t_) + desc["T"] = tf.data_type(t_) + (tf.execute(desc))[1] end +end + """ - top_kv2(input, k; sorted=true) + resize_area(images, size; align_corners=false) """ -tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("TopKV2") - input_ = convert(TensorFlow.Tensor{Any}, input_) - k_ = convert(TensorFlow.Tensor{Int32}, k_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, k_) - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end - end), name, "TopKV2") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeArea") do + desc = tf.NodeDescription("ResizeArea") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + tf.Tensor(tf.Operation(desc)) end - out + function resize_area(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeArea") + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + desc["T"] = tf.data_type(images_) + (tf.execute(desc))[1] end - -""" - in_top_k(predictions, targets) +end """ -tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("InTopK") - predictions_ = convert(TensorFlow.Tensor{Float32}, predictions_) - targets_ = convert(TensorFlow.Tensor{Int32}, targets_) - (targets_,) = tf.tf_promote(targets_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - if k !== nothing - desc["k"] = Base.Int(k) - end - end), name, "InTopK") - tf.Tensor(tf.Operation(desc)) + sparse_cross(indices, values, shapes, dense_inputs) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + local desc + tf.with_op_name(name, "SparseCross") do + desc = tf.NodeDescription("SparseCross") + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + values_ = [convert(Tensor{Any}, x) for x = values_] + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_] + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + tf.add_input(desc, dense_inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if hashed_output !== nothing + desc["hashed_output"] = Base.Bool(hashed_output) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if hash_key !== nothing + desc["hash_key"] = Base.Int(hash_key) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if dense_types !== nothing + desc["dense_types"] = map(Base.identity, dense_types) + end + if out_type !== nothing + desc["out_type"] 
= Base.identity(out_type) + end + if internal_type !== nothing + desc["internal_type"] = Base.identity(internal_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_cross(indices_::tf.TensorHandle, values_::tf.TensorHandle, shapes_::tf.TensorHandle, dense_inputs_::tf.TensorHandle; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + desc = tf.EagerOp("SparseCross") + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + tf.add_input(desc, dense_inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if hashed_output !== nothing + desc["hashed_output"] = Base.Bool(hashed_output) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if hash_key !== nothing + desc["hash_key"] = Base.Int(hash_key) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if dense_types !== nothing + desc["dense_types"] = map(Base.identity, dense_types) + end + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if internal_type !== nothing + desc["internal_type"] = Base.identity(internal_type) + end + tf.execute(desc) end +end + """ - fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=) + batch_fft3d(input) """ -tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("FIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end), name, "FIFOQueueV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft3d(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT3D") do + desc = tf.NodeDescription("BatchFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) + end + tf.Tensor(tf.Operation(desc)) + end + function batch_fft3d(input_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("BatchFFT3D") + tf.add_input(desc, input_) + (tf.execute(desc))[1] end +end + """ - random_shuffle_queue_v2(; shapes=Int64[], capacity=-1, min_after_dequeue=0, seed=0, seed2=0, container=, shared_name=) + random_standard_normal(shape; seed=0, seed2=0) """ -tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("RandomShuffleQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed 
!== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end), name, "RandomShuffleQueueV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomStandardNormal") do + desc = tf.NodeDescription("RandomStandardNormal") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function random_standard_normal(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("RandomStandardNormal") + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["T"] = tf.data_type(shape_) + (tf.execute(desc))[1] end +end + """ - queue_enqueue_v2(handle, components; timeout_ms=-1) + resource_scatter_mul(resource, indices, updates) """ -tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("QueueEnqueueV2") - handle_ = convert(TensorFlow.Tensor{Any}, handle_) - components_ = [convert(TensorFlow.Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end), name, "QueueEnqueueV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMul") do + desc = tf.NodeDescription("ResourceScatterMul") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_scatter_mul(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterMul") + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) + (tf.execute(desc))[1] end - -""" - queue_enqueue_many_v2(handle, components; timeout_ms=-1) +end """ -tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, 
Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("QueueEnqueueManyV2") - handle_ = convert(TensorFlow.Tensor{Any}, handle_) - components_ = [convert(TensorFlow.Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end), name, "QueueEnqueueManyV2") - tf.Tensor(tf.Operation(desc)) + sdca_optimizer(sparse_example_indices, sparse_feature_indices, sparse_feature_values, dense_features, example_weights, example_labels, sparse_indices, sparse_weights, dense_weights, example_state_data; adaptative=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizer") do + desc = tf.NodeDescription("SdcaOptimizer") + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + example_labels_ = convert(Tensor{Float32}, example_labels_) + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptative !== nothing + desc["adaptative"] = Base.Bool(adaptative) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + 
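# Eager-mode twin of the graph method above: TensorHandle arguments dispatch here, building a tf.EagerOp that executes immediately instead of adding a node to the graph. + 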
function sdca_optimizer(sparse_example_indices_::tf.TensorHandle, sparse_feature_indices_::tf.TensorHandle, sparse_feature_values_::tf.TensorHandle, dense_features_::tf.TensorHandle, example_weights_::tf.TensorHandle, example_labels_::tf.TensorHandle, sparse_indices_::tf.TensorHandle, sparse_weights_::tf.TensorHandle, dense_weights_::tf.TensorHandle, example_state_data_::tf.TensorHandle; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + desc = tf.EagerOp("SdcaOptimizer") + tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptative !== nothing + desc["adaptative"] = Base.Bool(adaptative) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + tf.execute(desc) end +end + """ - queue_size_v2(handle) + zeta(x, q) """ -tf.@op function queue_size_v2(handle_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("QueueSizeV2") - handle_ = convert(TensorFlow.Tensor{Any}, handle_) - tf.add_input(desc, handle_) - end), name, "QueueSizeV2") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zeta(x_, q_; name=nothing) + local desc + tf.with_op_name(name, "Zeta") do + desc = tf.NodeDescription("Zeta") + x_ = convert(Tensor{Any}, x_) + q_ = convert(Tensor{Any}, q_) + (x_, q_) = tf.tf_promote(x_, q_) + tf.add_input(desc, x_) + tf.add_input(desc, q_) + end + tf.Tensor(tf.Operation(desc)) + end + function zeta(x_::tf.TensorHandle, q_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Zeta") + tf.add_input(desc, x_) + tf.add_input(desc, q_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(q_) + (tf.execute(desc))[1] end - -""" - queue_close_v2(handle; cancel_pending_enqueues=false) +end """ -tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("QueueCloseV2") - handle_ = convert(TensorFlow.Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end - end), name, "QueueCloseV2") - tf.Tensor(tf.Operation(desc)) + sample_distorted_bounding_box(image_size, bounding_boxes; seed=0, seed2=0, min_object_covered=?, aspect_ratio_range=Int64[], area_range=Int64[], 
max_attempts=100, use_image_if_no_bounding_boxes=false) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBox") do + desc = tf.NodeDescription("SampleDistortedBoundingBox") + image_size_ = convert(Tensor{Any}, image_size_) + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + (image_size_,) = tf.tf_promote(image_size_) + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if min_object_covered !== nothing + desc["min_object_covered"] = Base.identity(min_object_covered) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sample_distorted_bounding_box(image_size_::tf.TensorHandle, bounding_boxes_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + desc = tf.EagerOp("SampleDistortedBoundingBox") + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if min_object_covered !== nothing + desc["min_object_covered"] = Base.identity(min_object_covered) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + desc["T"] = tf.data_type(image_size_) + tf.execute(desc) end +end + """ - lin_space(start, stop, num) + igamma_grad_a(a, x) """ -tf.@op function lin_space(start_, stop_, num_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("LinSpace") - start_ = convert(TensorFlow.Tensor{Any}, start_) - stop_ = convert(TensorFlow.Tensor{Any}, stop_) - num_ = convert(TensorFlow.Tensor{Int32}, num_) - num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) - (start_, stop_) = tf.tf_promote(start_, stop_) - (num_,) = tf.tf_promote(num_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, num_) - end), name, "LinSpace") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igamma_grad_a(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "IgammaGradA") do + desc = tf.NodeDescription("IgammaGradA") + a_ = convert(Tensor{Any}, a_) + 
x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function igamma_grad_a(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("IgammaGradA") + tf.add_input(desc, a_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(a_) + desc["T"] = tf.data_type(x_) + (tf.execute(desc))[1] end +end + """ - range(start, limit, delta) + segment_max(data, segment_ids) """ -tf.@op function range(start_, limit_, delta_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Range") - start_ = convert(TensorFlow.Tensor{Int32}, start_) - limit_ = convert(TensorFlow.Tensor{Int32}, limit_) - delta_ = convert(TensorFlow.Tensor{Int32}, delta_) - (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - end), name, "Range") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_max(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMax") do + desc = tf.NodeDescription("SegmentMax") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function segment_max(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SegmentMax") + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tindices"] = tf.data_type(segment_ids_) + (tf.execute(desc))[1] end +end + """ - fill(dims, value; index_type=Int32) + range(start, limit, delta) """ -tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Fill") - dims_ = convert(TensorFlow.Tensor{Int32}, dims_) - value_ = convert(TensorFlow.Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - (dims_,) = tf.tf_promote(dims_) - tf.add_input(desc, dims_) - tf.add_input(desc, value_) - if index_type !== nothing - desc["index_type"] = Base.identity(index_type) - end - end), name, "Fill") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function range(start_, limit_, delta_; name=nothing) + local desc + tf.with_op_name(name, "Range") do + desc = tf.NodeDescription("Range") + start_ = convert(Tensor{Int32}, start_) + limit_ = convert(Tensor{Int32}, limit_) + delta_ = convert(Tensor{Int32}, delta_) + (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, delta_) + end + tf.Tensor(tf.Operation(desc)) + end + function range(start_::tf.TensorHandle, limit_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("Range") + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, delta_) + desc["Tidx"] = tf.data_type(start_) + desc["Tidx"] = tf.data_type(limit_) + desc["Tidx"] = tf.data_type(delta_) + (tf.execute(desc))[1] end - -""" - squeeze(input; squeeze_dims=Int64[]) +end """ -tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) - local desc - 
tf.with_op_name((()->begin - desc = tf.NodeDescription("Squeeze") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if squeeze_dims !== nothing - desc["squeeze_dims"] = map(Base.identity, squeeze_dims) - end - end), name, "Squeeze") - tf.Tensor(tf.Operation(desc)) + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; table_id=-1, table_name=) + + +""" +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + tf.execute(desc) end +end + """ - unpack(value; axis=0) + flush_summary_writer(writer) """ -tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Unpack") - value_ = convert(TensorFlow.Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if num !== nothing - desc["num"] = Base.Int(num) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end), name, "Unpack") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function flush_summary_writer(writer_; name=nothing) + local desc + tf.with_op_name(name, "FlushSummaryWriter") do + desc = tf.NodeDescription("FlushSummaryWriter") + writer_ = convert(Tensor{Any}, writer_) + tf.add_input(desc, writer_) + end + tf.Tensor(tf.Operation(desc)) end - out + function flush_summary_writer(writer_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("FlushSummaryWriter") + tf.add_input(desc, writer_) + (tf.execute(desc))[1] end +end + """ - transpose(x, perm) + dequantize(input, min_range, max_range; mode=MIN_COMBINED) """ -tf.@op function transpose(x_, perm_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Transpose") - x_ = convert(TensorFlow.Tensor{Any}, x_) - perm_ = convert(TensorFlow.Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - end), name, "Transpose") - tf.Tensor(tf.Operation(desc)) +begin + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "Dequantize") do + desc = tf.NodeDescription("Dequantize") + input_ = convert(Tensor{Any}, input_) + min_range_ = convert(Tensor{Float32}, min_range_) + max_range_ = convert(Tensor{Float32}, max_range_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + tf.Tensor(tf.Operation(desc)) + end + function dequantize(input_::tf.TensorHandle, min_range_::tf.TensorHandle, max_range_::tf.TensorHandle; name=nothing, mode=nothing) + desc = tf.EagerOp("Dequantize") + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - slice(input, begin, size) + sparse_fill_empty_rows_grad(reverse_index_map, grad_values) """ -tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Slice") - input_ = convert(TensorFlow.Tensor{Any}, input_) - begin_ = convert(TensorFlow.Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - size_ = convert(TensorFlow.Tensor{Any}, size_) - (input_,) = tf.tf_promote(input_) - (begin_, size_) = tf.tf_promote(begin_, size_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, size_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - end), name, "Slice") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRowsGrad") do + desc = tf.NodeDescription("SparseFillEmptyRowsGrad") + reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_) + grad_values_ = convert(Tensor{Any}, grad_values_) + (grad_values_,) = tf.tf_promote(grad_values_) + tf.add_input(desc, reverse_index_map_) + tf.add_input(desc, grad_values_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function sparse_fill_empty_rows_grad(reverse_index_map_::tf.TensorHandle, grad_values_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseFillEmptyRowsGrad") + tf.add_input(desc, reverse_index_map_) + tf.add_input(desc, grad_values_) + desc["T"] = tf.data_type(grad_values_) + tf.execute(desc) end +end + """ - rank(input) + iterator_get_next(iterator) """ -tf.@op function rank(input_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Rank") - input_ = convert(TensorFlow.Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end), name, "Rank") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNext") do + desc = tf.NodeDescription("IteratorGetNext") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end 
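+            # output_types and output_shapes are parallel attr lists, one entry per component of the iterator's element.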
+ if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function iterator_get_next(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNext") + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + (tf.execute(desc))[1] end +end + """ - conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) + sparse_tensor_dense_add(a_indices, a_values, a_shape, b) """ -tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Conv2DBackpropInput") - input_sizes_ = convert(TensorFlow.Tensor{Int32}, input_sizes_) - filter_ = convert(TensorFlow.Tensor{Any}, filter_) - out_backprop_ = convert(TensorFlow.Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end), name, "Conv2DBackpropInput") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorDenseAdd") do + desc = tf.NodeDescription("SparseTensorDenseAdd") + a_indices_ = convert(Tensor{Any}, a_indices_) + a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Any}, a_shape_) + a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1) + b_ = convert(Tensor{Any}, b_) + (a_values_, b_) = tf.tf_promote(a_values_, b_) + (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_tensor_dense_add(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("SparseTensorDenseAdd") + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) + desc["Tindices"] = tf.data_type(a_indices_) + desc["T"] = tf.data_type(a_values_) + desc["Tindices"] = tf.data_type(a_shape_) + desc["T"] = tf.data_type(b_) + (tf.execute(desc))[1] end +end + """ - svd(input; compute_uv=true, full_matrices=false) + prevent_gradient(input; message=) """ -tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Svd") - input_ = convert(TensorFlow.Tensor{Any}, input_) - 
(input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - end), name, "Svd") - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "PreventGradient") do + desc = tf.NodeDescription("PreventGradient") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if message !== nothing + desc["message"] = Base.String(message) + end + end + tf.Tensor(tf.Operation(desc)) + end + function prevent_gradient(input_::tf.TensorHandle; name=nothing, message=nothing) + desc = tf.EagerOp("PreventGradient") + tf.add_input(desc, input_) + if message !== nothing + desc["message"] = Base.String(message) end - out + desc["T"] = tf.data_type(input_) + (tf.execute(desc))[1] end +end + """ - cross(a, b) + lookup_table_export(table_handle) """ -tf.@op function cross(a_, b_; name=nothing) - local desc - tf.with_op_name((()->begin - desc = tf.NodeDescription("Cross") - a_ = convert(TensorFlow.Tensor{Any}, a_) - b_ = convert(TensorFlow.Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - end), name, "Cross") - tf.Tensor(tf.Operation(desc)) +begin + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_export(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExport") do + desc = tf.NodeDescription("LookupTableExport") + table_handle_ = convert(Tensor{String}, table_handle_) + tf.add_input(desc, table_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function lookup_table_export(table_handle_::tf.TensorHandle; name=nothing) + desc = tf.EagerOp("LookupTableExport") + tf.add_input(desc, table_handle_) + tf.execute(desc) end +end + end diff --git a/src/ops/math.jl b/src/ops/math.jl index a7a52e72..56bfa457 100644 --- a/src/ops/math.jl +++ b/src/ops/math.jl @@ -27,18 +27,18 @@ import .Ops: segment_prod -@op Base.argmin(n::AbstractTensor, dim; name=nothing) = Ops.arg_min(n, dim; name=name)+1 +@op Base.argmin(n::AbstractTensor, dim; name = nothing) = Ops.arg_min(n, dim; name = name) + 1 -@op Base.argmax(n::AbstractTensor, dim; name=nothing) = Ops.arg_max(n, dim; name=name)+1 +@op Base.argmax(n::AbstractTensor, dim; name = nothing) = Ops.arg_max(n, dim; name = name) + 1 @op Base.max(x::AbstractTensor, y; kwargs...) = Ops.maximum(x, y; kwargs...) @op Base.min(x::AbstractTensor, y; kwargs...) = Ops.minimum(x, y; kwargs...) -@op function LinearAlgebra.svd(a::AbstractTensor; full=false, kwargs...) +@op function LinearAlgebra.svd(a::AbstractTensor; full = false, kwargs...) # Match Base names and ordering of results - s,u,v = Ops.svd(a; compute_uv=true, full_matrices=full, kwargs...) - u,s,v + s, u, v = Ops.svd(a; compute_uv = true, full_matrices = full, kwargs...) 
+ u, s, v end @@ -68,7 +68,7 @@ const matmul = mat_mul Broadcast.broadcasted(::typeof(Base.literal_pow), ::typeof(^), x::AbstractTensor, y::Val{T}) where T = x^Tensor(T) -@op function batch_matmul(x::AbstractTensor,y::AbstractTensor; adj_x=false, adj_y=false, name=nothing) +@op function batch_matmul(x::AbstractTensor, y::AbstractTensor; adj_x = false, adj_y = false, name = nothing) if tf_version() >= v"1.0.0-" Base.depwarn(""" batch_matmul is deprecated. Its functionality is now subsumed by matmul. @@ -102,8 +102,9 @@ Args: Returns: A `Tensor`. Has the same type as `x`. - """ -@op function squared_difference(x, y; name=nothing) + +""" +@op function squared_difference(x, y; name = nothing) local desc with_op_name(name, "SquaredDifference") do x = Tensor(x) @@ -119,9 +120,9 @@ end Ops.cross(n1, n2; kwargs...) end -*(x::Number, n::AbstractTensor) = x.*n # For supporting notation like `2x` +*(x::Number, n::AbstractTensor) = x .* n # For supporting notation like `2x` -^(n::AbstractTensor, x::Int) = invoke(^, Tuple{AbstractTensor, Any}, n, x) +^(n::AbstractTensor, x::Int) = invoke(^, Tuple{AbstractTensor,Any}, n, x) for jl_func_name in [ :log, @@ -186,7 +187,7 @@ for (jl_func_name, tf_func_name) in [ @eval @define_unary LinearAlgebra.$jl_func_name Ops.$tf_func_name end -function LinearAlgebra.diagm(kv::Pair{T, S}) where {T<:Integer, S<:AbstractTensor} +function LinearAlgebra.diagm(kv::Pair{T,S}) where {T <: Integer,S <: AbstractTensor} if kv.first == 0 return Ops.diag(kv.second) end @@ -197,36 +198,46 @@ end # TODO Clean this up for reduction in [:sum, :prod, :min, :max, :all, :any, :mean] - @eval @op function $(Symbol("reduce_", reduction))(n::AbstractTensor; axis=nothing, keep_dims=false, name=nothing) + @eval @op function $(Symbol("reduce_", reduction))(n::AbstractTensor; axis = nothing, keep_dims = false, name = nothing) if name === nothing name = get_name("reduce") end - if axis == nothing - n = Tensor(n) # TODO: rewrite this - range_start = constant(Int32(0)) - range_delta = constant(Int32(1)) - desc = NodeDescription("Rank", "$name/rank") - add_input(desc, n) - rank = Tensor(Operation(desc), 1) - desc = NodeDescription("Range", "$name/range") - add_input(desc, range_start) - add_input(desc, rank) - add_input(desc, range_delta) - range = Tensor(Operation(desc), 1) - desc = NodeDescription($(capitalize(reduction)), name) - add_input(desc, n) - add_input(desc, range) - Tensor(Operation(desc), 1) + if eager_mode + if axis === nothing + n_value = Array(n) # TODO use shape functions instead + num_axis = length(size(n_value)) + axis = Ops.range(constant(0), constant(num_axis), constant(1)) + fn = Ops.$reduction + fn(n, axis, keep_dims = keep_dims) + end # TODO else case else - if isa(axis, Number) - axis = [axis] + if axis == nothing + n = Tensor(n) # TODO: rewrite this + range_start = constant(Int32(0)) + range_delta = constant(Int32(1)) + desc = NodeDescription("Rank", "$name/rank") + add_input(desc, n) + rank = Tensor(Operation(desc), 1) + desc = NodeDescription("Range", "$name/range") + add_input(desc, range_start) + add_input(desc, rank) + add_input(desc, range_delta) + range = Tensor(Operation(desc), 1) + desc = NodeDescription($(capitalize(reduction)), name) + add_input(desc, n) + add_input(desc, range) + Tensor(Operation(desc), 1) + else + if isa(axis, Number) + axis = [axis] + end + axis = [Int32(idx - 1) for idx in axis] + desc = NodeDescription($(capitalize(reduction)), name) + add_input(desc, Tensor(n)) + add_input(desc, Tensor(axis)) + desc["keep_dims"] = keep_dims + 
Tensor(Operation(desc), 1)
             end
-            axis = [Int32(idx-1) for idx in axis]
-            desc = NodeDescription($(capitalize(reduction)), name)
-            add_input(desc, Tensor(n))
-            add_input(desc, Tensor(axis))
-            desc["keep_dims"] = keep_dims
-            Tensor(Operation(desc), 1)
         end
     end
 end
@@ -241,7 +252,7 @@ for (jl_func, tf_func) in [
     (:(Base.any), :reduce_any),
     (:(Statistics.mean), :reduce_mean),
 ]
-    @eval function $jl_func(n::AbstractTensor, axis=nothing; kwargs...)
-        $tf_func(n; axis=axis, kwargs...)
+    @eval function $jl_func(n::AbstractTensor, axis = nothing; kwargs...)
+        $tf_func(n; axis = axis, kwargs...)
     end
 end
diff --git a/src/ops/sequences.jl b/src/ops/sequences.jl
index 6633ab6b..f6a61822 100644
--- a/src/ops/sequences.jl
+++ b/src/ops/sequences.jl
@@ -15,7 +15,7 @@ end
 convert_eltype(x, dtype) = x
 
-@op function constant(value; dtype=nothing, kwargs...)
+@op function constant(value; dtype = nothing, kwargs...)
     if dtype === nothing
         if isa(value, AbstractString)
             dtype = String
@@ -25,23 +25,27 @@ convert_eltype(x, dtype) = x
     else
         value = convert_eltype(value, dtype)
     end
-    Ops.const_(; value=value, dtype=dtype, kwargs...)
+    if eager_mode
+        EagerTensor(value)
+    else
+        Ops.const_(; value = value, dtype = dtype, kwargs...)
+    end
 end
 
 for f in [:zeros, :ones]
     @eval Base.$f(::Type{Tensor}, args::Integer...) = $f(Tensor{Float32}, args...)
-    @eval Base.$f(::Type{Tensor}, args::NTuple{N, Integer}) where N = $f(Tensor, args...)
+    @eval Base.$f(::Type{Tensor}, args::NTuple{N,Integer}) where N = $f(Tensor, args...)
     @eval Base.$f(::Type{Tensor{T}}, args::Integer...) where {T} = constant($f(T, args...))
-    @eval Base.$f(::Type{Tensor{T}}, args::NTuple{N, Integer}) where {T, N} = constant($f(T, args))
+    @eval Base.$f(::Type{Tensor{T}}, args::NTuple{N,Integer}) where {T,N} = constant($f(T, args))
 end
 
-@op function random_normal(shape; mean=0.0, stddev=1.0, dtype=Float32, name=nothing, kwargs...)
+@op function random_normal(shape; mean = 0.0, stddev = 1.0, dtype = Float32, name = nothing, kwargs...)
     local out
     with_op_name(name, "random_normal") do
         mean = convert(Tensor{dtype}, mean)
         stddev = convert(Tensor{dtype}, stddev)
-        standard = Ops.random_standard_normal(shape; name=name, dtype=dtype, kwargs...)
-        out = standard.*stddev + mean
+        standard = Ops.random_standard_normal(shape; name = name, dtype = dtype, kwargs...)
+        out = standard .* stddev + mean
     end
     out
 end
@@ -63,7 +67,7 @@ Args:
 Returns:
 A `Tensor` of the specified `shape` and `dtype` containing random values.
 """
-@op function random_uniform(shape, minval, maxval; name=nothing, seed=0, dtype=Float32)
+@op function random_uniform(shape, minval, maxval; name = nothing, seed = 0, dtype = Float32)
     local out
     with_op_name(name, "RandomUniformScaled") do
         seed1 = 0
@@ -71,8 +75,8 @@ A `Tensor` of the specified `shape` and `dtype` containing random values.
         seed2 = seed
         minval = convert(Tensor{dtype}, minval)
         maxval = convert(Tensor{dtype}, maxval)
-        r = random_uniform(shape; seed=seed1, seed2=seed2, dtype=dtype, name=name)
-        out = r .* (maxval-minval) + minval
+        r = random_uniform(shape; seed = seed1, seed2 = seed2, dtype = dtype, name = name)
+        out = r .* (maxval - minval) + minval
     end
     out
 end
@@ -82,14 +86,14 @@ end
     Ops.random_shuffle(t; kwargs...)
 end
 
-@op function Base.range(start::AbstractTensor; stop, num=Union{Integer, Nothin}, kwargs...)
+@op function Base.range(start::AbstractTensor; stop, num::Union{Integer,Nothing} = nothing, kwargs...)
     Ops.lin_space(start, stop, num; kwargs...)
 end
 
 @op Base.range(start::AbstractTensor, length; kwargs...) = range(start, 1, length; kwargs...)
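The `eager_mode` branch added to `constant` above is the pattern the rest of this
series generalizes: one user-facing entry point, two execution paths. A minimal
self-contained sketch of that dispatch, with `eager_mode`, `EagerTensor`,
`graph_const`, and `constant_sketch` as illustrative stand-ins rather than the
package's actual internals:

    # Toy model of the constant() dispatch above; every name is a stand-in.
    eager_mode = true

    struct EagerTensor{T}
        value::T                      # eager tensors carry their value directly
    end

    graph_const(value) = :const_node  # placeholder for Ops.const_ graph building

    function constant_sketch(value)
        if eager_mode
            EagerTensor(value)        # eager: materialize the value immediately
        else
            graph_const(value)        # graph: record a node, run it later
        end
    end

    constant_sketch([1.0, 2.0])       # => EagerTensor{Vector{Float64}}([1.0, 2.0])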
@op function Base.range(start::AbstractTensor, step, length; kwargs...) - Ops.range(start, length+1, step; kwargs...) + Ops.range(start, length + 1, step; kwargs...) end @op function Base.fill(n::AbstractTensor, dims; kwargs...) #TODO: I think this is uncallable in 0.5 @@ -105,6 +109,6 @@ end Ops.reverse_v2(x, indices; kwargs...) end -@op function Base.fill(n::AbstractTensor, dims::Tuple{Vararg{Int64, N}} where N; kwargs...) - invoke(fill, Tuple{AbstractTensor, Any}, n, dims; kwargs...) +@op function Base.fill(n::AbstractTensor, dims::Tuple{Vararg{Int64,N}} where N; kwargs...) + invoke(fill, Tuple{AbstractTensor,Any}, n, dims; kwargs...) end diff --git a/src/show.jl b/src/show.jl index 678f9531..f54ef127 100644 --- a/src/show.jl +++ b/src/show.jl @@ -4,10 +4,10 @@ import Juno: Tree, Row, fade, interleave import Printf @render Juno.Inline t::Tensor begin - s = get_shape(t) - shape = s.rank_unknown ? [fade("unknown")] : - interleave(map(dim -> ismissing(dim) ? "?" : dim , s.dims), fade("×")) - Tree(Row(fade(try string(eltype(t)," ") catch e "" end), + s = get_shape(t) + shape = s.rank_unknown ? [fade("unknown")] : + interleave(map(dim->ismissing(dim) ? "?" : dim, s.dims), fade("×")) + Tree(Row(fade(try string(eltype(t), " ") catch e "" end), HTML("Tensor "), shape...), [Text("name: $(node_name(t.op))"), @@ -40,12 +40,19 @@ function Base.show(io::IO, t::RawTensor) end end +function Base.show(io::IO, t::TensorHandle) + raw_tensor = resolve(t) + jl_array = convert(Array, raw_tensor) + ptr = pointer_from_objref(t) + print(io, "Tensor<$ptr>($(jl_array))") +end + function Base.show(io::IO, n::Operation) print(io, "") end function Base.show(io::IO, t::Tensor{T}) where T - @assert(T==eltype(t), "eltype = $(eltype(t)), but Tensor{$(T)})") + @assert(T == eltype(t), "eltype = $(eltype(t)), but Tensor{$(T)})") s = get_shape(t) if s.rank_unknown @@ -165,7 +172,7 @@ function find_tensorboard() return path end -function get_tensorboard(logdir=nothing) +function get_tensorboard(logdir = nothing) if isdefined(tensorboard, :x) port = tensorboard[].port + 1 else @@ -209,7 +216,7 @@ function visualize end @with_def_graph function visualize(g::Graph) tensorboard = get_tensorboard() - writer = summary.FileWriter(tensorboard.logdir, graph=g) + writer = summary.FileWriter(tensorboard.logdir, graph = g) visualize(writer) close(writer) end From 8cadecfe6b3d5eb9550a006858ae3669bca8e4d5 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 17:16:43 -0500 Subject: [PATCH 11/49] Switch to dispatch --- src/generate_ops.jl | 43 +- src/ops/imported_ops.jl | 12768 +++++++++++++++++++++++++++++++------- 2 files changed, 10432 insertions(+), 2379 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 4312473c..cc64681c 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -9,6 +9,7 @@ using Dates struct OpFunc expr::Expr eager_expr::Expr + dispatch_expr::Expr docstring::String name::Symbol end @@ -223,8 +224,10 @@ function to_function(op::tensorflow.OpDef) tf.execute(desc) end end + graph_name = Symbol("$(jl_name)_graph") + eager_name = Symbol("$(jl_name)_eager") expr = quote - @tf.op function $(jl_name)($(inputs...)) + @tf.op function $graph_name($(inputs...)) local desc tf.with_op_name(name, $(op.name)) do desc = tf.NodeDescription($(op.name)) @@ -235,14 +238,10 @@ function to_function(op::tensorflow.OpDef) $output_block end end - eager_inputs = [] - push!(eager_inputs, inputs[1]) - for i in 2:length(inputs) - push!(eager_inputs, :($(inputs[i])::tf.TensorHandle)) - end + eager_expr = 
quote - function $(jl_name)($(eager_inputs...)) + function $eager_name($(inputs...)) desc = tf.EagerOp($(op.name)) # $convert_block $eager_input_block @@ -252,6 +251,23 @@ function to_function(op::tensorflow.OpDef) end end + call_kw_params = Expr(:parameters) + for arg in inputs[1].args + push!(call_kw_params.args, Expr(:kw, arg.args[1], arg.args[1])) + end + call_args = Any[call_kw_params] + for input in inputs[2:end] + push!(call_args, input) + end + dispatch_expr = quote + function $jl_name($(inputs...)) + if tf.eager_mode + $(eager_name)($(call_args...)) + else + $(graph_name)($(call_args...)) + end + end + end posargs_str = join((arg.name for arg in op.input_arg), ", ") kwargs_str = [] for arg in op.attr @@ -279,7 +295,7 @@ function to_function(op::tensorflow.OpDef) escape_string(op.summary)) #TODO Workout how to get descriptions for docstrings expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) eager_expr = unblock(MacroTools.flatten(MacroTools.striplines(eager_expr))) - OpFunc(expr, eager_expr, doc_str, jl_name) + OpFunc(expr, eager_expr, dispatch_expr, doc_str, jl_name) end """ @@ -294,6 +310,7 @@ function stringify_func(opfunc::OpFunc) expr = quote $(opfunc.expr) $(opfunc.eager_expr) + $(opfunc.dispatch_expr) end expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) @@ -333,15 +350,15 @@ function import_ops(op_names) """) for name in op_names op = ops[name] - try + # try f = to_function(op) s = stringify_func(f) write(ops_file, s) print(ops_file, "\n\n") - catch err - err_msg = sprint(showerror, err) - @warn("Could not import operation $name: $err_msg") - end + # catch err + # err_msg = sprint(showerror, err) + # @warn("Could not import operation $name: $err_msg") + # end end write(ops_file, """ end diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index ed247580..6662d7c3 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-02-20T16:49:23.66 +# Autogenerated on 2019-02-20T17:14:58.083 module Ops import TensorFlow @@ -9,7 +9,7 @@ const tf = TensorFlow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) local desc tf.with_op_name(name, "ReduceJoin") do desc = tf.NodeDescription("ReduceJoin") @@ -26,7 +26,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reduce_join(inputs_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing, separator=nothing) + function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) desc = tf.EagerOp("ReduceJoin") tf.add_input(desc, inputs_) tf.add_input(desc, reduction_indices_) @@ -38,6 +38,13 @@ begin end (tf.execute(desc))[1] end + function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + if tf.eager_mode + reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + else + reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + end + end end @@ -47,7 +54,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; 
name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) local desc tf.with_op_name(name, "ReduceDataset") do desc = tf.NodeDescription("ReduceDataset") @@ -78,7 +85,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reduce_dataset(input_dataset_::tf.TensorHandle, initial_state_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) desc = tf.EagerOp("ReduceDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) @@ -103,6 +110,13 @@ begin end (tf.execute(desc))[1] end + function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + if tf.eager_mode + reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + else + reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + end + end end @@ -112,7 +126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListFromTensor") do desc = tf.NodeDescription("TensorListFromTensor") @@ -131,7 +145,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_from_tensor(tensor_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListFromTensor") tf.add_input(desc, tensor_) tf.add_input(desc, element_shape_) @@ -145,6 +159,13 @@ begin desc["shape_type"] = tf.data_type(element_shape_) (tf.execute(desc))[1] end + function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end @@ -154,7 +175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ExtractJpegShape") do desc = tf.NodeDescription("ExtractJpegShape") @@ -166,7 +187,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function extract_jpeg_shape(contents_::tf.TensorHandle; name=nothing, output_type=nothing) + function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) desc = tf.EagerOp("ExtractJpegShape") tf.add_input(desc, contents_) if output_type !== nothing @@ -174,6 +195,13 @@ begin end (tf.execute(desc))[1] end + function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) + if tf.eager_mode + extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) + else + extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) + end + end end @@ -183,7 +211,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Svd") do desc = tf.NodeDescription("Svd") @@ -204,7 +232,7 @@ begin end out end - function svd(input_::tf.TensorHandle; name=nothing, compute_uv=nothing, full_matrices=nothing) + function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("Svd") tf.add_input(desc, input_) if compute_uv !== nothing @@ -216,6 +244,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + if tf.eager_mode + svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + else + svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + end + end end @@ -225,7 +260,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextSync") do desc = tf.NodeDescription("IteratorGetNextSync") @@ -240,7 +275,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_get_next_sync(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextSync") tf.add_input(desc, iterator_) if output_types !== nothing @@ -251,6 +286,13 @@ begin end (tf.execute(desc))[1] end + function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -260,7 +302,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_enter(data_; 
name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "RefEnter") do desc = tf.NodeDescription("RefEnter") @@ -279,7 +321,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ref_enter(data_::tf.TensorHandle; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("RefEnter") tf.add_input(desc, data_) if frame_name !== nothing @@ -294,6 +336,13 @@ begin desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + if tf.eager_mode + ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + else + ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + end + end end @@ -303,7 +352,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function erf(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function erf_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erf") do desc = tf.NodeDescription("Erf") @@ -313,12 +362,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function erf(x_::tf.TensorHandle; name=nothing) + function erf_eager(x_; name=nothing) desc = tf.EagerOp("Erf") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function erf(x_; name=nothing) + if tf.eager_mode + erf_eager(x_; name=name) + else + erf_graph(x_; name=name) + end + end end @@ -328,7 +384,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExportV2") do desc = tf.NodeDescription("LookupTableExportV2") @@ -342,11 +398,18 @@ begin end out end - function lookup_table_export_v2(table_handle_::tf.TensorHandle; name=nothing) + function lookup_table_export_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExportV2") tf.add_input(desc, table_handle_) tf.execute(desc) end + function lookup_table_export_v2(table_handle_; name=nothing) + if tf.eager_mode + lookup_table_export_v2_eager(table_handle_; name=name) + else + lookup_table_export_v2_graph(table_handle_; name=name) + end + end end @@ -356,7 +419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function round(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function round_graph(x_; name=nothing) local desc tf.with_op_name(name, "Round") do desc = tf.NodeDescription("Round") @@ -366,12 +429,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function round(x_::tf.TensorHandle; name=nothing) + function round_eager(x_; name=nothing) desc = tf.EagerOp("Round") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function round(x_; name=nothing) + if tf.eager_mode + 
round_eager(x_; name=name) + else + round_graph(x_; name=name) + end + end end @@ -381,7 +451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeue") do desc = tf.NodeDescription("OutfeedDequeue") @@ -397,7 +467,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + function outfeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) desc = tf.EagerOp("OutfeedDequeue") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -410,6 +480,13 @@ begin end (tf.execute(desc))[1] end + function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + if tf.eager_mode + outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + else + outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + end + end end @@ -419,7 +496,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") @@ -428,11 +505,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_is_initialized_op(tree_handle_::tf.TensorHandle; name=nothing) + function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeIsInitializedOp") tf.add_input(desc, tree_handle_) (tf.execute(desc))[1] end + function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) + if tf.eager_mode + tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) + else + tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) + end + end end @@ -442,7 +526,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Merge") do desc = tf.NodeDescription("Merge") @@ -460,7 +544,7 @@ begin end out end - function merge(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("Merge") tf.add_input(desc, inputs_) if N !== nothing @@ -469,6 +553,13 @@ begin desc["T"] = tf.data_type(inputs_) tf.execute(desc) end + function merge(inputs_; name=nothing, N=nothing) + if tf.eager_mode + merge_eager(inputs_; name=name, N=N) + else + merge_graph(inputs_; name=name, N=N) + end + end end @@ -478,7 +569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "HistogramFixedWidth") do desc = tf.NodeDescription("HistogramFixedWidth") @@ -495,7 +586,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function histogram_fixed_width(values_::tf.TensorHandle, value_range_::tf.TensorHandle, nbins_::tf.TensorHandle; name=nothing, dtype=nothing) + function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) desc = tf.EagerOp("HistogramFixedWidth") tf.add_input(desc, values_) tf.add_input(desc, value_range_) @@ -507,6 +598,13 @@ begin desc["T"] = tf.data_type(value_range_) (tf.execute(desc))[1] end + function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) + if tf.eager_mode + histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) + else + histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) + end + end end @@ -516,7 +614,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function asin(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function asin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asin") do desc = tf.NodeDescription("Asin") @@ -526,12 +624,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function asin(x_::tf.TensorHandle; name=nothing) + function asin_eager(x_; name=nothing) desc = tf.EagerOp("Asin") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function asin(x_; name=nothing) + if tf.eager_mode + asin_eager(x_; name=name) + else + asin_graph(x_; name=name) + end + end end @@ -541,7 +646,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Any") do desc = tf.NodeDescription("Any") @@ -557,7 +662,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function any(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Any") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -567,6 +672,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -576,7 +688,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "RsqrtGrad") do desc = tf.NodeDescription("RsqrtGrad") @@ -588,7 +700,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rsqrt_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + function rsqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("RsqrtGrad") tf.add_input(desc, y_) tf.add_input(desc, dy_) @@ -596,6 +708,13 @@ begin desc["T"] = tf.data_type(dy_) 
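+        # tf.execute returns a Vector of output TensorHandles; single-output
+        # ops index [1]. The "T" attr is assigned from each input in turn,
+        # which is safe because y and dy must share a dtype.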
(tf.execute(desc))[1] end + function rsqrt_grad(y_, dy_; name=nothing) + if tf.eager_mode + rsqrt_grad_eager(y_, dy_; name=name) + else + rsqrt_grad_graph(y_, dy_; name=name) + end + end end @@ -605,7 +724,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatter") do desc = tf.NodeDescription("TensorArrayScatter") @@ -621,7 +740,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_scatter(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatter") tf.add_input(desc, handle_) tf.add_input(desc, indices_) @@ -630,6 +749,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) + end + end end @@ -639,7 +765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) local desc tf.with_op_name(name, "DynamicPartition") do desc = tf.NodeDescription("DynamicPartition") @@ -659,7 +785,7 @@ begin end out end - function dynamic_partition(data_::tf.TensorHandle, partitions_::tf.TensorHandle; name=nothing, num_partitions=nothing) + function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) desc = tf.EagerOp("DynamicPartition") tf.add_input(desc, data_) tf.add_input(desc, partitions_) @@ -669,6 +795,13 @@ begin desc["T"] = tf.data_type(data_) tf.execute(desc) end + function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) + if tf.eager_mode + dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) + else + dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) + end + end end @@ -678,7 +811,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") @@ -695,7 +828,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_private_thread_pool_dataset(input_dataset_::tf.TensorHandle, num_threads_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; 
name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, num_threads_) @@ -707,6 +840,13 @@ begin end (tf.execute(desc))[1] end + function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -716,7 +856,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeState") do desc = tf.NodeDescription("ReaderSerializeState") @@ -725,11 +865,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_serialize_state(reader_handle_::tf.TensorHandle; name=nothing) + function reader_serialize_state_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeState") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_serialize_state(reader_handle_; name=nothing) + if tf.eager_mode + reader_serialize_state_eager(reader_handle_; name=name) + else + reader_serialize_state_graph(reader_handle_; name=name) + end + end end @@ -739,7 +886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function right_shift(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function right_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RightShift") do desc = tf.NodeDescription("RightShift") @@ -751,7 +898,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function right_shift(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function right_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("RightShift") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -759,16 +906,23 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function right_shift(x_, y_; name=nothing) + if tf.eager_mode + right_shift_eager(x_, y_; name=name) + else + right_shift_graph(x_, y_; name=name) + end + end end """ - avg_pool3d(input; data_format=NDHWC) + avg_pool3d(input; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3D") do desc = tf.NodeDescription("AvgPool3D") @@ -790,7 +944,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function avg_pool3d(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3D") tf.add_input(desc, input_) if ksize !== nothing @@ -808,6 +962,13 @@ begin 
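+        # In the eager path the "T" attr is read directly off the input
+        # handle's dtype via tf.data_type, rather than being inferred with
+        # tf_promote during graph construction.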
desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -817,7 +978,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_png(image_; name=nothing, compression=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) local desc tf.with_op_name(name, "EncodePng") do desc = tf.NodeDescription("EncodePng") @@ -830,7 +991,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function encode_png(image_::tf.TensorHandle; name=nothing, compression=nothing) + function encode_png_eager(image_; name=nothing, compression=nothing) desc = tf.EagerOp("EncodePng") tf.add_input(desc, image_) if compression !== nothing @@ -839,6 +1000,13 @@ begin desc["T"] = tf.data_type(image_) (tf.execute(desc))[1] end + function encode_png(image_; name=nothing, compression=nothing) + if tf.eager_mode + encode_png_eager(image_; name=name, compression=compression) + else + encode_png_graph(image_; name=name, compression=compression) + end + end end @@ -848,7 +1016,7 @@ end Debug Identity Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugIdentity") do desc = tf.NodeDescription("DebugIdentity") @@ -870,7 +1038,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function debug_identity(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugIdentity") tf.add_input(desc, input_) if device_name !== nothing @@ -888,6 +1056,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + if tf.eager_mode + debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + else + debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + end + end end @@ -897,7 +1072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function imag(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function imag_graph(input_; name=nothing) local desc tf.with_op_name(name, "Imag") do desc = tf.NodeDescription("Imag") @@ -907,12 +1082,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function imag(input_::tf.TensorHandle; name=nothing) + function imag_eager(input_; name=nothing) desc = tf.EagerOp("Imag") tf.add_input(desc, 
input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function imag(input_; name=nothing) + if tf.eager_mode + imag_eager(input_; name=name) + else + imag_graph(input_; name=name) + end + end end @@ -922,7 +1104,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") @@ -955,7 +1137,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrlV2") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -979,6 +1161,13 @@ begin desc["T"] = tf.data_type(lr_power_) (tf.execute(desc))[1] end + function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end + end end @@ -988,7 +1177,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageClear") do desc = tf.NodeDescription("StageClear") @@ -1010,7 +1199,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function stage_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StageClear") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -1029,6 +1218,13 @@ begin end (tf.execute(desc))[1] end + function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, 
shared_name=shared_name) + end + end end @@ -1038,7 +1234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sign(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sign_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sign") do desc = tf.NodeDescription("Sign") @@ -1048,12 +1244,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sign(x_::tf.TensorHandle; name=nothing) + function sign_eager(x_; name=nothing) desc = tf.EagerOp("Sign") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function sign(x_; name=nothing) + if tf.eager_mode + sign_eager(x_; name=name) + else + sign_graph(x_; name=name) + end + end end @@ -1063,7 +1266,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function population_count(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function population_count_graph(x_; name=nothing) local desc tf.with_op_name(name, "PopulationCount") do desc = tf.NodeDescription("PopulationCount") @@ -1073,12 +1276,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function population_count(x_::tf.TensorHandle; name=nothing) + function population_count_eager(x_; name=nothing) desc = tf.EagerOp("PopulationCount") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function population_count(x_; name=nothing) + if tf.eager_mode + population_count_eager(x_; name=name) + else + population_count_graph(x_; name=name) + end + end end @@ -1088,7 +1298,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function neg(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function neg_graph(x_; name=nothing) local desc tf.with_op_name(name, "Neg") do desc = tf.NodeDescription("Neg") @@ -1098,12 +1308,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function neg(x_::tf.TensorHandle; name=nothing) + function neg_eager(x_; name=nothing) desc = tf.EagerOp("Neg") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function neg(x_; name=nothing) + if tf.eager_mode + neg_eager(x_; name=name) + else + neg_graph(x_; name=name) + end + end end @@ -1113,7 +1330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "AnonymousIterator") do desc = tf.NodeDescription("AnonymousIterator") @@ -1126,7 +1343,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + function anonymous_iterator_eager(; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("AnonymousIterator") if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -1136,6 +1353,13 @@ begin end (tf.execute(desc))[1] end + function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes) + else + anonymous_iterator_graph(; name=name, output_types=output_types, 
output_shapes=output_shapes) + end + end end @@ -1145,7 +1369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSum") do desc = tf.NodeDescription("SparseReduceSum") @@ -1164,7 +1388,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_reduce_sum(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSum") tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) @@ -1176,6 +1400,13 @@ begin desc["T"] = tf.data_type(input_values_) (tf.execute(desc))[1] end + function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.eager_mode + sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end + end end @@ -1185,7 +1416,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterDataset") do desc = tf.NodeDescription("FilterDataset") @@ -1208,7 +1439,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function filter_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -1226,16 +1457,23 @@ begin end (tf.execute(desc))[1] end + function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end + end end """ - string_length(input; unit=BYTE) + string_length(input; unit=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_length(input_; name=nothing, unit=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "StringLength") do desc = tf.NodeDescription("StringLength") @@ -1247,7 +1485,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_length(input_::tf.TensorHandle; name=nothing, unit=nothing) + function string_length_eager(input_; name=nothing, unit=nothing) desc = tf.EagerOp("StringLength") tf.add_input(desc, input_) if unit !== nothing @@ -1255,16 +1493,23 @@ begin end (tf.execute(desc))[1] end + function string_length(input_; name=nothing, unit=nothing) + if tf.eager_mode + string_length_eager(input_; name=name, unit=unit) + else + string_length_graph(input_; name=name, unit=unit) + end + end end """ - conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d(input, filter; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3D") do desc = tf.NodeDescription("Conv3D") @@ -1288,7 +1533,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv3d(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3D") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -1308,6 +1553,13 @@ begin desc["T"] = tf.data_type(filter_) (tf.execute(desc))[1] end + function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end + end end @@ -1317,7 +1569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") @@ -1341,7 +1593,7 @@ begin end out end - function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -1357,6 +1609,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_adagrad_parameters(; 
name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -1366,7 +1625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_has_value(optional_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_has_value_graph(optional_; name=nothing) local desc tf.with_op_name(name, "OptionalHasValue") do desc = tf.NodeDescription("OptionalHasValue") @@ -1375,11 +1634,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function optional_has_value(optional_::tf.TensorHandle; name=nothing) + function optional_has_value_eager(optional_; name=nothing) desc = tf.EagerOp("OptionalHasValue") tf.add_input(desc, optional_) (tf.execute(desc))[1] end + function optional_has_value(optional_; name=nothing) + if tf.eager_mode + optional_has_value_eager(optional_; name=name) + else + optional_has_value_graph(optional_; name=name) + end + end end @@ -1389,7 +1655,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyAdam") do desc = tf.NodeDescription("ApplyAdam") @@ -1423,7 +1689,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_adam(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyAdam") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -1453,16 +1719,23 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end """ - cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 
=# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsToCanonical") do desc = tf.NodeDescription("CudnnRNNParamsToCanonical") @@ -1504,7 +1777,7 @@ begin end out end - function cudnn_rnn_params_to_canonical(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsToCanonical") tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) @@ -1534,6 +1807,13 @@ begin desc["T"] = tf.data_type(params_) tf.execute(desc) end + function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end + end end @@ -1543,7 +1823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft3d(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT3D") do desc = tf.NodeDescription("IRFFT3D") @@ -1554,12 +1834,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function irfft3d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function irfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT3D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function irfft3d(input_, fft_length_; name=nothing) + if tf.eager_mode + irfft3d_eager(input_, fft_length_; name=name) + else + irfft3d_graph(input_, fft_length_; name=name) + end + end end @@ -1569,7 +1856,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function angle(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function angle_graph(input_; name=nothing) local desc tf.with_op_name(name, "Angle") do desc = tf.NodeDescription("Angle") @@ -1579,12 +1866,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function angle(input_::tf.TensorHandle; name=nothing) + function angle_eager(input_; name=nothing) desc 
= tf.EagerOp("Angle") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function angle(input_; name=nothing) + if tf.eager_mode + angle_eager(input_; name=name) + else + angle_graph(input_; name=name) + end + end end @@ -1594,7 +1888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") @@ -1607,7 +1901,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + function tensor_forest_tree_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TensorForestTreeResourceHandleOp") if container !== nothing desc["container"] = Base.String(container) @@ -1617,6 +1911,13 @@ begin end (tf.execute(desc))[1] end + function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -1626,7 +1927,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LearnedUnigramCandidateSampler") do desc = tf.NodeDescription("LearnedUnigramCandidateSampler") @@ -1658,7 +1959,7 @@ begin end out end - function learned_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LearnedUnigramCandidateSampler") tf.add_input(desc, true_classes_) if num_true !== nothing @@ -1681,6 +1982,13 @@ begin end tf.execute(desc) end + function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end + end end @@ -1690,7 +1998,7 @@ end A graph node which represents an argument to a function. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _arg(; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Arg") do desc = tf.NodeDescription("_Arg") @@ -1700,13 +2008,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _arg(; name=nothing, index=nothing) + function _arg_eager(; name=nothing, index=nothing) desc = tf.EagerOp("_Arg") if index !== nothing desc["index"] = Base.Int(index) end (tf.execute(desc))[1] end + function _arg(; name=nothing, index=nothing) + if tf.eager_mode + _arg_eager(; name=name, index=index) + else + _arg_graph(; name=name, index=index) + end + end end @@ -1716,7 +2031,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_square_root(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_square_root_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixSquareRoot") do desc = tf.NodeDescription("MatrixSquareRoot") @@ -1726,12 +2041,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_square_root(input_::tf.TensorHandle; name=nothing) + function matrix_square_root_eager(input_; name=nothing) desc = tf.EagerOp("MatrixSquareRoot") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_square_root(input_; name=nothing) + if tf.eager_mode + matrix_square_root_eager(input_; name=name) + else + matrix_square_root_graph(input_; name=name) + end + end end @@ -1741,7 +2063,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseMul") do desc = tf.NodeDescription("SparseDenseCwiseMul") @@ -1757,7 +2079,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_dense_cwise_mul(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseMul") tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) @@ -1767,6 +2089,13 @@ begin desc["T"] = tf.data_type(dense_) (tf.execute(desc))[1] end + function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.eager_mode + sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end + end end @@ -1776,7 +2105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV3") do desc = tf.NodeDescription("TensorArrayConcatV3") @@ -1798,7 +2127,7 @@ begin end out end - 
function tensor_array_concat_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV3") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -1810,6 +2139,13 @@ begin end tf.execute(desc) end + function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.eager_mode + tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end + end end @@ -1819,7 +2155,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_script(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_script_graph(input_; name=nothing) local desc tf.with_op_name(name, "UnicodeScript") do desc = tf.NodeDescription("UnicodeScript") @@ -1828,11 +2164,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unicode_script(input_::tf.TensorHandle; name=nothing) + function unicode_script_eager(input_; name=nothing) desc = tf.EagerOp("UnicodeScript") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function unicode_script(input_; name=nothing) + if tf.eager_mode + unicode_script_eager(input_; name=name) + else + unicode_script_graph(input_; name=name) + end + end end @@ -1842,7 +2185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "BatchCholeskyGrad") do desc = tf.NodeDescription("BatchCholeskyGrad") @@ -1854,7 +2197,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_cholesky_grad(l_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing) + function batch_cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("BatchCholeskyGrad") tf.add_input(desc, l_) tf.add_input(desc, grad_) @@ -1862,6 +2205,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function batch_cholesky_grad(l_, grad_; name=nothing) + if tf.eager_mode + batch_cholesky_grad_eager(l_, grad_; name=name) + else + batch_cholesky_grad_graph(l_, grad_; name=name) + end + end end @@ -1871,7 +2221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Mean") do desc = tf.NodeDescription("Mean") @@ -1888,7 +2238,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mean(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Mean") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -1899,6 +2249,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function mean(input_, 
reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -1908,7 +2265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT") do desc = tf.NodeDescription("BatchFFT") @@ -1917,11 +2274,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_fft(input_::tf.TensorHandle; name=nothing) + function batch_fft_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_fft(input_; name=nothing) + if tf.eager_mode + batch_fft_eager(input_; name=name) + else + batch_fft_graph(input_; name=name) + end + end end @@ -1931,7 +2295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sin(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sin") do desc = tf.NodeDescription("Sin") @@ -1941,12 +2305,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sin(x_::tf.TensorHandle; name=nothing) + function sin_eager(x_; name=nothing) desc = tf.EagerOp("Sin") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function sin(x_; name=nothing) + if tf.eager_mode + sin_eager(x_; name=name) + else + sin_graph(x_; name=name) + end + end end @@ -1956,7 +2327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") @@ -1969,7 +2340,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + function boosted_trees_ensemble_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp") if container !== nothing desc["container"] = Base.String(container) @@ -1979,6 +2350,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -1988,7 +2366,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, 
strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedMaxPool") do desc = tf.NodeDescription("QuantizedMaxPool") @@ -2016,7 +2394,7 @@ begin end out end - function quantized_max_pool(input_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedMaxPool") tf.add_input(desc, input_) tf.add_input(desc, min_input_) @@ -2033,6 +2411,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end + end end @@ -2042,7 +2427,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapStage") do desc = tf.NodeDescription("OrderedMapStage") @@ -2073,7 +2458,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_stage(key_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapStage") tf.add_input(desc, key_) tf.add_input(desc, indices_) @@ -2098,6 +2483,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + else + ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + end + end end @@ -2107,7 +2499,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "PartitionedCall") do desc = 
tf.NodeDescription("PartitionedCall") @@ -2134,7 +2526,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function partitioned_call(args_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("PartitionedCall") tf.add_input(desc, args_) if Tin !== nothing @@ -2157,6 +2549,13 @@ begin end (tf.execute(desc))[1] end + function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + if tf.eager_mode + partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + else + partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + end + end end @@ -2166,7 +2565,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "SparseApplyAdagrad") do desc = tf.NodeDescription("SparseApplyAdagrad") @@ -2192,7 +2591,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("SparseApplyAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -2212,16 +2611,23 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.eager_mode + sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end + end end """ - decode_proto_v2(bytes; descriptor_source=local://, message_format=binary, sanitize=false) + decode_proto_v2(bytes; descriptor_source=, message_format=, sanitize=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) local desc tf.with_op_name(name, "DecodeProtoV2") do desc = tf.NodeDescription("DecodeProtoV2") @@ -2253,7 +2659,7 @@ begin end out end - function decode_proto_v2(bytes_::tf.TensorHandle; name=nothing, message_type=nothing, field_names=nothing, 
output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) desc = tf.EagerOp("DecodeProtoV2") tf.add_input(desc, bytes_) if message_type !== nothing @@ -2276,6 +2682,13 @@ begin end tf.execute(desc) end + function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + if tf.eager_mode + decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + else + decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + end + end end @@ -2285,7 +2698,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function betainc(a_, b_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) local desc tf.with_op_name(name, "Betainc") do desc = tf.NodeDescription("Betainc") @@ -2299,7 +2712,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function betainc(a_::tf.TensorHandle, b_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function betainc_eager(a_, b_, x_; name=nothing) desc = tf.EagerOp("Betainc") tf.add_input(desc, a_) tf.add_input(desc, b_) @@ -2309,6 +2722,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function betainc(a_, b_, x_; name=nothing) + if tf.eager_mode + betainc_eager(a_, b_, x_; name=name) + else + betainc_graph(a_, b_, x_; name=name) + end + end end @@ -2318,7 +2738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function guarantee_const(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function guarantee_const_graph(input_; name=nothing) local desc tf.with_op_name(name, "GuaranteeConst") do desc = tf.NodeDescription("GuaranteeConst") @@ -2328,12 +2748,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function guarantee_const(input_::tf.TensorHandle; name=nothing) + function guarantee_const_eager(input_; name=nothing) desc = tf.EagerOp("GuaranteeConst") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function guarantee_const(input_; name=nothing) + if tf.eager_mode + guarantee_const_eager(input_; name=name) + else + guarantee_const_graph(input_; name=name) + end + end end @@ -2343,7 +2770,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) local desc tf.with_op_name(name, "DecodeBmp") do desc = tf.NodeDescription("DecodeBmp") @@ -2355,7 +2782,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_bmp(contents_::tf.TensorHandle; name=nothing, channels=nothing) + function decode_bmp_eager(contents_; name=nothing, channels=nothing) desc = tf.EagerOp("DecodeBmp") tf.add_input(desc, contents_) if channels !== nothing 
@@ -2363,6 +2790,13 @@ begin end (tf.execute(desc))[1] end + function decode_bmp(contents_; name=nothing, channels=nothing) + if tf.eager_mode + decode_bmp_eager(contents_; name=name, channels=channels) + else + decode_bmp_graph(contents_; name=name, channels=channels) + end + end end @@ -2372,7 +2806,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesBucketize") do desc = tf.NodeDescription("BoostedTreesBucketize") @@ -2391,7 +2825,7 @@ begin end out end - function boosted_trees_bucketize(float_values_::tf.TensorHandle, bucket_boundaries_::tf.TensorHandle; name=nothing, num_features=nothing) + function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesBucketize") tf.add_input(desc, float_values_) tf.add_input(desc, bucket_boundaries_) @@ -2400,6 +2834,13 @@ begin end tf.execute(desc) end + function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features) + else + boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features) + end + end end @@ -2409,7 +2850,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shutdown_distributed_tpu(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "ShutdownDistributedTPU") do desc @@ -2417,10 +2858,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function shutdown_distributed_tpu(; name=nothing) + function shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("ShutdownDistributedTPU") (tf.execute(desc))[1] end + function shutdown_distributed_tpu(; name=nothing) + if tf.eager_mode + shutdown_distributed_tpu_eager(; name=name) + else + shutdown_distributed_tpu_graph(; name=name) + end + end end @@ -2430,7 +2878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") @@ -2439,11 +2887,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_stats_aggregator_summary(iterator_::tf.TensorHandle; name=nothing) + function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") tf.add_input(desc, iterator_) (tf.execute(desc))[1] end + function experimental_stats_aggregator_summary(iterator_; name=nothing) + if tf.eager_mode + experimental_stats_aggregator_summary_eager(iterator_; name=name) + else + experimental_stats_aggregator_summary_graph(iterator_; name=name) + end + end end @@ -2453,7 +2908,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function timestamp(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function timestamp_graph(; name=nothing) local desc tf.with_op_name(name, "Timestamp") do desc @@ -2461,10 +2916,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function timestamp(; name=nothing) + function timestamp_eager(; name=nothing) desc = tf.EagerOp("Timestamp") (tf.execute(desc))[1] end + function timestamp(; name=nothing) + if tf.eager_mode + timestamp_eager(; name=name) + else + timestamp_graph(; name=name) + end + end end @@ -2474,7 +2936,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_exponential(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_exponential_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixExponential") do desc = tf.NodeDescription("MatrixExponential") @@ -2484,12 +2946,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_exponential(input_::tf.TensorHandle; name=nothing) + function matrix_exponential_eager(input_; name=nothing) desc = tf.EagerOp("MatrixExponential") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_exponential(input_; name=nothing) + if tf.eager_mode + matrix_exponential_eager(input_; name=name) + else + matrix_exponential_graph(input_; name=name) + end + end end @@ -2499,7 +2968,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function size(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Size") do desc = tf.NodeDescription("Size") @@ -2512,7 +2981,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function size(input_::tf.TensorHandle; name=nothing, out_type=nothing) + function size_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Size") tf.add_input(desc, input_) if out_type !== nothing @@ -2521,6 +2990,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function size(input_; name=nothing, out_type=nothing) + if tf.eager_mode + size_eager(input_; name=name, out_type=out_type) + else + size_graph(input_; name=name, out_type=out_type) + end + end end @@ -2530,7 +3006,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_n(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "AddN") do desc = tf.NodeDescription("AddN") @@ -2543,7 +3019,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function add_n(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function add_n_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("AddN") tf.add_input(desc, inputs_) if N !== nothing @@ -2552,6 +3028,13 @@ begin desc["T"] = tf.data_type(inputs_) (tf.execute(desc))[1] end + function add_n(inputs_; name=nothing, N=nothing) + if tf.eager_mode + add_n_eager(inputs_; name=name, N=N) + else + add_n_graph(inputs_; name=name, N=N) + end + end end @@ -2561,7 +3044,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSum") do desc = tf.NodeDescription("SparseSegmentSum") @@ -2577,7 +3060,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_sum(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSum") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -2586,6 +3069,13 @@ begin desc["Tidx"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) + if tf.eager_mode + sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name) + end + end end @@ -2595,7 +3085,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDataset") do desc = tf.NodeDescription("BatchDataset") @@ -2612,7 +3102,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) @@ -2624,6 +3114,13 @@ begin end (tf.execute(desc))[1] end + function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -2633,7 +3130,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) local desc tf.with_op_name(name, "RecordInput") do desc = tf.NodeDescription("RecordInput") @@ -2661,7 +3158,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + function record_input_eager(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, 
batch_size=nothing, compression_type=nothing) desc = tf.EagerOp("RecordInput") if file_pattern !== nothing desc["file_pattern"] = Base.String(file_pattern) @@ -2686,6 +3183,13 @@ begin end (tf.execute(desc))[1] end + function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + if tf.eager_mode + record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + else + record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + end + end end @@ -2695,7 +3199,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpToV2") do desc = tf.NodeDescription("QueueDequeueUpToV2") @@ -2712,7 +3216,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_up_to_v2(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpToV2") tf.add_input(desc, handle_) tf.add_input(desc, n_) @@ -2724,6 +3228,13 @@ begin end (tf.execute(desc))[1] end + function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end + end end @@ -2733,7 +3244,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -2757,7 +3268,7 @@ begin end out end - function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = 
tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -2773,6 +3284,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -2782,7 +3300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -2809,7 +3327,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") tf.add_input(desc, parameters_) tf.add_input(desc, ms_) @@ -2829,6 +3347,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -2838,7 +3363,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_tensor(tensor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) local desc tf.with_op_name(name, "SerializeTensor") do desc = tf.NodeDescription("SerializeTensor") @@ -2848,12 +3373,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function serialize_tensor(tensor_::tf.TensorHandle; name=nothing) + function serialize_tensor_eager(tensor_; name=nothing) desc = tf.EagerOp("SerializeTensor") tf.add_input(desc, 
tensor_) desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function serialize_tensor(tensor_; name=nothing) + if tf.eager_mode + serialize_tensor_eager(tensor_; name=name) + else + serialize_tensor_graph(tensor_; name=name) + end + end end @@ -2863,7 +3395,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mul(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mul_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mul") do desc = tf.NodeDescription("Mul") @@ -2875,7 +3407,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mul(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function mul_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mul") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -2883,6 +3415,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function mul(x_, y_; name=nothing) + if tf.eager_mode + mul_eager(x_, y_; name=name) + else + mul_graph(x_, y_; name=name) + end + end end @@ -2892,7 +3431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") @@ -2909,7 +3448,7 @@ begin end out end - function softmax_cross_entropy_with_logits(features_::tf.TensorHandle, labels_::tf.TensorHandle; name=nothing) + function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") tf.add_input(desc, features_) tf.add_input(desc, labels_) @@ -2917,6 +3456,13 @@ begin desc["T"] = tf.data_type(labels_) tf.execute(desc) end + function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.eager_mode + softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end + end end @@ -2926,7 +3472,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterDiv") do desc = tf.NodeDescription("ResourceScatterDiv") @@ -2945,7 +3491,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_div(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterDiv") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -2957,6 +3503,13 @@ begin desc["dtype"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.eager_mode + resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end + end end @@ -2966,7 +3519,7 @@ end """ 
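# Every hunk in this generated file applies the same rewrite: the old `foo`
# definition becomes `foo_graph` (build a NodeDescription inside
# `with_op_name`) and `foo_eager` (build an EagerOp and run it immediately),
# plus a thin `foo` dispatcher on the global `tf.eager_mode` flag. A minimal
# hand-written sketch of that shape, modeled on the `sin`, `mul`, and
# `softmax_cross_entropy_with_logits` hunks above; `neg`/"Neg" is used here
# purely for illustration, and the generated `_graph` methods are
# additionally wrapped in `tf.@op`:
function neg_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "Neg") do
        desc = tf.NodeDescription("Neg")
        tf.add_input(desc, x_)          # record the input edge in the graph
    end
    tf.Tensor(tf.Operation(desc))       # symbolic result; runs later in a session
end
function neg_eager(x_; name=nothing)
    desc = tf.EagerOp("Neg")            # eager op, executed right away
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)        # the "T" attr is inferred from the live handle
    (tf.execute(desc))[1]               # single-output ops unwrap the result list
end
function neg(x_; name=nothing)
    if tf.eager_mode
        neg_eager(x_; name=name)
    else
        neg_graph(x_; name=name)
    end
end
# Multi-output ops (e.g. SoftmaxCrossEntropyWithLogits above) differ only in
# the tails: graph mode collects one tf.Tensor(op, out_idx) per output into
# `out`, and eager mode returns the whole tf.execute(desc) list unwrapped.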
begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDatasetV2") do desc = tf.NodeDescription("FixedLengthRecordDatasetV2") @@ -2985,7 +3538,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fixed_length_record_dataset_v2(filenames_::tf.TensorHandle, header_bytes_::tf.TensorHandle, record_bytes_::tf.TensorHandle, footer_bytes_::tf.TensorHandle, buffer_size_::tf.TensorHandle, compression_type_::tf.TensorHandle; name=nothing) + function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDatasetV2") tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) @@ -2995,6 +3548,13 @@ begin tf.add_input(desc, compression_type_) (tf.execute(desc))[1] end + function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + if tf.eager_mode + fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + else + fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + end + end end @@ -3004,7 +3564,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "SkipDataset") do desc = tf.NodeDescription("SkipDataset") @@ -3021,7 +3581,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function skip_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("SkipDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) @@ -3033,6 +3593,13 @@ begin end (tf.execute(desc))[1] end + function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -3042,7 +3609,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cosh(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cosh") do desc = tf.NodeDescription("Cosh") @@ -3052,22 +3619,29 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cosh(x_::tf.TensorHandle; name=nothing) + function cosh_eager(x_; name=nothing) 
desc = tf.EagerOp("Cosh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function cosh(x_; name=nothing) + if tf.eager_mode + cosh_eager(x_; name=name) + else + cosh_graph(x_; name=name) + end + end end """ - fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormV2") do desc = tf.NodeDescription("FusedBatchNormV2") @@ -3103,7 +3677,7 @@ begin end out end - function fused_batch_norm_v2(x_::tf.TensorHandle, scale_::tf.TensorHandle, offset_::tf.TensorHandle, mean_::tf.TensorHandle, variance_::tf.TensorHandle; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormV2") tf.add_input(desc, x_) tf.add_input(desc, scale_) @@ -3129,6 +3703,13 @@ begin desc["U"] = tf.data_type(variance_) tf.execute(desc) end + function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.eager_mode + fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + end + end end @@ -3138,7 +3719,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplit") do desc = tf.NodeDescription("TensorArraySplit") @@ -3154,7 +3735,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_split(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplit") tf.add_input(desc, handle_) tf.add_input(desc, value_) @@ -3163,6 +3744,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name) + end + end end @@ -3172,7 +3760,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, 
preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
         local desc
         tf.with_op_name(name, "CTCLoss") do
             desc = tf.NodeDescription("CTCLoss")
@@ -3201,7 +3789,7 @@ begin
         end
         out
     end
-    function ctc_loss(inputs_::tf.TensorHandle, labels_indices_::tf.TensorHandle, labels_values_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+    function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
         desc = tf.EagerOp("CTCLoss")
         tf.add_input(desc, inputs_)
         tf.add_input(desc, labels_indices_)
         tf.add_input(desc, labels_values_)
         tf.add_input(desc, sequence_length_)
@@ -3218,6 +3806,13 @@ begin
         end
         tf.execute(desc)
     end
+    function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
+        if tf.eager_mode
+            ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
+        else
+            ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
+        end
+    end
 end
@@ -3227,7 +3822,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing)
         local desc
         tf.with_op_name(name, "QuantizedReshape") do
             desc = tf.NodeDescription("QuantizedReshape")
@@ -3249,7 +3844,7 @@ begin
         end
         out
     end
-    function quantized_reshape(tensor_::tf.TensorHandle, shape_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing)
+    function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing)
         desc = tf.EagerOp("QuantizedReshape")
         tf.add_input(desc, tensor_)
         tf.add_input(desc, shape_)
         tf.add_input(desc, input_min_)
         tf.add_input(desc, input_max_)
         desc["T"] = tf.data_type(tensor_)
         desc["Tshape"] = tf.data_type(shape_)
         tf.execute(desc)
     end
+    function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing)
+        if tf.eager_mode
+            quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name)
+        else
+            quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=name)
+        end
+    end
 end
@@ -3268,7 +3870,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor_div(x_, y_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_div_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "FloorDiv") do
             desc = tf.NodeDescription("FloorDiv")
@@ -3280,7 +3882,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function floor_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing)
+    function floor_div_eager(x_, y_; name=nothing)
         desc = tf.EagerOp("FloorDiv")
         tf.add_input(desc, x_)
         tf.add_input(desc, y_)
@@ -3288,6 +3890,13 @@ begin
         desc["T"] = tf.data_type(y_)
         (tf.execute(desc))[1]
     end
+    function floor_div(x_, y_; name=nothing)
+        if tf.eager_mode
+            floor_div_eager(x_, y_; name=name)
+        else
+            floor_div_graph(x_, y_; name=name)
+        end
+    end
 end
@@ -3297,7 +3906,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
         local desc
         tf.with_op_name(name, "TensorArrayV2") do
             desc = tf.NodeDescription("TensorArrayV2")
@@ -3321,7 +3930,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function tensor_array_v2(size_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+    function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
         desc = tf.EagerOp("TensorArrayV2")
         tf.add_input(desc, size_)
         if dtype !== nothing
@@ -3341,6 +3950,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
+        if tf.eager_mode
+            tensor_array_v2_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name)
+        else
+            tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name)
+        end
+    end
 end
@@ -3350,7 +3966,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing)
         local desc
         tf.with_op_name(name, "BarrierClose") do
             desc = tf.NodeDescription("BarrierClose")
@@ -3362,7 +3978,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function barrier_close(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing)
+    function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing)
         desc = tf.EagerOp("BarrierClose")
         tf.add_input(desc, handle_)
         if cancel_pending_enqueues !== nothing
@@ -3370,6 +3986,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing)
+        if tf.eager_mode
+            barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+        else
+            barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues)
+        end
+    end
 end
@@ -3379,7 +4002,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "ReadVariableOp") do
             desc = tf.NodeDescription("ReadVariableOp")
@@ -3391,7 +4014,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function read_variable_op(resource_::tf.TensorHandle; name=nothing, dtype=nothing)
+    function read_variable_op_eager(resource_; name=nothing, dtype=nothing)
         desc = tf.EagerOp("ReadVariableOp")
         tf.add_input(desc, resource_)
         if dtype !== nothing
@@ -3399,6 +4022,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function read_variable_op(resource_; name=nothing, dtype=nothing)
+        if tf.eager_mode
+            read_variable_op_eager(resource_; name=name, dtype=dtype)
+        else
+            read_variable_op_graph(resource_; name=name, dtype=dtype)
+        end
+    end
 end
@@ -3408,7 +4038,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
         local desc
         tf.with_op_name(name, "QuantizedMul") do
             desc = tf.NodeDescription("QuantizedMul")
@@ -3434,7 +4064,7 @@ begin
         end
         out
     end
-    function quantized_mul(x_::tf.TensorHandle, y_::tf.TensorHandle, min_x_::tf.TensorHandle, max_x_::tf.TensorHandle, min_y_::tf.TensorHandle, max_y_::tf.TensorHandle; name=nothing)
+    function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
         desc = tf.EagerOp("QuantizedMul")
         tf.add_input(desc, x_)
         tf.add_input(desc, y_)
@@ -3446,6 +4076,13 @@ begin
         desc["T2"] = tf.data_type(y_)
         tf.execute(desc)
     end
+    function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
+        if tf.eager_mode
+            quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+        else
+            quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name)
+        end
+    end
 end
@@ -3455,7 +4092,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function selu(features_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function selu_graph(features_; name=nothing)
         local desc
         tf.with_op_name(name, "Selu") do
             desc = tf.NodeDescription("Selu")
@@ -3465,22 +4102,29 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function selu(features_::tf.TensorHandle; name=nothing)
+    function selu_eager(features_; name=nothing)
         desc = tf.EagerOp("Selu")
         tf.add_input(desc, features_)
         desc["T"] = tf.data_type(features_)
         (tf.execute(desc))[1]
     end
+    function selu(features_; name=nothing)
+        if tf.eager_mode
+            selu_eager(features_; name=name)
+        else
+            selu_graph(features_; name=name)
+        end
+    end
 end
 """
-    cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0)
+    cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "CudnnRNNBackpropV3") do
             desc = tf.NodeDescription("CudnnRNNBackpropV3")
@@ -3537,7 +4181,7 @@ begin
         end
         out
     end
-    function cudnn_rnn_backprop_v3(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, sequence_lengths_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle, host_reserved_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+    function cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
         desc = tf.EagerOp("CudnnRNNBackpropV3")
         tf.add_input(desc, input_)
         tf.add_input(desc, input_h_)
@@ -3583,6 +4227,13 @@ begin
         desc["T"] = tf.data_type(reserve_space_)
         tf.execute(desc)
     end
+    function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+        if tf.eager_mode
+            cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+        else
+            cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
+        end
+    end
 end
@@ -3592,7 +4243,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing)
         local desc
         tf.with_op_name(name, "LookupTableInsert") do
             desc = tf.NodeDescription("LookupTableInsert")
@@ -3607,7 +4258,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function lookup_table_insert(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing)
+    function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing)
         desc = tf.EagerOp("LookupTableInsert")
         tf.add_input(desc, table_handle_)
         tf.add_input(desc, keys_)
@@ -3616,6 +4267,13 @@ begin
         desc["Tout"] = tf.data_type(values_)
         (tf.execute(desc))[1]
     end
+    function lookup_table_insert(table_handle_, keys_, values_; name=nothing)
+        if tf.eager_mode
+            lookup_table_insert_eager(table_handle_, keys_, values_; name=name)
+        else
+            lookup_table_insert_graph(table_handle_, keys_, values_; name=name)
+        end
+    end
 end
@@ -3625,7 +4283,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function complex_abs(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function complex_abs_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "ComplexAbs") do
             desc = tf.NodeDescription("ComplexAbs")
@@ -3635,12 +4293,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function complex_abs(x_::tf.TensorHandle; name=nothing)
+    function complex_abs_eager(x_; name=nothing)
        desc = tf.EagerOp("ComplexAbs")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        (tf.execute(desc))[1]
     end
+    function complex_abs(x_; name=nothing)
+        if tf.eager_mode
+            complex_abs_eager(x_; name=name)
+        else
+            complex_abs_graph(x_; name=name)
+        end
+    end
 end
@@ -3650,7 +4315,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tridiagonal_solve(diagonals_, rhs_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing)
        local desc
        tf.with_op_name(name, "TridiagonalSolve") do
            desc = tf.NodeDescription("TridiagonalSolve")
@@ -3662,7 +4327,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function tridiagonal_solve(diagonals_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing)
+    function tridiagonal_solve_eager(diagonals_, rhs_; name=nothing)
        desc = tf.EagerOp("TridiagonalSolve")
        tf.add_input(desc, diagonals_)
        tf.add_input(desc, rhs_)
@@ -3670,6 +4335,13 @@ begin
        desc["T"] = tf.data_type(rhs_)
        (tf.execute(desc))[1]
     end
+    function tridiagonal_solve(diagonals_, rhs_; name=nothing)
+        if tf.eager_mode
+            tridiagonal_solve_eager(diagonals_, rhs_; name=name)
+        else
+            tridiagonal_solve_graph(diagonals_, rhs_; name=name)
+        end
+    end
 end
@@ -3679,7 +4351,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_import(table_handle_, keys_, values_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing)
        local desc
        tf.with_op_name(name, "LookupTableImport") do
            desc = tf.NodeDescription("LookupTableImport")
@@ -3694,7 +4366,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function lookup_table_import(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing)
+    function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing)
        desc = tf.EagerOp("LookupTableImport")
        tf.add_input(desc, table_handle_)
        tf.add_input(desc, keys_)
@@ -3703,6 +4375,13 @@ begin
        desc["Tout"] = tf.data_type(values_)
        (tf.execute(desc))[1]
     end
+    function lookup_table_import(table_handle_, keys_, values_; name=nothing)
+        if tf.eager_mode
+            lookup_table_import_eager(table_handle_, keys_, values_; name=name)
+        else
+            lookup_table_import_graph(table_handle_, keys_, values_; name=name)
+        end
+    end
 end
@@ -3712,7 +4391,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function abs(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function abs_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "Abs") do
            desc = tf.NodeDescription("Abs")
@@ -3722,12 +4401,19 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function abs(x_::tf.TensorHandle; name=nothing)
+    function abs_eager(x_; name=nothing)
        desc = tf.EagerOp("Abs")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        (tf.execute(desc))[1]
     end
+    function abs(x_; name=nothing)
+        if tf.eager_mode
+            abs_eager(x_; name=name)
+        else
+            abs_graph(x_; name=name)
+        end
+    end
 end
@@ -3737,7 +4423,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
        local desc
        tf.with_op_name(name, "ResourceApplyAdam") do
            desc = tf.NodeDescription("ResourceApplyAdam")
@@ -3771,7 +4457,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function resource_apply_adam(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing)
+    function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
        desc = tf.EagerOp("ResourceApplyAdam")
        tf.add_input(desc, var_)
        tf.add_input(desc, m_)
@@ -3798,6 +4484,13 @@ begin
        desc["T"] = tf.data_type(grad_)
        (tf.execute(desc))[1]
     end
+    function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+        if tf.eager_mode
+            resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+        else
+            resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+        end
+    end
 end
@@ -3807,7 +4500,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing)
        local desc
        tf.with_op_name(name, "WriteHistogramSummary") do
            desc = tf.NodeDescription("WriteHistogramSummary")
@@ -3823,7 +4516,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function write_histogram_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing)
+    function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing)
        desc = tf.EagerOp("WriteHistogramSummary")
        tf.add_input(desc, writer_)
        tf.add_input(desc, step_)
       tf.add_input(desc, tag_)
       tf.add_input(desc, values_)
       desc["T"] = tf.data_type(values_)
       (tf.execute(desc))[1]
    end
+    function write_histogram_summary(writer_, step_, tag_, values_; name=nothing)
+        if tf.eager_mode
+            write_histogram_summary_eager(writer_, step_, tag_, values_; name=name)
+        else
+            write_histogram_summary_graph(writer_, step_, tag_, values_; name=name)
+        end
+    end
 end
@@ -3841,7 +4541,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing)
        local desc
        tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do
            desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize")
@@ -3852,12 +4552,19 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function experimental_indexed_dataset_materialize(dataset_::tf.TensorHandle, materialized_::tf.TensorHandle; name=nothing)
+    function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing)
        desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize")
        tf.add_input(desc, dataset_)
        tf.add_input(desc, materialized_)
        (tf.execute(desc))[1]
     end
+    function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing)
+        if tf.eager_mode
+            experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name)
+        else
+            experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name)
+        end
+    end
 end
@@ -3867,7 +4574,7 @@ end
 Sends the named tensor from send_device to recv_device.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
        local desc
        tf.with_op_name(name, "_HostSend") do
            desc = tf.NodeDescription("_HostSend")
@@ -3892,7 +4599,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function _host_send(tensor_::tf.TensorHandle; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+    function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
        desc = tf.EagerOp("_HostSend")
        tf.add_input(desc, tensor_)
        if tensor_name !== nothing
@@ -3913,6 +4620,13 @@ begin
        desc["T"] = tf.data_type(tensor_)
        (tf.execute(desc))[1]
     end
+    function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
+        if tf.eager_mode
+            _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+        else
+            _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
+        end
+    end
 end
@@ -3922,7 +4636,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function greater(x_, y_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function greater_graph(x_, y_; name=nothing)
        local desc
        tf.with_op_name(name, "Greater") do
            desc = tf.NodeDescription("Greater")
@@ -3934,7 +4648,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function greater(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing)
+    function greater_eager(x_, y_; name=nothing)
        desc = tf.EagerOp("Greater")
        tf.add_input(desc, x_)
        tf.add_input(desc, y_)
@@ -3942,6 +4656,13 @@ begin
        desc["T"] = tf.data_type(y_)
        (tf.execute(desc))[1]
     end
+    function greater(x_, y_; name=nothing)
+        if tf.eager_mode
+            greater_eager(x_, y_; name=name)
+        else
+            greater_graph(x_, y_; name=name)
+        end
+    end
 end
@@ -3951,7 +4672,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing)
        local desc
        tf.with_op_name(name, "NcclBroadcast") do
            desc = tf.NodeDescription("NcclBroadcast")
@@ -3964,7 +4685,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function nccl_broadcast(input_::tf.TensorHandle; name=nothing, shape=nothing)
+    function nccl_broadcast_eager(input_; name=nothing, shape=nothing)
        desc = tf.EagerOp("NcclBroadcast")
        tf.add_input(desc, input_)
        if shape !== nothing
@@ -3973,6 +4694,13 @@ begin
        desc["T"] = tf.data_type(input_)
        (tf.execute(desc))[1]
     end
+    function nccl_broadcast(input_; name=nothing, shape=nothing)
+        if tf.eager_mode
+            nccl_broadcast_eager(input_; name=name, shape=shape)
+        else
+            nccl_broadcast_graph(input_; name=name, shape=shape)
+        end
+    end
 end
@@ -3982,7 +4710,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing)
        local desc
        tf.with_op_name(name, "TensorListPushBackBatch") do
            desc = tf.NodeDescription("TensorListPushBackBatch")
@@ -3997,7 +4725,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function tensor_list_push_back_batch(input_handles_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, element_dtype=nothing)
+    function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing)
        desc = tf.EagerOp("TensorListPushBackBatch")
        tf.add_input(desc, input_handles_)
        tf.add_input(desc, tensor_)
@@ -4007,6 +4735,13 @@ begin
        desc["element_dtype"] = tf.data_type(tensor_)
        (tf.execute(desc))[1]
     end
+    function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing)
+        if tf.eager_mode
+            tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype)
+        else
+            tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype)
+        end
+    end
 end
@@ -4016,7 +4751,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
        local desc
        tf.with_op_name(name, "ResourceScatterMin") do
            desc = tf.NodeDescription("ResourceScatterMin")
@@ -4035,7 +4770,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function resource_scatter_min(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing)
+    function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
        desc = tf.EagerOp("ResourceScatterMin")
        tf.add_input(desc, resource_)
        tf.add_input(desc, indices_)
@@ -4047,6 +4782,13 @@ begin
        desc["dtype"] = tf.data_type(updates_)
        (tf.execute(desc))[1]
     end
+    function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing)
+        if tf.eager_mode
+            resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+        else
+            resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+        end
+    end
 end
@@ -4056,7 +4798,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing)
        local desc
        tf.with_op_name(name, "Slice") do
            desc = tf.NodeDescription("Slice")
@@ -4075,7 +4817,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function slice(input_::tf.TensorHandle, begin_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, Index=nothing)
+    function slice_eager(input_, begin_, size_; name=nothing, Index=nothing)
        desc = tf.EagerOp("Slice")
        tf.add_input(desc, input_)
        tf.add_input(desc, begin_)
        tf.add_input(desc, size_)
@@ -4088,16 +4830,23 @@ begin
        desc["Index"] = tf.data_type(size_)
        (tf.execute(desc))[1]
     end
+    function slice(input_, begin_, size_; name=nothing, Index=nothing)
+        if tf.eager_mode
+            slice_eager(input_, begin_, size_; name=name, Index=Index)
+        else
+            slice_graph(input_, begin_, size_; name=name, Index=Index)
+        end
+    end
 end
 """
-    unicode_decode(input; errors=replace, replacement_char=65533, replace_control_characters=false)
+    unicode_decode(input; errors=, replacement_char=65533, replace_control_characters=false)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
        local desc
        tf.with_op_name(name, "UnicodeDecode") do
            desc = tf.NodeDescription("UnicodeDecode")
@@ -4123,7 +4872,7 @@ begin
        end
        out
     end
-    function unicode_decode(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+    function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
        desc = tf.EagerOp("UnicodeDecode")
        tf.add_input(desc, input_)
        if input_encoding !== nothing
@@ -4140,6 +4889,13 @@ begin
        end
        tf.execute(desc)
     end
+    function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+        if tf.eager_mode
+            unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+        else
+            unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+        end
+    end
 end
@@ -4149,7 +4905,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
        local desc
        tf.with_op_name(name, "TakeDataset") do
            desc = tf.NodeDescription("TakeDataset")
@@ -4166,7 +4922,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function take_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+    function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
        desc = tf.EagerOp("TakeDataset")
        tf.add_input(desc, input_dataset_)
        tf.add_input(desc, count_)
@@ -4178,6 +4934,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.eager_mode
+            take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+        else
+            take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes)
+        end
+    end
 end
@@ -4187,7 +4950,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
        local desc
        tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do
            desc = tf.NodeDescription("BoostedTreesMakeStatsSummary")
@@ -4211,7 +4974,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function boosted_trees_make_stats_summary(node_ids_::tf.TensorHandle, gradients_::tf.TensorHandle, hessians_::tf.TensorHandle, bucketized_features_list_::tf.TensorHandle; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+    function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
        desc = tf.EagerOp("BoostedTreesMakeStatsSummary")
        tf.add_input(desc, node_ids_)
        tf.add_input(desc, gradients_)
        tf.add_input(desc, hessians_)
        tf.add_input(desc, bucketized_features_list_)
@@ -4228,6 +4991,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
+        if tf.eager_mode
+            boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features)
+        else
+            boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features)
+        end
+    end
 end
@@ -4237,7 +5007,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
        local desc
        tf.with_op_name(name, "AllCandidateSampler") do
            desc = tf.NodeDescription("AllCandidateSampler")
@@ -4266,7 +5036,7 @@ begin
        end
        out
     end
-    function all_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+    function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
        desc = tf.EagerOp("AllCandidateSampler")
        tf.add_input(desc, true_classes_)
        if num_true !== nothing
@@ -4286,16 +5056,23 @@ begin
        end
        tf.execute(desc)
     end
+    function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
+        if tf.eager_mode
+            all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2)
+        else
+            all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2)
+        end
+    end
 end
 """
-    conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1])
+    conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1])
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
        local desc
        tf.with_op_name(name, "Conv2DBackpropInput") do
            desc = tf.NodeDescription("Conv2DBackpropInput")
@@ -4327,7 +5104,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function conv2d_backprop_input(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
+    function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
        desc = tf.EagerOp("Conv2DBackpropInput")
        tf.add_input(desc, input_sizes_)
        tf.add_input(desc, filter_)
        tf.add_input(desc, out_backprop_)
@@ -4354,6 +5131,13 @@ begin
        desc["T"] = tf.data_type(out_backprop_)
        (tf.execute(desc))[1]
     end
+    function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
+        if tf.eager_mode
+            conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations)
+        else
+            conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations)
+        end
+    end
 end
@@ -4363,7 +5147,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
        local desc
        tf.with_op_name(name, "DatasetToSingleElement") do
            desc = tf.NodeDescription("DatasetToSingleElement")
@@ -4378,7 +5162,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function dataset_to_single_element(dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+    function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
        desc = tf.EagerOp("DatasetToSingleElement")
        tf.add_input(desc, dataset_)
        if output_types !== nothing
@@ -4389,6 +5173,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.eager_mode
+            dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+        else
+            dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
+        end
+    end
 end
@@ -4398,7 +5189,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
        local desc
        tf.with_op_name(name, "CacheDataset") do
            desc = tf.NodeDescription("CacheDataset")
@@ -4415,7 +5206,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function cache_dataset(input_dataset_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+    function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
        desc = tf.EagerOp("CacheDataset")
        tf.add_input(desc, input_dataset_)
        tf.add_input(desc, filename_)
@@ -4427,6 +5218,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.eager_mode
+            cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes)
+        else
+            cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes)
+        end
+    end
 end
@@ -4436,7 +5234,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
        local desc
        tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do
            desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient")
@@ -4462,7 +5260,7 @@ begin
        end
        out
     end
-    function fake_quant_with_min_max_vars_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing)
+    function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
        desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient")
        tf.add_input(desc, gradients_)
        tf.add_input(desc, inputs_)
        tf.add_input(desc, min_)
        tf.add_input(desc, max_)
@@ -4476,6 +5274,13 @@ begin
        end
        tf.execute(desc)
     end
+    function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+        if tf.eager_mode
+            fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+        else
+            fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+        end
+    end
 end
@@ -4485,7 +5290,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
        local desc
        tf.with_op_name(name, "FusedResizeAndPadConv2D") do
            desc = tf.NodeDescription("FusedResizeAndPadConv2D")
@@ -4513,7 +5318,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function fused_resize_and_pad_conv2d(input_::tf.TensorHandle, size_::tf.TensorHandle, paddings_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+    function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
        desc = tf.EagerOp("FusedResizeAndPadConv2D")
        tf.add_input(desc, input_)
        tf.add_input(desc, size_)
        tf.add_input(desc, paddings_)
        tf.add_input(desc, filter_)
@@ -4535,6 +5340,13 @@ begin
        desc["T"] = tf.data_type(filter_)
        (tf.execute(desc))[1]
     end
+    function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
+        if tf.eager_mode
+            fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding)
+        else
+            fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding)
+        end
+    end
 end
@@ -4544,7 +5356,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
        local desc
        tf.with_op_name(name, "Batch") do
            desc = tf.NodeDescription("Batch")
@@ -4588,7 +5400,7 @@ begin
        end
        out
     end
-    function batch(in_tensors_::tf.TensorHandle; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+    function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
        desc = tf.EagerOp("Batch")
        tf.add_input(desc, in_tensors_)
        if num_batch_threads !== nothing
@@ -4623,6 +5435,13 @@ begin
        end
        tf.execute(desc)
     end
+    function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
+        if tf.eager_mode
+            batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T)
+        else
+            batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T)
+        end
+    end
 end
@@ -4632,7 +5451,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
        local desc
        tf.with_op_name(name, "CollectiveBcastRecv") do
            desc = tf.NodeDescription("CollectiveBcastRecv")
@@ -4651,7 +5470,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+    function collective_bcast_recv_eager(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
        desc = tf.EagerOp("CollectiveBcastRecv")
        if group_size !== nothing
            desc["group_size"] = Base.Int(group_size)
        end
@@ -4667,6 +5486,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
+        if tf.eager_mode
+            collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+        else
+            collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
+        end
+    end
 end
@@ -4676,7 +5502,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing)
        local desc
        tf.with_op_name(name, "BatchToSpaceND") do
            desc = tf.NodeDescription("BatchToSpaceND")
@@ -4692,7 +5518,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function batch_to_space_nd(input_::tf.TensorHandle, block_shape_::tf.TensorHandle, crops_::tf.TensorHandle; name=nothing)
+    function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing)
        desc = tf.EagerOp("BatchToSpaceND")
        tf.add_input(desc, input_)
        tf.add_input(desc, block_shape_)
        tf.add_input(desc, crops_)
@@ -4702,6 +5528,13 @@ begin
        desc["Tcrops"] = tf.data_type(crops_)
        (tf.execute(desc))[1]
     end
+    function batch_to_space_nd(input_, block_shape_, crops_; name=nothing)
+        if tf.eager_mode
+            batch_to_space_nd_eager(input_, block_shape_, crops_; name=name)
+        else
+            batch_to_space_nd_graph(input_, block_shape_, crops_; name=name)
+        end
+    end
 end
@@ -4711,7 +5544,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function loop_cond(input_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function loop_cond_graph(input_; name=nothing)
        local desc
        tf.with_op_name(name, "LoopCond") do
            desc = tf.NodeDescription("LoopCond")
@@ -4720,21 +5553,28 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function loop_cond(input_::tf.TensorHandle; name=nothing)
+    function loop_cond_eager(input_; name=nothing)
        desc = tf.EagerOp("LoopCond")
        tf.add_input(desc, input_)
        (tf.execute(desc))[1]
     end
+    function loop_cond(input_; name=nothing)
+        if tf.eager_mode
+            loop_cond_eager(input_; name=name)
+        else
+            loop_cond_graph(input_; name=name)
+        end
+    end
 end
 """
-    depth_to_space(input; data_format=NHWC)
+    depth_to_space(input; data_format=)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing)
        local desc
        tf.with_op_name(name, "DepthToSpace") do
            desc = tf.NodeDescription("DepthToSpace")
@@ -4750,7 +5590,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function depth_to_space(input_::tf.TensorHandle; name=nothing, block_size=nothing, data_format=nothing)
+    function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing)
        desc = tf.EagerOp("DepthToSpace")
        tf.add_input(desc, input_)
        if block_size !== nothing
           desc["block_size"] = Base.Int(block_size)
        end
@@ -4762,6 +5602,13 @@ begin
        desc["T"] = tf.data_type(input_)
        (tf.execute(desc))[1]
     end
+    function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing)
+        if tf.eager_mode
+            depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format)
+        else
+            depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format)
+        end
+    end
 end
@@ -4771,7 +5618,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing)
        local desc
        tf.with_op_name(name, "DestroyTemporaryVariable") do
            desc = tf.NodeDescription("DestroyTemporaryVariable")
@@ -4784,7 +5631,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function destroy_temporary_variable(ref_::tf.TensorHandle; name=nothing, var_name=nothing)
+    function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing)
        desc = tf.EagerOp("DestroyTemporaryVariable")
        tf.add_input(desc, ref_)
        if var_name !== nothing
           desc["var_name"] = Base.String(var_name)
        end
        desc["T"] = tf.data_type(ref_)
        (tf.execute(desc))[1]
     end
+    function destroy_temporary_variable(ref_; name=nothing, var_name=nothing)
+        if tf.eager_mode
+            destroy_temporary_variable_eager(ref_; name=name, var_name=var_name)
+        else
+            destroy_temporary_variable_graph(ref_; name=name, var_name=var_name)
+        end
+    end
 end
 """
-    cudnn_rnn(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true)
+    cudnn_rnn(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
        local desc
        tf.with_op_name(name, "CudnnRNN") do
            desc = tf.NodeDescription("CudnnRNN")
@@ -4844,7 +5698,7 @@ begin
        end
        out
     end
-    function cudnn_rnn(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+    function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
        desc = tf.EagerOp("CudnnRNN")
        tf.add_input(desc, input_)
        tf.add_input(desc, input_h_)
        tf.add_input(desc, input_c_)
        tf.add_input(desc, params_)
@@ -4877,6 +5731,13 @@ begin
        desc["T"] = tf.data_type(params_)
        tf.execute(desc)
     end
+    function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
+        if tf.eager_mode
+            cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+        else
+            cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training)
+        end
+    end
 end
@@ -4886,7 +5747,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_identity(input_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_identity_graph(input_; name=nothing)
        local desc
        tf.with_op_name(name, "RefIdentity") do
            desc = tf.NodeDescription("RefIdentity")
@@ -4896,22 +5757,29 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function ref_identity(input_::tf.TensorHandle; name=nothing)
+    function ref_identity_eager(input_; name=nothing)
        desc = tf.EagerOp("RefIdentity")
        tf.add_input(desc, input_)
        desc["T"] = tf.data_type(input_)
        (tf.execute(desc))[1]
     end
+    function ref_identity(input_; name=nothing)
+        if tf.eager_mode
+            ref_identity_eager(input_; name=name)
+        else
+            ref_identity_graph(input_; name=name)
+        end
+    end
 end
 """
-    max_pool3d_grad(orig_input, orig_output, grad; data_format=NDHWC)
+    max_pool3d_grad(orig_input, orig_output, grad; data_format=)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
        local desc
        tf.with_op_name(name, "MaxPool3DGrad") do
            desc = tf.NodeDescription("MaxPool3DGrad")
@@ -4938,7 +5806,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function max_pool3d_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
        desc = tf.EagerOp("MaxPool3DGrad")
        tf.add_input(desc, orig_input_)
        tf.add_input(desc, orig_output_)
        tf.add_input(desc, grad_)
@@ -4960,6 +5828,13 @@ begin
        desc["T"] = tf.data_type(grad_)
        (tf.execute(desc))[1]
     end
+    function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        if tf.eager_mode
+            max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+        else
+            max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
+        end
+    end
 end
@@ -4969,7 +5844,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
        local desc
        tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do
            desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug")
@@ -4994,7 +5869,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
        desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug")
        tf.add_input(desc, parameters_)
       tf.add_input(desc, momenta_)
@@ -5013,6 +5888,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        if tf.eager_mode
+            load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        else
+            load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        end
+    end
 end
@@ -5022,7 +5904,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
        local desc
        tf.with_op_name(name, "PaddingFIFOQueueV2") do
            desc = tf.NodeDescription("PaddingFIFOQueueV2")
@@ -5044,7 +5926,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+    function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
        desc = tf.EagerOp("PaddingFIFOQueueV2")
        if component_types !== nothing
            desc["component_types"] = map(Base.identity, component_types)
        end
@@ -5063,6 +5945,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+        else
+            padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+        end
+    end
 end
@@ -5072,7 +5961,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
        local desc
        tf.with_op_name(name, "Conv3DBackpropInput") do
            desc = tf.NodeDescription("Conv3DBackpropInput")
@@ -5095,7 +5984,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function conv3d_backprop_input(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+    function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
        desc = tf.EagerOp("Conv3DBackpropInput")
        tf.add_input(desc, input_)
        tf.add_input(desc, filter_)
        tf.add_input(desc, out_backprop_)
@@ -5114,6 +6003,13 @@ begin
        desc["T"] = tf.data_type(out_backprop_)
        (tf.execute(desc))[1]
     end
+    function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
+        if tf.eager_mode
+            conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+        else
+            conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations)
+        end
+    end
 end
@@ -5123,7 +6019,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_exit(data_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_exit_graph(data_; name=nothing)
        local desc
        tf.with_op_name(name, "RefExit") do
            desc = tf.NodeDescription("RefExit")
@@ -5133,12 +6029,19 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function ref_exit(data_::tf.TensorHandle; name=nothing)
+    function ref_exit_eager(data_; name=nothing)
        desc = tf.EagerOp("RefExit")
        tf.add_input(desc, data_)
        desc["T"] = tf.data_type(data_)
        (tf.execute(desc))[1]
     end
+    function ref_exit(data_; name=nothing)
+        if tf.eager_mode
+            ref_exit_eager(data_; name=name)
+        else
+            ref_exit_graph(data_; name=name)
+        end
+    end
 end
@@ -5148,7 +6051,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
        local desc
        tf.with_op_name(name, "MapClear") do
            desc = tf.NodeDescription("MapClear")
@@ -5170,7 +6073,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+    function map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
        desc = tf.EagerOp("MapClear")
        if capacity !== nothing
            desc["capacity"] = Base.Int(capacity)
        end
@@ -5189,6 +6092,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+        else
+            map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
+        end
+    end
 end
@@ -5198,7 +6108,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing)
        local desc
        tf.with_op_name(name, "EncodeWav") do
            desc = tf.NodeDescription("EncodeWav")
@@ -5209,12 +6119,19 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function encode_wav(audio_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing)
+    function encode_wav_eager(audio_, sample_rate_; name=nothing)
        desc = tf.EagerOp("EncodeWav")
        tf.add_input(desc, audio_)
        tf.add_input(desc, sample_rate_)
        (tf.execute(desc))[1]
     end
+    function encode_wav(audio_, sample_rate_; name=nothing)
+        if tf.eager_mode
+            encode_wav_eager(audio_, sample_rate_; name=name)
+        else
+            encode_wav_graph(audio_, sample_rate_; name=name)
+        end
+    end
 end
@@ -5224,7 +6141,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing)
        local desc
        tf.with_op_name(name, "TensorSummaryV2") do
            desc = tf.NodeDescription("TensorSummaryV2")
@@ -5238,7 +6155,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function tensor_summary_v2(tag_::tf.TensorHandle, tensor_::tf.TensorHandle, serialized_summary_metadata_::tf.TensorHandle; name=nothing)
+    function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing)
        desc = tf.EagerOp("TensorSummaryV2")
        tf.add_input(desc, tag_)
        tf.add_input(desc, tensor_)
        tf.add_input(desc, serialized_summary_metadata_)
@@ -5246,6 +6163,13 @@ begin
        desc["T"] = tf.data_type(tensor_)
        (tf.execute(desc))[1]
     end
+    function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing)
+        if tf.eager_mode
+            tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name)
+        else
+            tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=name)
+        end
+    end
 end
@@ -5255,7 +6179,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
        local desc
        tf.with_op_name(name, "QueueDequeueUpTo") do
            desc = tf.NodeDescription("QueueDequeueUpTo")
@@ -5272,7 +6196,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function queue_dequeue_up_to(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing)
+    function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
        desc = tf.EagerOp("QueueDequeueUpTo")
        tf.add_input(desc, handle_)
        tf.add_input(desc, n_)
@@ -5284,6 +6208,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
+        if tf.eager_mode
+            queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+        else
+            queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
+        end
+    end
 end
@@ -5293,7 +6224,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing)
        local desc
        tf.with_op_name(name, "MatrixBandPart") do
            desc = tf.NodeDescription("MatrixBandPart")
@@ -5308,7 +6239,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function matrix_band_part(input_::tf.TensorHandle, num_lower_::tf.TensorHandle, num_upper_::tf.TensorHandle; name=nothing)
+    function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing)
        desc = tf.EagerOp("MatrixBandPart")
        tf.add_input(desc, input_)
        tf.add_input(desc, num_lower_)
        tf.add_input(desc, num_upper_)
@@ -5318,6 +6249,13 @@ begin
        desc["Tindex"] = tf.data_type(num_upper_)
        (tf.execute(desc))[1]
     end
+    function matrix_band_part(input_, num_lower_, num_upper_; name=nothing)
+        if tf.eager_mode
+            matrix_band_part_eager(input_, num_lower_, num_upper_; name=name)
+        else
+            matrix_band_part_graph(input_, num_lower_, num_upper_; name=name)
+        end
+    end
 end
@@ -5327,7 +6265,7 @@ end
 Copy Op.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
        local desc
        tf.with_op_name(name, "Copy") do
            desc = tf.NodeDescription("Copy")
@@ -5343,7 +6281,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function copy(input_::tf.TensorHandle; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+    function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
        desc = tf.EagerOp("Copy")
        tf.add_input(desc, input_)
        if tensor_name !== nothing
           desc["tensor_name"] = Base.String(tensor_name)
        end
@@ -5355,6 +6293,13 @@ begin
        desc["T"] = tf.data_type(input_)
        (tf.execute(desc))[1]
     end
+    function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
+        if tf.eager_mode
+            copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+        else
+            copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec)
+        end
+    end
 end
@@ -5364,7 +6309,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing)
        local desc
        tf.with_op_name(name, "ShapeN") do
            desc = tf.NodeDescription("ShapeN")
@@ -5385,7 +6330,7 @@ begin
        end
        out
     end
-    function shape_n(input_::tf.TensorHandle; name=nothing, N=nothing, out_type=nothing)
+    function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing)
        desc = tf.EagerOp("ShapeN")
        tf.add_input(desc, input_)
        if N !== nothing
           desc["N"] = Base.Int(N)
        end
@@ -5397,6 +6342,13 @@ begin
        desc["T"] = tf.data_type(input_)
        tf.execute(desc)
     end
+    function shape_n(input_; name=nothing, N=nothing, out_type=nothing)
+        if tf.eager_mode
+            shape_n_eager(input_; name=name, N=N, out_type=out_type)
+        else
+            shape_n_graph(input_; name=name, N=N, out_type=out_type)
+        end
+    end
 end
@@ -5406,7 +6358,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
        local desc
        tf.with_op_name(name, "ExperimentalParseExampleDataset") do
            desc = tf.NodeDescription("ExperimentalParseExampleDataset")
@@ -5443,7 +6395,7 @@ begin
        end
        tf.Tensor(tf.Operation(desc))
     end
-    function experimental_parse_example_dataset(input_dataset_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, sparse_keys=nothing, dense_keys=nothing,
sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ExperimentalParseExampleDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, num_parallel_calls_) @@ -5474,6 +6426,13 @@ begin end (tf.execute(desc))[1] end + function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.eager_mode + experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end + end end @@ -5483,7 +6442,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Concat") do desc = tf.NodeDescription("Concat") @@ -5498,7 +6457,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function concat(concat_dim_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, N=nothing) + function concat_eager(concat_dim_, values_; name=nothing, N=nothing) desc = tf.EagerOp("Concat") tf.add_input(desc, concat_dim_) tf.add_input(desc, values_) @@ -5508,16 +6467,23 @@ begin desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function concat(concat_dim_, values_; name=nothing, N=nothing) + if tf.eager_mode + concat_eager(concat_dim_, values_; name=name, N=N) + else + concat_graph(concat_dim_, values_; name=name, N=N) + end + end end """ - data_format_dim_map(x; src_format=NHWC, dst_format=NCHW) + data_format_dim_map(x; src_format=, dst_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatDimMap") do desc = tf.NodeDescription("DataFormatDimMap") @@ -5533,7 +6499,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function data_format_dim_map(x_::tf.TensorHandle; name=nothing, src_format=nothing, dst_format=nothing) + function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatDimMap") tf.add_input(desc, x_) if src_format !== nothing @@ -5545,6 +6511,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function 
data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) + if tf.eager_mode + data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end + end end @@ -5554,7 +6527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReader") do desc = tf.NodeDescription("IdentityReader") @@ -5567,7 +6540,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + function identity_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("IdentityReader") if container !== nothing desc["container"] = Base.String(container) @@ -5577,6 +6550,13 @@ begin end (tf.execute(desc))[1] end + function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + identity_reader_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -5586,7 +6566,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softplus(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softplus_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softplus") do desc = tf.NodeDescription("Softplus") @@ -5596,12 +6576,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function softplus(features_::tf.TensorHandle; name=nothing) + function softplus_eager(features_; name=nothing) desc = tf.EagerOp("Softplus") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function softplus(features_; name=nothing) + if tf.eager_mode + softplus_eager(features_; name=name) + else + softplus_graph(features_; name=name) + end + end end @@ -5611,7 +6598,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") @@ -5638,7 +6625,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -5657,6 +6644,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function 
resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -5666,7 +6660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleSequenceExample") do desc = tf.NodeDescription("ParseSingleSequenceExample") @@ -5724,7 +6718,7 @@ begin end out end - function parse_single_sequence_example(serialized_::tf.TensorHandle, feature_list_dense_missing_assumed_empty_::tf.TensorHandle, context_sparse_keys_::tf.TensorHandle, context_dense_keys_::tf.TensorHandle, feature_list_sparse_keys_::tf.TensorHandle, feature_list_dense_keys_::tf.TensorHandle, context_dense_defaults_::tf.TensorHandle, debug_name_::tf.TensorHandle; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSingleSequenceExample") tf.add_input(desc, serialized_) tf.add_input(desc, feature_list_dense_missing_assumed_empty_) @@ -5766,6 +6760,13 @@ begin end tf.execute(desc) end + function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, 
Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.eager_mode + parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + end + end end @@ -5775,7 +6776,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_diag(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixDiag") do desc = tf.NodeDescription("MatrixDiag") @@ -5785,12 +6786,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_diag(diagonal_::tf.TensorHandle; name=nothing) + function matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("MatrixDiag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) (tf.execute(desc))[1] end + function matrix_diag(diagonal_; name=nothing) + if tf.eager_mode + matrix_diag_eager(diagonal_; name=name) + else + matrix_diag_graph(diagonal_; name=name) + end + end end @@ -5800,7 +6808,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fact(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fact_graph(; name=nothing) local desc tf.with_op_name(name, "Fact") do desc @@ -5808,10 +6816,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fact(; name=nothing) + function fact_eager(; name=nothing) desc = tf.EagerOp("Fact") (tf.execute(desc))[1] end + function fact(; name=nothing) + if tf.eager_mode + fact_eager(; name=name) + else + fact_graph(; name=name) + end + end end @@ -5821,7 +6836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local 
desc tf.with_op_name(name, "ShardDataset") do desc = tf.NodeDescription("ShardDataset") @@ -5840,7 +6855,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function shard_dataset(input_dataset_::tf.TensorHandle, num_shards_::tf.TensorHandle, index_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function shard_dataset_eager(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShardDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, num_shards_) @@ -5853,16 +6868,23 @@ begin end (tf.execute(desc))[1] end + function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + shard_dataset_eager(input_dataset_, num_shards_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + else + shard_dataset_graph(input_dataset_, num_shards_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end """ - max_pool_grad_grad(orig_input, orig_output, grad; data_format=NHWC) + max_pool_grad_grad(orig_input, orig_output, grad; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGrad") do desc = tf.NodeDescription("MaxPoolGradGrad") @@ -5888,7 +6910,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_grad_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradGrad") tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) @@ -5910,6 +6932,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -5919,7 +6948,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinearGrad") do desc = tf.NodeDescription("ResizeBilinearGrad") @@ -5934,7 +6963,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_bilinear_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_bilinear_grad_eager(grads_, 
original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinearGrad") tf.add_input(desc, grads_) tf.add_input(desc, original_image_) @@ -5944,6 +6973,13 @@ begin desc["T"] = tf.data_type(original_image_) (tf.execute(desc))[1] end + function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end + end end @@ -5953,7 +6989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "BatchToSpace") do desc = tf.NodeDescription("BatchToSpace") @@ -5970,7 +7006,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_to_space(input_::tf.TensorHandle, crops_::tf.TensorHandle; name=nothing, block_size=nothing) + function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) desc = tf.EagerOp("BatchToSpace") tf.add_input(desc, input_) tf.add_input(desc, crops_) @@ -5981,6 +7017,13 @@ begin desc["Tidx"] = tf.data_type(crops_) (tf.execute(desc))[1] end + function batch_to_space(input_, crops_; name=nothing, block_size=nothing) + if tf.eager_mode + batch_to_space_eager(input_, crops_; name=name, block_size=block_size) + else + batch_to_space_graph(input_, crops_; name=name, block_size=block_size) + end + end end @@ -5990,7 +7033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) local desc tf.with_op_name(name, "OptionalFromValue") do desc = tf.NodeDescription("OptionalFromValue") @@ -6002,7 +7045,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function optional_from_value(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing) + function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) desc = tf.EagerOp("OptionalFromValue") tf.add_input(desc, components_) if Toutput_types !== nothing @@ -6010,6 +7053,13 @@ begin end (tf.execute(desc))[1] end + function optional_from_value(components_; name=nothing, Toutput_types=nothing) + if tf.eager_mode + optional_from_value_eager(components_; name=name, Toutput_types=Toutput_types) + else + optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types) + end + end end @@ -6019,7 +7069,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function xlogy(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function xlogy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xlogy") do desc = tf.NodeDescription("Xlogy") @@ -6031,7 +7081,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function xlogy(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function xlogy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xlogy") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -6039,6 +7089,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end 
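# ---------------------------------------------------------------------------
# [Editorial note] The "+ function xlogy(...)" hunk below is the same thin
# dispatcher that this patch appends after every <op>_graph / <op>_eager pair
# in this generated file: the public op name tests the global tf.eager_mode
# flag and forwards every positional and keyword argument unchanged to one of
# the two implementations. A minimal self-contained sketch of the full
# three-part pattern follows, written against a one-input "Neg" kernel; the
# names neg_graph / neg_eager / neg are illustrative, not part of the patch,
# and the sketch assumes the tf.EagerOp, tf.add_input, tf.data_type and
# tf.execute machinery introduced earlier in this patch series:
function neg_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "Neg") do
        desc = tf.NodeDescription("Neg")
        tf.add_input(desc, x_)            # graph mode: wire the input into the node
    end
    tf.Tensor(tf.Operation(desc))         # return a symbolic graph Tensor
end
function neg_eager(x_; name=nothing)
    desc = tf.EagerOp("Neg")
    tf.add_input(desc, x_)                # eager mode: inputs are TensorHandles
    desc["T"] = tf.data_type(x_)          # infer the dtype attr from the live input
    (tf.execute(desc))[1]                 # tf.execute returns a Vector of outputs
end
function neg(x_; name=nothing)
    if tf.eager_mode
        neg_eager(x_; name=name)
    else
        neg_graph(x_; name=name)
    end
end
# Single-output ops index tf.execute(desc) with [1]; multi-output ops in this
# file (e.g. shape_n, sparse_reshape, parse_single_sequence_example) return
# the whole vector unindexed.
# ---------------------------------------------------------------------------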
+ function xlogy(x_, y_; name=nothing) + if tf.eager_mode + xlogy_eager(x_, y_; name=name) + else + xlogy_graph(x_, y_; name=name) + end + end end @@ -6048,7 +7105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cross(a_, b_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cross_graph(a_, b_; name=nothing) local desc tf.with_op_name(name, "Cross") do desc = tf.NodeDescription("Cross") @@ -6060,7 +7117,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cross(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing) + function cross_eager(a_, b_; name=nothing) desc = tf.EagerOp("Cross") tf.add_input(desc, a_) tf.add_input(desc, b_) @@ -6068,6 +7125,13 @@ begin desc["T"] = tf.data_type(b_) (tf.execute(desc))[1] end + function cross(a_, b_; name=nothing) + if tf.eager_mode + cross_eager(a_, b_; name=name) + else + cross_graph(a_, b_; name=name) + end + end end @@ -6077,7 +7141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_and(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseAnd") do desc = tf.NodeDescription("BitwiseAnd") @@ -6089,7 +7153,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bitwise_and(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function bitwise_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseAnd") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -6097,6 +7161,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function bitwise_and(x_, y_; name=nothing) + if tf.eager_mode + bitwise_and_eager(x_, y_; name=name) + else + bitwise_and_graph(x_, y_; name=name) + end + end end @@ -6106,7 +7177,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_to(input_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) local desc tf.with_op_name(name, "BroadcastTo") do desc = tf.NodeDescription("BroadcastTo") @@ -6119,7 +7190,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function broadcast_to(input_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + function broadcast_to_eager(input_, shape_; name=nothing) desc = tf.EagerOp("BroadcastTo") tf.add_input(desc, input_) tf.add_input(desc, shape_) @@ -6127,6 +7198,13 @@ begin desc["Tidx"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function broadcast_to(input_, shape_; name=nothing) + if tf.eager_mode + broadcast_to_eager(input_, shape_; name=name) + else + broadcast_to_graph(input_, shape_; name=name) + end + end end @@ -6136,7 +7214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "EluGrad") do desc = tf.NodeDescription("EluGrad") @@ -6148,7 +7226,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function elu_grad(gradients_::tf.TensorHandle, outputs_::tf.TensorHandle; name=nothing) + function elu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("EluGrad") tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) @@ -6156,16 
+7234,23 @@ begin desc["T"] = tf.data_type(outputs_) (tf.execute(desc))[1] end + function elu_grad(gradients_, outputs_; name=nothing) + if tf.eager_mode + elu_grad_eager(gradients_, outputs_; name=name) + else + elu_grad_graph(gradients_, outputs_; name=name) + end + end end """ - cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackprop") do desc = tf.NodeDescription("CudnnRNNBackprop") @@ -6218,7 +7303,7 @@ begin end out end - function cudnn_rnn_backprop(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackprop") tf.add_input(desc, input_) tf.add_input(desc, input_h_) @@ -6262,6 +7347,13 @@ begin desc["T"] = tf.data_type(reserve_space_) tf.execute(desc) end + function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end + end end @@ -6271,7 +7363,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucketFast") do desc = tf.NodeDescription("StringToHashBucketFast") @@ -6283,7 +7375,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_to_hash_bucket_fast(input_::tf.TensorHandle; name=nothing, num_buckets=nothing) + function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucketFast") tf.add_input(desc, input_) if num_buckets !== nothing @@ -6291,6 +7383,13 @@ begin end (tf.execute(desc))[1] end + function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) + if tf.eager_mode + string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets) + end + end end @@ -6300,7 +7399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTable") do desc = tf.NodeDescription("MutableHashTable") @@ -6322,7 +7421,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + function mutable_hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("MutableHashTable") if container !== nothing desc["container"] = Base.String(container) @@ -6341,6 +7440,13 @@ begin end (tf.execute(desc))[1] end + function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.eager_mode + mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end + end end @@ -6350,7 +7456,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu") do desc = tf.NodeDescription("Relu") @@ -6360,12 +7466,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function relu(features_::tf.TensorHandle; name=nothing) + function relu_eager(features_; name=nothing) desc = tf.EagerOp("Relu") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function relu(features_; name=nothing) + if tf.eager_mode + relu_eager(features_; 
name=name) + else + relu_graph(features_; name=name) + end + end end @@ -6375,7 +7488,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) local desc tf.with_op_name(name, "NthElement") do desc = tf.NodeDescription("NthElement") @@ -6390,7 +7503,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function nth_element(input_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, reverse=nothing) + function nth_element_eager(input_, n_; name=nothing, reverse=nothing) desc = tf.EagerOp("NthElement") tf.add_input(desc, input_) tf.add_input(desc, n_) @@ -6400,6 +7513,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function nth_element(input_, n_; name=nothing, reverse=nothing) + if tf.eager_mode + nth_element_eager(input_, n_; name=name, reverse=reverse) + else + nth_element_graph(input_, n_; name=name, reverse=reverse) + end + end end @@ -6409,7 +7529,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softsign(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softsign_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softsign") do desc = tf.NodeDescription("Softsign") @@ -6419,12 +7539,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function softsign(features_::tf.TensorHandle; name=nothing) + function softsign_eager(features_; name=nothing) desc = tf.EagerOp("Softsign") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function softsign(features_; name=nothing) + if tf.eager_mode + softsign_eager(features_; name=name) + else + softsign_graph(features_; name=name) + end + end end @@ -6434,7 +7561,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTable") do desc = tf.NodeDescription("MutableDenseHashTable") @@ -6468,7 +7595,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_dense_hash_table(empty_key_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTable") tf.add_input(desc, empty_key_) if container !== nothing @@ -6498,6 +7625,13 @@ begin desc["key_dtype"] = tf.data_type(empty_key_) (tf.execute(desc))[1] end + function 
mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + if tf.eager_mode + mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end + end end @@ -6507,7 +7641,7 @@ end An op that shuts down a running distributed TPU system. The Op returns """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _shutdown_distributed_tpu(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "_ShutdownDistributedTPU") do desc @@ -6515,10 +7649,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _shutdown_distributed_tpu(; name=nothing) + function _shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("_ShutdownDistributedTPU") (tf.execute(desc))[1] end + function _shutdown_distributed_tpu(; name=nothing) + if tf.eager_mode + _shutdown_distributed_tpu_eager(; name=name) + else + _shutdown_distributed_tpu_graph(; name=name) + end + end end @@ -6528,7 +7669,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function polygamma(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function polygamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Polygamma") do desc = tf.NodeDescription("Polygamma") @@ -6540,7 +7681,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function polygamma(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function polygamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Polygamma") tf.add_input(desc, a_) tf.add_input(desc, x_) @@ -6548,6 +7689,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function polygamma(a_, x_; name=nothing) + if tf.eager_mode + polygamma_eager(a_, x_; name=name) + else + polygamma_graph(a_, x_; name=name) + end + end end @@ -6557,7 +7705,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) local desc tf.with_op_name(name, "NcclReduce") do desc = tf.NodeDescription("NcclReduce") @@ -6573,7 +7721,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function nccl_reduce(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing) + function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) desc = tf.EagerOp("NcclReduce") tf.add_input(desc, input_) if reduction !== nothing @@ -6585,6 +7733,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function nccl_reduce(input_; name=nothing, 
reduction=nothing, num_devices=nothing) + if tf.eager_mode + nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices) + else + nccl_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices) + end + end end @@ -6594,7 +7749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMax") do desc = tf.NodeDescription("ArgMax") @@ -6611,7 +7766,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function arg_max(input_::tf.TensorHandle, dimension_::tf.TensorHandle; name=nothing, output_type=nothing) + function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMax") tf.add_input(desc, input_) tf.add_input(desc, dimension_) @@ -6622,6 +7777,13 @@ begin desc["Tidx"] = tf.data_type(dimension_) (tf.execute(desc))[1] end + function arg_max(input_, dimension_; name=nothing, output_type=nothing) + if tf.eager_mode + arg_max_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_max_graph(input_, dimension_; name=name, output_type=output_type) + end + end end @@ -6631,7 +7793,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixSetDiag") do desc = tf.NodeDescription("MatrixSetDiag") @@ -6643,7 +7805,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_set_diag(input_::tf.TensorHandle, diagonal_::tf.TensorHandle; name=nothing) + function matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("MatrixSetDiag") tf.add_input(desc, input_) tf.add_input(desc, diagonal_) @@ -6651,6 +7813,13 @@ begin desc["T"] = tf.data_type(diagonal_) (tf.execute(desc))[1] end + function matrix_set_diag(input_, diagonal_; name=nothing) + if tf.eager_mode + matrix_set_diag_eager(input_, diagonal_; name=name) + else + matrix_set_diag_graph(input_, diagonal_; name=name) + end + end end @@ -6660,7 +7829,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) local desc tf.with_op_name(name, "SpaceToBatchND") do desc = tf.NodeDescription("SpaceToBatchND") @@ -6676,7 +7845,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function space_to_batch_nd(input_::tf.TensorHandle, block_shape_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing) + function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) desc = tf.EagerOp("SpaceToBatchND") tf.add_input(desc, input_) tf.add_input(desc, block_shape_) @@ -6686,6 +7855,13 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) (tf.execute(desc))[1] end + function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) + if tf.eager_mode + space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name) + else + space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name) + end + 
end end @@ -6695,7 +7871,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReshape") do desc = tf.NodeDescription("SparseReshape") @@ -6713,13 +7889,20 @@ begin end out end - function sparse_reshape(input_indices_::tf.TensorHandle, input_shape_::tf.TensorHandle, new_shape_::tf.TensorHandle; name=nothing) + function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) desc = tf.EagerOp("SparseReshape") tf.add_input(desc, input_indices_) tf.add_input(desc, input_shape_) tf.add_input(desc, new_shape_) tf.execute(desc) end + function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) + if tf.eager_mode + sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name) + else + sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name) + end + end end @@ -6729,7 +7912,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptimizeDataset") do desc = tf.NodeDescription("OptimizeDataset") @@ -6746,7 +7929,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function optimize_dataset(input_dataset_::tf.TensorHandle, optimizations_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptimizeDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, optimizations_) @@ -6758,6 +7941,13 @@ begin end (tf.execute(desc))[1] end + function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -6767,7 +7957,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatV2") do desc = tf.NodeDescription("ConcatV2") @@ -6784,7 +7974,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function concat_v2(values_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, N=nothing) + function concat_v2_eager(values_, axis_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatV2") tf.add_input(desc, values_) tf.add_input(desc, axis_) @@ -6795,6 +7985,13 @@ begin desc["Tidx"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function concat_v2(values_, axis_; name=nothing, N=nothing) + if tf.eager_mode + concat_v2_eager(values_, axis_; name=name, N=N) + else + 
concat_v2_graph(values_, axis_; name=name, N=N) + end + end end @@ -6804,7 +8001,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdadelta") do desc = tf.NodeDescription("ResourceSparseApplyAdadelta") @@ -6833,7 +8030,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdadelta") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -6853,6 +8050,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -6862,7 +8066,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tile(input_, multiples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tile_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "Tile") do desc = tf.NodeDescription("Tile") @@ -6875,7 +8079,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tile(input_::tf.TensorHandle, multiples_::tf.TensorHandle; name=nothing) + function tile_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("Tile") tf.add_input(desc, input_) tf.add_input(desc, multiples_) @@ -6883,6 +8087,13 @@ begin desc["Tmultiples"] = tf.data_type(multiples_) (tf.execute(desc))[1] end + function tile(input_, multiples_; name=nothing) + if tf.eager_mode + tile_eager(input_, multiples_; name=name) + else + tile_graph(input_, multiples_; name=name) + end + end end @@ -6892,7 +8103,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MutexV2") do desc = tf.NodeDescription("MutexV2") @@ -6905,7 +8116,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + function mutex_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MutexV2") if container !== nothing desc["container"] = Base.String(container) @@ -6915,6 +8126,13 @@ begin end 
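# [Editorial note] At this point every attribute of the EagerOp has been
# filled in from the keyword arguments; the (tf.execute(desc))[1] on the next
# line hands the op to the eager runtime and takes its first (here: only)
# output handle. tf.execute returns a Vector of TensorHandles, which is why
# the generated multi-output ops elsewhere in this file return
# tf.execute(desc) unindexed.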
(tf.execute(desc))[1] end + function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + mutex_v2_eager(; name=name, container=container, shared_name=shared_name) + else + mutex_v2_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -6924,7 +8142,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeManySparse") do desc = tf.NodeDescription("SerializeManySparse") @@ -6941,7 +8159,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function serialize_many_sparse(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, out_type=nothing) + function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeManySparse") tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) @@ -6952,6 +8170,13 @@ begin desc["T"] = tf.data_type(sparse_values_) (tf.execute(desc))[1] end + function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.eager_mode + serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end + end end @@ -6961,7 +8186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) local desc tf.with_op_name(name, "TPUEmbeddingActivations") do desc = tf.NodeDescription("TPUEmbeddingActivations") @@ -6978,7 +8203,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_embedding_activations(embedding_variable_::tf.TensorHandle, sliced_activations_::tf.TensorHandle; name=nothing, table_id=nothing, lookup_id=nothing) + function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) desc = tf.EagerOp("TPUEmbeddingActivations") tf.add_input(desc, embedding_variable_) tf.add_input(desc, sliced_activations_) @@ -6990,6 +8215,13 @@ begin end (tf.execute(desc))[1] end + function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + if tf.eager_mode + tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + else + tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + end + end end @@ -6999,7 +8231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "BatchMatrixSolveLs") do desc = tf.NodeDescription("BatchMatrixSolveLs") @@ -7016,7 +8248,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_solve_ls(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle, l2_regularizer_::tf.TensorHandle; name=nothing, fast=nothing) + function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("BatchMatrixSolveLs") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -7028,6 +8260,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.eager_mode + batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end + end end @@ -7037,7 +8276,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function not_equal(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function not_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "NotEqual") do desc = tf.NodeDescription("NotEqual") @@ -7049,7 +8288,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function not_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function not_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("NotEqual") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -7057,6 +8296,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function not_equal(x_, y_; name=nothing) + if tf.eager_mode + not_equal_eager(x_, y_; name=name) + else + not_equal_graph(x_, y_; name=name) + end + end end @@ -7066,7 +8312,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lgamma(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lgamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Lgamma") do desc = tf.NodeDescription("Lgamma") @@ -7076,22 +8322,29 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lgamma(x_::tf.TensorHandle; name=nothing) + function lgamma_eager(x_; name=nothing) desc = tf.EagerOp("Lgamma") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function lgamma(x_; name=nothing) + if tf.eager_mode + lgamma_eager(x_; name=name) + else + lgamma_graph(x_; name=name) + end + end end """ - tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) + tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicateMetadata") do desc = tf.NodeDescription("TPUReplicateMetadata") @@ -7125,7 +8378,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) desc = tf.EagerOp("TPUReplicateMetadata") if num_replicas !== nothing desc["num_replicas"] = Base.Int(num_replicas) @@ -7156,6 +8409,13 @@ begin end (tf.execute(desc))[1] end + function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + if tf.eager_mode + tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core, padding_map=padding_map, step_marker_location=step_marker_location) + else + tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core, padding_map=padding_map, step_marker_location=step_marker_location) + end + end end @@ -7165,7 +8425,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolHandle") do desc = tf.NodeDescription("ExperimentalThreadPoolHandle") @@ -7187,7 +8447,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + function experimental_thread_pool_handle_eager(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("ExperimentalThreadPoolHandle") if num_threads !== nothing desc["num_threads"] = Base.Int(num_threads) @@ -7206,6 +8466,13 @@ begin end (tf.execute(desc))[1] end + function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, 
container=nothing, shared_name=nothing) + if tf.eager_mode + experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + else + experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + end + end end @@ -7215,7 +8482,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function self_adjoint_eig(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "SelfAdjointEig") do desc = tf.NodeDescription("SelfAdjointEig") @@ -7225,12 +8492,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function self_adjoint_eig(input_::tf.TensorHandle; name=nothing) + function self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("SelfAdjointEig") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function self_adjoint_eig(input_; name=nothing) + if tf.eager_mode + self_adjoint_eig_eager(input_; name=name) + else + self_adjoint_eig_graph(input_; name=name) + end + end end @@ -7240,7 +8514,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") @@ -7257,7 +8531,7 @@ begin end out end - function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_::tf.TensorHandle; name=nothing, num_features=nothing) + function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") tf.add_input(desc, quantile_stream_resource_handle_) if num_features !== nothing @@ -7265,6 +8539,13 @@ begin end tf.execute(desc) end + function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features) + end + end end @@ -7274,7 +8555,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseDiv") do desc = tf.NodeDescription("SparseDenseCwiseDiv") @@ -7290,7 +8571,7 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function sparse_dense_cwise_div(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseDiv") tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) @@ -7300,6 +8581,13 @@ begin desc["T"] = tf.data_type(dense_) (tf.execute(desc))[1] end + function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.eager_mode + sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end + end end @@ -7309,7 +8597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function acos(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function acos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acos") do desc = tf.NodeDescription("Acos") @@ -7319,12 +8607,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function acos(x_::tf.TensorHandle; name=nothing) + function acos_eager(x_; name=nothing) desc = tf.EagerOp("Acos") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function acos(x_; name=nothing) + if tf.eager_mode + acos_eager(x_; name=name) + else + acos_graph(x_; name=name) + end + end end @@ -7334,7 +8629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "All") do desc = tf.NodeDescription("All") @@ -7350,7 +8645,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function all(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("All") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -7360,6 +8655,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -7369,7 +8671,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) local desc tf.with_op_name(name, "CompareAndBitpack") do desc = tf.NodeDescription("CompareAndBitpack") @@ -7381,7 +8683,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function compare_and_bitpack(input_::tf.TensorHandle, threshold_::tf.TensorHandle; name=nothing) + function compare_and_bitpack_eager(input_, threshold_; name=nothing) desc = tf.EagerOp("CompareAndBitpack") tf.add_input(desc, input_) tf.add_input(desc, threshold_) @@ -7389,6 +8691,13 @@ begin desc["T"] = tf.data_type(threshold_) (tf.execute(desc))[1] end + function compare_and_bitpack(input_, threshold_; 
name=nothing) + if tf.eager_mode + compare_and_bitpack_eager(input_, threshold_; name=name) + else + compare_and_bitpack_graph(input_, threshold_; name=name) + end + end end @@ -7398,7 +8707,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "VarHandleOp") do desc = tf.NodeDescription("VarHandleOp") @@ -7417,7 +8726,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + function var_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("VarHandleOp") if container !== nothing desc["container"] = Base.String(container) @@ -7433,6 +8742,13 @@ begin end (tf.execute(desc))[1] end + function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + else + var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + end + end end @@ -7442,7 +8758,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUniqueDataset") do desc = tf.NodeDescription("ExperimentalUniqueDataset") @@ -7457,7 +8773,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_unique_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUniqueDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -7468,6 +8784,13 @@ begin end (tf.execute(desc))[1] end + function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unique_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -7477,7 +8800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local 
desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndRelu") @@ -7519,7 +8842,7 @@ begin end out end - function quantized_conv2d_with_bias_sum_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndRelu") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -7545,6 +8868,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -7554,7 +8884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "ListDiff") do desc = tf.NodeDescription("ListDiff") @@ -7574,7 +8904,7 @@ begin end out end - function list_diff(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, out_idx=nothing) + function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) desc = tf.EagerOp("ListDiff") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -7585,6 +8915,13 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function list_diff(x_, y_; name=nothing, out_idx=nothing) + if tf.eager_mode + list_diff_eager(x_, y_; name=name, out_idx=out_idx) + else + list_diff_graph(x_, y_; name=name, out_idx=out_idx) + end + end end @@ -7594,7 +8931,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryFileWriter") do desc = tf.NodeDescription("CreateSummaryFileWriter") @@ -7611,7 +8948,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function create_summary_file_writer(writer_::tf.TensorHandle, logdir_::tf.TensorHandle, max_queue_::tf.TensorHandle, flush_millis_::tf.TensorHandle, filename_suffix_::tf.TensorHandle; name=nothing) + function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) desc = 
tf.EagerOp("CreateSummaryFileWriter") tf.add_input(desc, writer_) tf.add_input(desc, logdir_) @@ -7620,6 +8957,13 @@ begin tf.add_input(desc, filename_suffix_) (tf.execute(desc))[1] end + function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + if tf.eager_mode + create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + else + create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + end + end end @@ -7629,7 +8973,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) local desc tf.with_op_name(name, "GenerateVocabRemapping") do desc = tf.NodeDescription("GenerateVocabRemapping") @@ -7654,7 +8998,7 @@ begin end out end - function generate_vocab_remapping(new_vocab_file_::tf.TensorHandle, old_vocab_file_::tf.TensorHandle; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) desc = tf.EagerOp("GenerateVocabRemapping") tf.add_input(desc, new_vocab_file_) tf.add_input(desc, old_vocab_file_) @@ -7669,6 +9013,13 @@ begin end tf.execute(desc) end + function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + if tf.eager_mode + generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + else + generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + end + end end @@ -7678,7 +9029,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixInverse") do desc = tf.NodeDescription("BatchMatrixInverse") @@ -7691,7 +9042,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_inverse(input_::tf.TensorHandle; name=nothing, adjoint=nothing) + function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixInverse") tf.add_input(desc, input_) if adjoint !== nothing @@ -7700,6 +9051,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.eager_mode + batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end + end end @@ -7709,7 +9067,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function control_trigger(; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function control_trigger_graph(; name=nothing) local desc tf.with_op_name(name, "ControlTrigger") do desc @@ -7717,10 +9075,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function control_trigger(; name=nothing) + function control_trigger_eager(; name=nothing) desc = tf.EagerOp("ControlTrigger") (tf.execute(desc))[1] end + function control_trigger(; name=nothing) + if tf.eager_mode + control_trigger_eager(; name=name) + else + control_trigger_graph(; name=name) + end + end end @@ -7730,7 +9095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_ordinal_selector(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) local desc tf.with_op_name(name, "TPUOrdinalSelector") do desc @@ -7738,10 +9103,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_ordinal_selector(; name=nothing) + function tpu_ordinal_selector_eager(; name=nothing) desc = tf.EagerOp("TPUOrdinalSelector") (tf.execute(desc))[1] end + function tpu_ordinal_selector(; name=nothing) + if tf.eager_mode + tpu_ordinal_selector_eager(; name=name) + else + tpu_ordinal_selector_graph(; name=name) + end + end end @@ -7751,7 +9123,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stop_gradient(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stop_gradient_graph(input_; name=nothing) local desc tf.with_op_name(name, "StopGradient") do desc = tf.NodeDescription("StopGradient") @@ -7761,12 +9133,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stop_gradient(input_::tf.TensorHandle; name=nothing) + function stop_gradient_eager(input_; name=nothing) desc = tf.EagerOp("StopGradient") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function stop_gradient(input_; name=nothing) + if tf.eager_mode + stop_gradient_eager(input_; name=name) + else + stop_gradient_graph(input_; name=name) + end + end end @@ -7776,7 +9155,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "Split") do desc = tf.NodeDescription("Split") @@ -7797,7 +9176,7 @@ begin end out end - function split(split_dim_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, num_split=nothing) + function split_eager(split_dim_, value_; name=nothing, num_split=nothing) desc = tf.EagerOp("Split") tf.add_input(desc, split_dim_) tf.add_input(desc, value_) @@ -7807,6 +9186,13 @@ begin desc["T"] = tf.data_type(value_) tf.execute(desc) end + function split(split_dim_, value_; name=nothing, num_split=nothing) + if tf.eager_mode + split_eager(split_dim_, value_; name=name, num_split=num_split) + else + split_graph(split_dim_, value_; name=name, num_split=num_split) + end + end end @@ -7816,7 +9202,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) local desc 
tf.with_op_name(name, "Unpack") do desc = tf.NodeDescription("Unpack") @@ -7840,7 +9226,7 @@ begin end out end - function unpack(value_::tf.TensorHandle; name=nothing, num=nothing, axis=nothing) + function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) desc = tf.EagerOp("Unpack") tf.add_input(desc, value_) if num !== nothing @@ -7855,6 +9241,13 @@ begin desc["T"] = tf.data_type(value_) tf.execute(desc) end + function unpack(value_; name=nothing, num=nothing, axis=nothing) + if tf.eager_mode + unpack_eager(value_; name=name, num=num, axis=axis) + else + unpack_graph(value_; name=name, num=num, axis=axis) + end + end end @@ -7864,7 +9257,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMax") do desc = tf.NodeDescription("ResourceScatterMax") @@ -7883,7 +9276,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_max(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMax") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -7895,6 +9288,13 @@ begin desc["dtype"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.eager_mode + resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end + end end @@ -7904,7 +9304,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWrite") do desc = tf.NodeDescription("TensorArrayWrite") @@ -7920,7 +9320,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_write(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWrite") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -7929,6 +9329,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) + end + end end @@ -7938,7 +9345,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) local desc tf.with_op_name(name, "Fill") do desc = tf.NodeDescription("Fill") @@ -7954,7 +9361,7 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function fill(dims_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, index_type=nothing) + function fill_eager(dims_, value_; name=nothing, index_type=nothing) desc = tf.EagerOp("Fill") tf.add_input(desc, dims_) tf.add_input(desc, value_) @@ -7965,6 +9372,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function fill(dims_, value_; name=nothing, index_type=nothing) + if tf.eager_mode + fill_eager(dims_, value_; name=name, index_type=index_type) + else + fill_graph(dims_, value_; name=name, index_type=index_type) + end + end end @@ -7974,7 +9388,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRequantize") @@ -8019,7 +9433,7 @@ begin end out end - function quantized_conv2d_with_bias_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRequantize") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -8047,6 +9461,13 @@ begin desc["Tbias"] = tf.data_type(bias_) tf.execute(desc) end + function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -8056,7 +9477,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softmax(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "Softmax") do desc = tf.NodeDescription("Softmax") @@ -8066,12 +9487,19 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function softmax(logits_::tf.TensorHandle; name=nothing) + function softmax_eager(logits_; name=nothing) desc = tf.EagerOp("Softmax") tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) (tf.execute(desc))[1] end + function softmax(logits_; name=nothing) + if tf.eager_mode + softmax_eager(logits_; name=name) + else + softmax_graph(logits_; name=name) + end + end end @@ -8081,7 +9509,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubic") do desc = tf.NodeDescription("ResizeBicubic") @@ -8096,7 +9524,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_bicubic(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubic") tf.add_input(desc, images_) tf.add_input(desc, size_) @@ -8106,6 +9534,13 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners) + end + end end @@ -8115,7 +9550,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "InfeedDequeueTuple") do desc = tf.NodeDescription("InfeedDequeueTuple") @@ -8128,7 +9563,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + function infeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing) desc = tf.EagerOp("InfeedDequeueTuple") if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -8138,6 +9573,13 @@ begin end (tf.execute(desc))[1] end + function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + if tf.eager_mode + infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) + else + infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) + end + end end @@ -8147,7 +9589,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIterator") do desc = tf.NodeDescription("MultiDeviceIterator") @@ -8169,7 +9611,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + function 
multi_device_iterator_eager(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIterator") if devices !== nothing desc["devices"] = map(Base.identity, devices) @@ -8188,16 +9630,23 @@ begin end (tf.execute(desc))[1] end + function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end + end end """ - decode_csv(records, record_defaults; field_delim=,, use_quote_delim=true, na_value=, select_cols=Int64[]) + decode_csv(records, record_defaults; field_delim=, use_quote_delim=true, na_value=, select_cols=Int64[]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) local desc tf.with_op_name(name, "DecodeCSV") do desc = tf.NodeDescription("DecodeCSV") @@ -8223,7 +9672,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_csv(records_::tf.TensorHandle, record_defaults_::tf.TensorHandle; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) desc = tf.EagerOp("DecodeCSV") tf.add_input(desc, records_) tf.add_input(desc, record_defaults_) @@ -8244,6 +9693,13 @@ begin end (tf.execute(desc))[1] end + function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + if tf.eager_mode + decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + else + decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + end + end end @@ -8253,7 +9709,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFind") do desc = tf.NodeDescription("LookupTableFind") @@ -8268,7 +9724,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_find(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) desc = 
tf.EagerOp("LookupTableFind") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -8277,6 +9733,13 @@ begin desc["Tout"] = tf.data_type(default_value_) (tf.execute(desc))[1] end + function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) + if tf.eager_mode + lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) + else + lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) + end + end end @@ -8286,7 +9749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleAndRepeatDataset") do desc = tf.NodeDescription("ShuffleAndRepeatDataset") @@ -8309,7 +9772,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function shuffle_and_repeat_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle, seed_::tf.TensorHandle, seed2_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleAndRepeatDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) @@ -8324,6 +9787,13 @@ begin end (tf.execute(desc))[1] end + function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -8333,7 +9803,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) local desc tf.with_op_name(name, "RequantizationRangePerChannel") do desc = tf.NodeDescription("RequantizationRangePerChannel") @@ -8355,7 +9825,7 @@ begin end out end - function requantization_range_per_channel(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, clip_value_max=nothing) + function requantization_range_per_channel_eager(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) desc = tf.EagerOp("RequantizationRangePerChannel") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -8366,6 +9836,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) + if tf.eager_mode + requantization_range_per_channel_eager(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) + else + 
requantization_range_per_channel_graph(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) + end + end end @@ -8375,7 +9852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUnbatchDataset") do desc = tf.NodeDescription("ExperimentalUnbatchDataset") @@ -8390,7 +9867,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_unbatch_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUnbatchDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -8401,16 +9878,23 @@ begin end (tf.execute(desc))[1] end + function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end """ - avg_pool3d_grad(orig_input_shape, grad; data_format=NDHWC) + avg_pool3d_grad(orig_input_shape, grad; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3DGrad") do desc = tf.NodeDescription("AvgPool3DGrad") @@ -8434,7 +9918,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function avg_pool3d_grad(orig_input_shape_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3DGrad") tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) @@ -8453,6 +9937,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -8462,7 +9953,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_with_default_graph(input_; 
name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderWithDefault") do desc = tf.NodeDescription("PlaceholderWithDefault") @@ -8478,7 +9969,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function placeholder_with_default(input_::tf.TensorHandle; name=nothing, dtype=nothing, shape=nothing) + function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("PlaceholderWithDefault") tf.add_input(desc, input_) if dtype !== nothing @@ -8490,6 +9981,13 @@ begin desc["dtype"] = tf.data_type(input_) (tf.execute(desc))[1] end + function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) + else + placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) + end + end end @@ -8499,7 +9997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_v2(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTableV2") do desc = tf.NodeDescription("InitializeTableV2") @@ -8514,7 +10012,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function initialize_table_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTableV2") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -8523,6 +10021,13 @@ begin desc["Tval"] = tf.data_type(values_) (tf.execute(desc))[1] end + function initialize_table_v2(table_handle_, keys_, values_; name=nothing) + if tf.eager_mode + initialize_table_v2_eager(table_handle_, keys_, values_; name=name) + else + initialize_table_v2_graph(table_handle_, keys_, values_; name=name) + end + end end @@ -8532,7 +10037,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SetSize") do desc = tf.NodeDescription("SetSize") @@ -8549,7 +10054,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function set_size(set_indices_::tf.TensorHandle, set_values_::tf.TensorHandle, set_shape_::tf.TensorHandle; name=nothing, validate_indices=nothing) + function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SetSize") tf.add_input(desc, set_indices_) tf.add_input(desc, set_values_) @@ -8560,6 +10065,13 @@ begin desc["T"] = tf.data_type(set_values_) (tf.execute(desc))[1] end + function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + if tf.eager_mode + set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + else + set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + end + end end @@ -8569,7 +10081,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assert(condition_, data_; name=nothing, 
T=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) local desc tf.with_op_name(name, "Assert") do desc = tf.NodeDescription("Assert") @@ -8586,7 +10098,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assert(condition_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing, summarize=nothing) + function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) desc = tf.EagerOp("Assert") tf.add_input(desc, condition_) tf.add_input(desc, data_) @@ -8598,6 +10110,13 @@ begin end (tf.execute(desc))[1] end + function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) + if tf.eager_mode + assert_eager(condition_, data_; name=name, T=T, summarize=summarize) + else + assert_graph(condition_, data_; name=name, T=T, summarize=summarize) + end + end end @@ -8607,7 +10126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV4") do desc = tf.NodeDescription("NonMaxSuppressionV4") @@ -8633,7 +10152,7 @@ begin end out end - function non_max_suppression_v4(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing, pad_to_max_output_size=nothing) + function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) desc = tf.EagerOp("NonMaxSuppressionV4") tf.add_input(desc, boxes_) tf.add_input(desc, scores_) @@ -8647,6 +10166,13 @@ begin desc["T"] = tf.data_type(scores_) tf.execute(desc) end + function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + if tf.eager_mode + non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + else + non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + end + end end @@ -8656,7 +10182,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") @@ -8693,7 +10219,7 @@ begin end out end - function 
sample_distorted_bounding_box_v2(image_size_::tf.TensorHandle, bounding_boxes_::tf.TensorHandle, min_object_covered_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBoxV2") tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) @@ -8719,16 +10245,23 @@ begin desc["T"] = tf.data_type(image_size_) tf.execute(desc) end + function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.eager_mode + sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end + end end """ - initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter= ) + initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFile") do desc = tf.NodeDescription("InitializeTableFromTextFile") @@ -8751,7 +10284,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function initialize_table_from_text_file(table_handle_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFile") tf.add_input(desc, table_handle_) tf.add_input(desc, filename_) @@ -8769,6 +10302,13 @@ begin end (tf.execute(desc))[1] end + function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + if tf.eager_mode + initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + else + initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + end + end end @@ -8778,7 +10318,7 @@ end 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_size(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSize") do desc = tf.NodeDescription("LookupTableSize") @@ -8787,11 +10327,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_size(table_handle_::tf.TensorHandle; name=nothing) + function lookup_table_size_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSize") tf.add_input(desc, table_handle_) (tf.execute(desc))[1] end + function lookup_table_size(table_handle_; name=nothing) + if tf.eager_mode + lookup_table_size_eager(table_handle_; name=name) + else + lookup_table_size_graph(table_handle_; name=name) + end + end end @@ -8801,7 +10348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdagradDA") do desc = tf.NodeDescription("SparseApplyAdagradDA") @@ -8832,7 +10379,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdagradDA") tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) @@ -8856,6 +10403,13 @@ begin desc["T"] = tf.data_type(l2_) (tf.execute(desc))[1] end + function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end + end end @@ -8865,7 +10419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastGradientArgs") do desc = tf.NodeDescription("BroadcastGradientArgs") @@ -8882,7 +10436,7 @@ begin end out end - function broadcast_gradient_args(s0_::tf.TensorHandle, s1_::tf.TensorHandle; name=nothing) + function broadcast_gradient_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastGradientArgs") 
tf.add_input(desc, s0_) tf.add_input(desc, s1_) @@ -8890,6 +10444,13 @@ begin desc["T"] = tf.data_type(s1_) tf.execute(desc) end + function broadcast_gradient_args(s0_, s1_; name=nothing) + if tf.eager_mode + broadcast_gradient_args_eager(s0_, s1_; name=name) + else + broadcast_gradient_args_graph(s0_, s1_; name=name) + end + end end @@ -8899,7 +10460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) local desc tf.with_op_name(name, "SummaryWriter") do desc = tf.NodeDescription("SummaryWriter") @@ -8912,7 +10473,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + function summary_writer_eager(; name=nothing, shared_name=nothing, container=nothing) desc = tf.EagerOp("SummaryWriter") if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) @@ -8922,6 +10483,13 @@ begin end (tf.execute(desc))[1] end + function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + if tf.eager_mode + summary_writer_eager(; name=name, shared_name=shared_name, container=container) + else + summary_writer_graph(; name=name, shared_name=shared_name, container=container) + end + end end @@ -8931,7 +10499,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) local desc tf.with_op_name(name, "RecvTPUEmbeddingActivations") do desc = tf.NodeDescription("RecvTPUEmbeddingActivations") @@ -8949,7 +10517,7 @@ begin end out end - function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing) desc = tf.EagerOp("RecvTPUEmbeddingActivations") if num_outputs !== nothing desc["num_outputs"] = Base.Int(num_outputs) @@ -8959,6 +10527,13 @@ begin end tf.execute(desc) end + function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + if tf.eager_mode + recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) + else + recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) + end + end end @@ -8968,7 +10543,7 @@ end output = input; While (Cond(output)) { output = Body(output) } """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "_While") do desc = tf.NodeDescription("_While") @@ -8986,7 +10561,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _while(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing) + function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("_While") tf.add_input(desc, input_) if T !== nothing @@ -9000,6 +10575,13 
@@ begin end (tf.execute(desc))[1] end + function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.eager_mode + _while_eager(input_; name=name, T=T, cond=cond, body=body) + else + _while_graph(input_; name=name, T=T, cond=cond, body=body) + end + end end @@ -9009,7 +10591,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTable") do desc = tf.NodeDescription("InitializeTable") @@ -9024,7 +10606,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function initialize_table(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + function initialize_table_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTable") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -9033,6 +10615,13 @@ begin desc["Tval"] = tf.data_type(values_) (tf.execute(desc))[1] end + function initialize_table(table_handle_, keys_, values_; name=nothing) + if tf.eager_mode + initialize_table_eager(table_handle_, keys_, values_; name=name) + else + initialize_table_graph(table_handle_, keys_, values_; name=name) + end + end end @@ -9042,7 +10631,7 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNumericSummary") do desc = tf.NodeDescription("DebugNumericSummary") @@ -9073,7 +10662,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function debug_numeric_summary(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNumericSummary") tf.add_input(desc, input_) if device_name !== nothing @@ -9100,6 +10689,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + if tf.eager_mode + debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) + else + debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) + end + end end @@ 
-9109,7 +10705,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") @@ -9133,7 +10729,7 @@ begin end out end - function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -9149,6 +10745,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -9158,7 +10761,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tanh(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tanh") do desc = tf.NodeDescription("Tanh") @@ -9168,12 +10771,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tanh(x_::tf.TensorHandle; name=nothing) + function tanh_eager(x_; name=nothing) desc = tf.EagerOp("Tanh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function tanh(x_; name=nothing) + if tf.eager_mode + tanh_eager(x_; name=name) + else + tanh_graph(x_; name=name) + end + end end @@ -9183,7 +10793,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "SymbolicGradient") do desc = tf.NodeDescription("SymbolicGradient") @@ -9201,7 +10811,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function symbolic_gradient(input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("SymbolicGradient") tf.add_input(desc, input_) if Tin !== nothing @@ -9215,6 +10825,13 @@ begin end (tf.execute(desc))[1] end + function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + if tf.eager_mode + symbolic_gradient_eager(input_; name=name, Tin=Tin, 
Tout=Tout, f=f) + else + symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f) + end + end end @@ -9224,7 +10841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") @@ -9255,7 +10872,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_update_ensemble(tree_ensemble_handle_::tf.TensorHandle, feature_ids_::tf.TensorHandle, node_ids_::tf.TensorHandle, gains_::tf.TensorHandle, thresholds_::tf.TensorHandle, left_node_contribs_::tf.TensorHandle, right_node_contribs_::tf.TensorHandle, max_depth_::tf.TensorHandle, learning_rate_::tf.TensorHandle; name=nothing, pruning_mode=nothing, num_features=nothing) + function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesUpdateEnsemble") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, feature_ids_) @@ -9274,6 +10891,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) + else + boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) + end + end end @@ -9283,7 +10907,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyMomentum") do desc = tf.NodeDescription("ApplyMomentum") @@ -9307,7 +10931,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyMomentum") tf.add_input(desc, var_) 
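# Eager op inputs are positional, so they are appended here in the same
# order as the op signature (var, accum, lr, grad, momentum).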
tf.add_input(desc, accum_) @@ -9327,6 +10951,13 @@ begin desc["T"] = tf.data_type(momentum_) (tf.execute(desc))[1] end + function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end @@ -9336,7 +10967,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderRead") do desc = tf.NodeDescription("ReaderRead") @@ -9352,12 +10983,19 @@ begin end out end - function reader_read(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle; name=nothing) + function reader_read_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderRead") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.execute(desc) end + function reader_read(reader_handle_, queue_handle_; name=nothing) + if tf.eager_mode + reader_read_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_graph(reader_handle_, queue_handle_; name=name) + end + end end @@ -9367,7 +11005,7 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) local desc tf.with_op_name(name, "_WaitForDistributedTPU") do desc = tf.NodeDescription("_WaitForDistributedTPU") @@ -9382,7 +11020,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _wait_for_distributed_tpu(inputs_::tf.TensorHandle; name=nothing, startup_timeout_sec=nothing, N=nothing) + function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) desc = tf.EagerOp("_WaitForDistributedTPU") tf.add_input(desc, inputs_) if startup_timeout_sec !== nothing @@ -9393,6 +11031,13 @@ begin end (tf.execute(desc))[1] end + function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + if tf.eager_mode + _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + else + _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + end + end end @@ -9402,7 +11047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutex_lock(mutex_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) local desc tf.with_op_name(name, "MutexLock") do desc = tf.NodeDescription("MutexLock") @@ -9411,11 +11056,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutex_lock(mutex_::tf.TensorHandle; name=nothing) + function mutex_lock_eager(mutex_; name=nothing) desc = tf.EagerOp("MutexLock") tf.add_input(desc, mutex_) (tf.execute(desc))[1] end + function mutex_lock(mutex_; 
name=nothing) + if tf.eager_mode + mutex_lock_eager(mutex_; name=name) + else + mutex_lock_graph(mutex_; name=name) + end + end end @@ -9425,7 +11077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) local desc tf.with_op_name(name, "AccumulatorSetGlobalStep") do desc = tf.NodeDescription("AccumulatorSetGlobalStep") @@ -9436,12 +11088,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function accumulator_set_global_step(handle_::tf.TensorHandle, new_global_step_::tf.TensorHandle; name=nothing) + function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) desc = tf.EagerOp("AccumulatorSetGlobalStep") tf.add_input(desc, handle_) tf.add_input(desc, new_global_step_) (tf.execute(desc))[1] end + function accumulator_set_global_step(handle_, new_global_step_; name=nothing) + if tf.eager_mode + accumulator_set_global_step_eager(handle_, new_global_step_; name=name) + else + accumulator_set_global_step_graph(handle_, new_global_step_; name=name) + end + end end @@ -9451,7 +11110,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedAdd") do desc = tf.NodeDescription("QuantizedAdd") @@ -9477,7 +11136,7 @@ begin end out end - function quantized_add(x_::tf.TensorHandle, y_::tf.TensorHandle, min_x_::tf.TensorHandle, max_x_::tf.TensorHandle, min_y_::tf.TensorHandle, max_y_::tf.TensorHandle; name=nothing) + function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedAdd") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -9489,6 +11148,13 @@ begin desc["T2"] = tf.data_type(y_) tf.execute(desc) end + function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + if tf.eager_mode + quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + else + quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + end + end end @@ -9498,7 +11164,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) local desc tf.with_op_name(name, "Squeeze") do desc = tf.NodeDescription("Squeeze") @@ -9511,7 +11177,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function squeeze(input_::tf.TensorHandle; name=nothing, squeeze_dims=nothing) + function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) desc = tf.EagerOp("Squeeze") tf.add_input(desc, input_) if squeeze_dims !== nothing @@ -9520,6 +11186,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function squeeze(input_; name=nothing, squeeze_dims=nothing) + if tf.eager_mode + squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) + else + squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) + end + end end @@ -9529,7 +11202,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) local desc tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") @@ -9538,11 +11211,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_matching_files_dataset(patterns_::tf.TensorHandle; name=nothing) + function experimental_matching_files_dataset_eager(patterns_; name=nothing) desc = tf.EagerOp("ExperimentalMatchingFilesDataset") tf.add_input(desc, patterns_) (tf.execute(desc))[1] end + function experimental_matching_files_dataset(patterns_; name=nothing) + if tf.eager_mode + experimental_matching_files_dataset_eager(patterns_; name=name) + else + experimental_matching_files_dataset_graph(patterns_; name=name) + end + end end @@ -9552,7 +11232,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") @@ -9565,13 +11245,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_dataset_to_tf_record(input_dataset_::tf.TensorHandle, filename_::tf.TensorHandle, compression_type_::tf.TensorHandle; name=nothing) + function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetToTFRecord") tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) tf.add_input(desc, compression_type_) (tf.execute(desc))[1] end + function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) + if tf.eager_mode + experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) + else + experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) + end + end end @@ -9581,7 +11268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") @@ -9602,7 +11289,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc 
= tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") tf.add_input(desc, parameters_) if table_id !== nothing @@ -9619,6 +11306,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -9628,7 +11322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function no_op(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function no_op_graph(; name=nothing) local desc tf.with_op_name(name, "NoOp") do desc @@ -9636,10 +11330,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function no_op(; name=nothing) + function no_op_eager(; name=nothing) desc = tf.EagerOp("NoOp") (tf.execute(desc))[1] end + function no_op(; name=nothing) + if tf.eager_mode + no_op_eager(; name=name) + else + no_op_graph(; name=name) + end + end end @@ -9649,7 +11350,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ZipDataset") do desc = tf.NodeDescription("ZipDataset") @@ -9667,7 +11368,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function zip_dataset(input_datasets_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ZipDataset") tf.add_input(desc, input_datasets_) if output_types !== nothing @@ -9681,6 +11382,13 @@ begin end (tf.execute(desc))[1] end + function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.eager_mode + zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end + end end @@ -9690,7 +11398,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReaderV2") do desc = tf.NodeDescription("IdentityReaderV2") @@ -9703,7 +11411,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + function identity_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("IdentityReaderV2") if container !== nothing desc["container"] = Base.String(container) @@ -9713,6 +11421,13 @@ 
begin end (tf.execute(desc))[1] end + function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -9722,7 +11437,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "LMDBReader") do desc = tf.NodeDescription("LMDBReader") @@ -9735,7 +11450,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + function lmdb_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("LMDBReader") if container !== nothing desc["container"] = Base.String(container) @@ -9745,6 +11460,13 @@ begin end (tf.execute(desc))[1] end + function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + lmdb_reader_eager(; name=name, container=container, shared_name=shared_name) + else + lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -9754,7 +11476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "NcclAllReduce") do desc = tf.NodeDescription("NcclAllReduce") @@ -9773,7 +11495,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function nccl_all_reduce(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("NcclAllReduce") tf.add_input(desc, input_) if reduction !== nothing @@ -9788,6 +11510,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.eager_mode + nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end + end end @@ -9797,7 +11526,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TextLineDataset") do desc = tf.NodeDescription("TextLineDataset") @@ -9810,13 +11539,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function text_line_dataset(filenames_::tf.TensorHandle, compression_type_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + function text_line_dataset_eager(filenames_, 
compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TextLineDataset") tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) (tf.execute(desc))[1] end + function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.eager_mode + text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end + end end @@ -9826,7 +11562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) local desc tf.with_op_name(name, "SdcaShrinkL1") do desc = tf.NodeDescription("SdcaShrinkL1") @@ -9844,7 +11580,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sdca_shrink_l1(weights_::tf.TensorHandle; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) desc = tf.EagerOp("SdcaShrinkL1") tf.add_input(desc, weights_) if num_features !== nothing @@ -9858,6 +11594,13 @@ begin end (tf.execute(desc))[1] end + function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + if tf.eager_mode + sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + else + sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + end + end end @@ -9867,7 +11610,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReaderV2") do desc = tf.NodeDescription("TFRecordReaderV2") @@ -9883,7 +11626,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + function tf_record_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) desc = tf.EagerOp("TFRecordReaderV2") if container !== nothing desc["container"] = Base.String(container) @@ -9896,6 +11639,13 @@ begin end (tf.execute(desc))[1] end + function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.eager_mode + tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end + end end @@ -9905,7 +11655,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) 
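# The graph builders share this skeleton: `desc` is declared `local` before
# the with_op_name block so it survives the closure, populated under the
# chosen name scope, and finally wrapped as tf.Tensor(tf.Operation(desc)).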
local desc tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") @@ -9920,7 +11670,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator_from_string_handle(string_handle_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") tf.add_input(desc, string_handle_) if output_types !== nothing @@ -9931,6 +11681,13 @@ begin end (tf.execute(desc))[1] end + function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -9940,7 +11697,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDatasetV2") do desc = tf.NodeDescription("PaddedBatchDatasetV2") @@ -9966,7 +11723,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function padded_batch_dataset_v2(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, padded_shapes_::tf.TensorHandle, padding_values_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDatasetV2") tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) @@ -9984,6 +11741,13 @@ begin end (tf.execute(desc))[1] end + function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.eager_mode + padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end + end end @@ -9993,7 +11757,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, 
table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") @@ -10016,7 +11780,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_proximal_adagrad_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -10034,6 +11798,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -10043,7 +11814,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySize") do desc = tf.NodeDescription("TensorArraySize") @@ -10054,12 +11825,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_size(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_size_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySize") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) (tf.execute(desc))[1] end + function tensor_array_size(handle_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_size_eager(handle_, flow_in_; name=name) + else + tensor_array_size_graph(handle_, flow_in_; name=name) + end + end end @@ -10069,7 +11847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapSize") do desc = tf.NodeDescription("OrderedMapSize") @@ -10091,7 +11869,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapSize") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -10110,6 +11888,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_size(; name=nothing, 
capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -10119,7 +11904,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniform") do desc = tf.NodeDescription("StatelessRandomUniform") @@ -10135,7 +11920,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_random_uniform(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniform") tf.add_input(desc, shape_) tf.add_input(desc, seed_) @@ -10146,6 +11931,13 @@ begin desc["Tseed"] = tf.data_type(seed_) (tf.execute(desc))[1] end + function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) + if tf.eager_mode + stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) + end + end end @@ -10155,7 +11947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToSparseSetOperation") do desc = tf.NodeDescription("SparseToSparseSetOperation") @@ -10186,7 +11978,7 @@ begin end out end - function sparse_to_sparse_set_operation(set1_indices_::tf.TensorHandle, set1_values_::tf.TensorHandle, set1_shape_::tf.TensorHandle, set2_indices_::tf.TensorHandle, set2_values_::tf.TensorHandle, set2_shape_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing) + function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToSparseSetOperation") tf.add_input(desc, set1_indices_) tf.add_input(desc, set1_values_) @@ -10204,6 +11996,13 @@ begin desc["T"] = tf.data_type(set2_values_) tf.execute(desc) end + function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.eager_mode + sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, 
set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end + end end @@ -10213,7 +12012,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) local desc tf.with_op_name(name, "TensorSummary") do desc = tf.NodeDescription("TensorSummary") @@ -10232,7 +12031,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_summary(tensor_::tf.TensorHandle; name=nothing, description=nothing, labels=nothing, display_name=nothing) + function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) desc = tf.EagerOp("TensorSummary") tf.add_input(desc, tensor_) if description !== nothing @@ -10247,6 +12046,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + if tf.eager_mode + tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) + else + tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) + end + end end @@ -10256,7 +12062,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) local desc tf.with_op_name(name, "RemoteFusedGraphExecute") do desc = tf.NodeDescription("RemoteFusedGraphExecute") @@ -10274,7 +12080,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function remote_fused_graph_execute(inputs_::tf.TensorHandle; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) desc = tf.EagerOp("RemoteFusedGraphExecute") tf.add_input(desc, inputs_) if Tinputs !== nothing @@ -10288,6 +12094,13 @@ begin end (tf.execute(desc))[1] end + function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + if tf.eager_mode + remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + else + remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + end + end end @@ -10297,7 +12110,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, 
output_indices_; name=nothing) local desc tf.with_op_name(name, "SparseSliceGrad") do desc = tf.NodeDescription("SparseSliceGrad") @@ -10313,7 +12126,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_slice_grad(backprop_val_grad_::tf.TensorHandle, input_indices_::tf.TensorHandle, input_start_::tf.TensorHandle, output_indices_::tf.TensorHandle; name=nothing) + function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) desc = tf.EagerOp("SparseSliceGrad") tf.add_input(desc, backprop_val_grad_) tf.add_input(desc, input_indices_) @@ -10322,6 +12135,13 @@ begin desc["T"] = tf.data_type(backprop_val_grad_) (tf.execute(desc))[1] end + function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + if tf.eager_mode + sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + else + sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + end + end end @@ -10331,7 +12151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumsum") do desc = tf.NodeDescription("Cumsum") @@ -10351,7 +12171,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cumsum(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, exclusive=nothing, reverse=nothing) + function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumsum") tf.add_input(desc, x_) tf.add_input(desc, axis_) @@ -10365,6 +12185,13 @@ begin desc["Tidx"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.eager_mode + cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end + end end @@ -10374,7 +12201,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") @@ -10403,7 +12230,7 @@ begin end out end - function batch_norm_with_global_normalization_grad(t_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, gamma_::tf.TensorHandle, backprop_::tf.TensorHandle; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") tf.add_input(desc, t_) tf.add_input(desc, m_) @@ -10423,16 +12250,23 @@ begin desc["T"] = tf.data_type(backprop_) tf.execute(desc) end + function 
batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.eager_mode + batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end + end end """ - avg_pool_grad(orig_input_shape, grad; data_format=NHWC) + avg_pool_grad(orig_input_shape, grad; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPoolGrad") do desc = tf.NodeDescription("AvgPoolGrad") @@ -10456,7 +12290,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function avg_pool_grad(orig_input_shape_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPoolGrad") tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) @@ -10475,6 +12309,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -10484,7 +12325,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "RestoreV2") do desc = tf.NodeDescription("RestoreV2") @@ -10500,7 +12341,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function restore_v2(prefix_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shape_and_slices_::tf.TensorHandle; name=nothing, dtypes=nothing) + function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) desc = tf.EagerOp("RestoreV2") tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) @@ -10510,6 +12351,13 @@ begin end (tf.execute(desc))[1] end + function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + if tf.eager_mode + restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + else + restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + end + end end @@ -10519,7 +12367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
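# (The `#= generate_ops.jl:NNN =#` markers are source-location notes left by
# macro expansion; this patch moves them from :227 to :230, presumably
# because the generator script itself gained a few lines.)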
tf.@op function relu6(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu6_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu6") do desc = tf.NodeDescription("Relu6") @@ -10529,12 +12377,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function relu6(features_::tf.TensorHandle; name=nothing) + function relu6_eager(features_; name=nothing) desc = tf.EagerOp("Relu6") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function relu6(features_; name=nothing) + if tf.eager_mode + relu6_eager(features_; name=name) + else + relu6_graph(features_; name=name) + end + end end @@ -10544,7 +12399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyRMSProp") do desc = tf.NodeDescription("SparseApplyRMSProp") @@ -10575,7 +12430,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyRMSProp") tf.add_input(desc, var_) tf.add_input(desc, ms_) @@ -10600,6 +12455,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -10609,7 +12471,7 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Recv") do desc = tf.NodeDescription("_Recv") @@ -10634,7 +12496,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + function _recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Recv") if tensor_type !== nothing desc["tensor_type"] = Base.identity(tensor_type) @@ -10656,16 +12518,23 @@ begin end (tf.execute(desc))[1] end + function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.eager_mode + _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end + end end """ - max_pool(input; data_format=NHWC) + max_pool(input; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool") do desc = tf.NodeDescription("MaxPool") @@ -10687,7 +12556,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool") tf.add_input(desc, input_) if ksize !== nothing @@ -10705,6 +12574,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -10714,7 +12590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function invert(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function invert_graph(x_; name=nothing) local desc tf.with_op_name(name, "Invert") do desc = 
tf.NodeDescription("Invert") @@ -10724,12 +12600,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function invert(x_::tf.TensorHandle; name=nothing) + function invert_eager(x_; name=nothing) desc = tf.EagerOp("Invert") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function invert(x_; name=nothing) + if tf.eager_mode + invert_eager(x_; name=name) + else + invert_graph(x_; name=name) + end + end end @@ -10739,7 +12622,7 @@ end *NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _unary_ops_composition(x_; name=nothing, op_names=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) local desc tf.with_op_name(name, "_UnaryOpsComposition") do desc = tf.NodeDescription("_UnaryOpsComposition") @@ -10752,7 +12635,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _unary_ops_composition(x_::tf.TensorHandle; name=nothing, op_names=nothing) + function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) desc = tf.EagerOp("_UnaryOpsComposition") tf.add_input(desc, x_) if op_names !== nothing @@ -10761,6 +12644,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function _unary_ops_composition(x_; name=nothing, op_names=nothing) + if tf.eager_mode + _unary_ops_composition_eager(x_; name=name, op_names=op_names) + else + _unary_ops_composition_graph(x_; name=name, op_names=op_names) + end + end end @@ -10770,7 +12660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapDataset") do desc = tf.NodeDescription("ExperimentalMapDataset") @@ -10799,7 +12689,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -10823,6 +12713,13 @@ begin end (tf.execute(desc))[1] end + function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.eager_mode + experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, 
preserve_cardinality=preserve_cardinality) + else + experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end + end end @@ -10832,7 +12729,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") @@ -10857,7 +12754,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adam_parameters(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, velocities_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) @@ -10876,6 +12773,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -10885,7 +12789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "ParseTensor") do desc = tf.NodeDescription("ParseTensor") @@ -10897,7 +12801,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parse_tensor(serialized_::tf.TensorHandle; name=nothing, out_type=nothing) + function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) desc = tf.EagerOp("ParseTensor") tf.add_input(desc, serialized_) if out_type !== nothing @@ -10905,6 +12809,13 @@ begin end (tf.execute(desc))[1] end + function parse_tensor(serialized_; name=nothing, out_type=nothing) + if tf.eager_mode + parse_tensor_eager(serialized_; name=name, out_type=out_type) + else + parse_tensor_graph(serialized_; name=name, out_type=out_type) + end + end end @@ -10914,7 +12825,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") @@ -10933,7 +12844,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_materialized_index_dataset_handle_eager(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle") if container !== nothing desc["container"] = Base.String(container) @@ -10949,6 +12860,13 @@ begin end (tf.execute(desc))[1] end + function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + else + experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -10958,7 +12876,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") @@ -10977,7 +12895,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator_get_next_from_shard(multi_device_iterator_::tf.TensorHandle, shard_num_::tf.TensorHandle, incarnation_id_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, shard_num_) @@ -10990,6 +12908,13 @@ begin end (tf.execute(desc))[1] end + function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -10999,7 +12924,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_uniform_int(shape_, minval_, maxval_; name=nothing, 
seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomUniformInt") do desc = tf.NodeDescription("RandomUniformInt") @@ -11020,7 +12945,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_uniform_int(shape_::tf.TensorHandle, minval_::tf.TensorHandle, maxval_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing) + function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomUniformInt") tf.add_input(desc, shape_) tf.add_input(desc, minval_) @@ -11036,6 +12961,13 @@ begin desc["Tout"] = tf.data_type(maxval_) (tf.execute(desc))[1] end + function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + else + random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + end + end end @@ -11045,7 +12977,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") @@ -11063,7 +12995,7 @@ begin end out end - function sparse_softmax_cross_entropy_with_logits(features_::tf.TensorHandle, labels_::tf.TensorHandle; name=nothing) + function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") tf.add_input(desc, features_) tf.add_input(desc, labels_) @@ -11071,6 +13003,13 @@ begin desc["Tlabels"] = tf.data_type(labels_) tf.execute(desc) end + function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.eager_mode + sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end + end end @@ -11080,7 +13019,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV2") do desc = tf.NodeDescription("TensorArrayReadV2") @@ -11096,7 +13035,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_read_v2(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing) + function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV2") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -11106,6 +13045,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.eager_mode + tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v2_graph(handle_, 
index_, flow_in_; name=name, dtype=dtype) + end + end end @@ -11115,7 +13061,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpTo") do desc = tf.NodeDescription("ReaderReadUpTo") @@ -11133,23 +13079,30 @@ begin end out end - function reader_read_up_to(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle, num_records_::tf.TensorHandle; name=nothing) + function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpTo") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) tf.execute(desc) end + function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.eager_mode + reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) + end + end end """ - encode_proto(sizes, values; descriptor_source=local://) + encode_proto(sizes, values; descriptor_source=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) local desc tf.with_op_name(name, "EncodeProto") do desc = tf.NodeDescription("EncodeProto") @@ -11172,7 +13125,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function encode_proto(sizes_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) desc = tf.EagerOp("EncodeProto") tf.add_input(desc, sizes_) tf.add_input(desc, values_) @@ -11190,6 +13143,13 @@ begin end (tf.execute(desc))[1] end + function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + if tf.eager_mode + encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + else + encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + end + end end @@ -11199,7 +13159,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, 
new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceGrad") do desc = tf.NodeDescription("StridedSliceGrad") @@ -11254,7 +13214,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function strided_slice_grad(shape_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceGrad") tf.add_input(desc, shape_) tf.add_input(desc, begin_) @@ -11301,6 +13261,13 @@ begin desc["T"] = tf.data_type(dy_) (tf.execute(desc))[1] end + function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.eager_mode + strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end + end end @@ -11310,7 +13277,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceSend") do desc = tf.NodeDescription("_NcclReduceSend") @@ -11329,7 +13296,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _nccl_reduce_send(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceSend") tf.add_input(desc, input_) if reduction !== nothing @@ -11344,6 +13311,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.eager_mode + _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end + end end @@ -11353,7 +13327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local 
desc tf.with_op_name(name, "PaddedBatchDataset") do desc = tf.NodeDescription("PaddedBatchDataset") @@ -11377,7 +13351,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function padded_batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, padded_shapes_::tf.TensorHandle, padding_values_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) @@ -11394,16 +13368,23 @@ begin end (tf.execute(desc))[1] end + function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.eager_mode + padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end + end end """ - data_format_vec_permute(x; src_format=NHWC, dst_format=NCHW) + data_format_vec_permute(x; src_format=, dst_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatVecPermute") do desc = tf.NodeDescription("DataFormatVecPermute") @@ -11419,7 +13400,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function data_format_vec_permute(x_::tf.TensorHandle; name=nothing, src_format=nothing, dst_format=nothing) + function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatVecPermute") tf.add_input(desc, x_) if src_format !== nothing @@ -11431,16 +13412,23 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) + if tf.eager_mode + data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end + end end """ - string_format(inputs; template=%s, placeholder=%s, summarize=3) + string_format(inputs; template=, placeholder=, summarize=3) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) local desc tf.with_op_name(name, "StringFormat") do desc = tf.NodeDescription("StringFormat") @@ -11461,7 +13449,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_format(inputs_::tf.TensorHandle; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, 
placeholder=nothing, summarize=nothing) desc = tf.EagerOp("StringFormat") tf.add_input(desc, inputs_) if T !== nothing @@ -11478,6 +13466,13 @@ begin end (tf.execute(desc))[1] end + function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + if tf.eager_mode + string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + else + string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + end + end end @@ -11487,7 +13482,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) local desc tf.with_op_name(name, "AsString") do desc = tf.NodeDescription("AsString") @@ -11512,7 +13507,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function as_string(input_::tf.TensorHandle; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) desc = tf.EagerOp("AsString") tf.add_input(desc, input_) if precision !== nothing @@ -11533,6 +13528,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + if tf.eager_mode + as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + else + as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + end + end end @@ -11542,7 +13544,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueMany") do desc = tf.NodeDescription("QueueEnqueueMany") @@ -11559,7 +13561,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_enqueue_many(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueMany") tf.add_input(desc, handle_) tf.add_input(desc, components_) @@ -11571,6 +13573,13 @@ begin end (tf.execute(desc))[1] end + function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end + end end @@ -11580,7 +13589,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_param(; name=nothing, 
dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "FakeParam") do desc = tf.NodeDescription("FakeParam") @@ -11593,7 +13602,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fake_param(; name=nothing, dtype=nothing, shape=nothing) + function fake_param_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("FakeParam") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -11603,6 +13612,13 @@ begin end (tf.execute(desc))[1] end + function fake_param(; name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + fake_param_eager(; name=name, dtype=dtype, shape=shape) + else + fake_param_graph(; name=name, dtype=dtype, shape=shape) + end + end end @@ -11612,7 +13628,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ApplyAdagrad") do desc = tf.NodeDescription("ApplyAdagrad") @@ -11634,7 +13650,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ApplyAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -11652,6 +13668,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.eager_mode + apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end + end end @@ -11661,7 +13684,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIteratorGetDevice") do desc = tf.NodeDescription("ExperimentalIteratorGetDevice") @@ -11670,11 +13693,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_iterator_get_device(resource_::tf.TensorHandle; name=nothing) + function experimental_iterator_get_device_eager(resource_; name=nothing) desc = tf.EagerOp("ExperimentalIteratorGetDevice") tf.add_input(desc, resource_) (tf.execute(desc))[1] end + function experimental_iterator_get_device(resource_; name=nothing) + if tf.eager_mode + experimental_iterator_get_device_eager(resource_; name=name) + else + experimental_iterator_get_device_graph(resource_; name=name) + end + end end @@ -11684,7 +13714,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) local desc tf.with_op_name(name, "AdjustContrast") do desc = tf.NodeDescription("AdjustContrast") @@ -11700,7 +13730,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function adjust_contrast(images_::tf.TensorHandle, contrast_factor_::tf.TensorHandle, min_value_::tf.TensorHandle, max_value_::tf.TensorHandle; name=nothing) + function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) desc = tf.EagerOp("AdjustContrast") tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) @@ -11709,6 +13739,13 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) + if tf.eager_mode + adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) + else + adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) + end + end end @@ -11718,7 +13755,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractImagePatches") do desc = tf.NodeDescription("ExtractImagePatches") @@ -11740,7 +13777,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function extract_image_patches(images_::tf.TensorHandle; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("ExtractImagePatches") tf.add_input(desc, images_) if ksizes !== nothing @@ -11758,16 +13795,23 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.eager_mode + extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) + else + extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) + end + end end """ - scale_and_translate(images, size, scale, translation; kernel_type=lanczos3) + scale_and_translate(images, size, scale, translation; kernel_type=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslate") do desc = tf.NodeDescription("ScaleAndTranslate") @@ -11786,7 +13830,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scale_and_translate(images_::tf.TensorHandle, size_::tf.TensorHandle, scale_::tf.TensorHandle, translation_::tf.TensorHandle; name=nothing, kernel_type=nothing) + function scale_and_translate_eager(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslate") 
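# Every generated *_eager body follows the four steps visible in this
# ScaleAndTranslate hunk: construct a tf.EagerOp from the op name, feed each
# input with tf.add_input, copy any explicitly passed attributes into desc,
# and infer dtype attributes such as "T" from the input handles via
# tf.data_type. tf.execute(desc) then runs the op and returns a vector of
# output handles; single-output ops index with [1], while multi-output ops
# such as SparseSoftmaxCrossEntropyWithLogits and ReaderReadUpTo return the
# vector unchanged.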
tf.add_input(desc, images_) tf.add_input(desc, size_) @@ -11798,6 +13842,13 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) + if tf.eager_mode + scale_and_translate_eager(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) + else + scale_and_translate_graph(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) + end + end end @@ -11807,7 +13858,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_none(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_none_graph(; name=nothing) local desc tf.with_op_name(name, "OptionalNone") do desc @@ -11815,10 +13866,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function optional_none(; name=nothing) + function optional_none_eager(; name=nothing) desc = tf.EagerOp("OptionalNone") (tf.execute(desc))[1] end + function optional_none(; name=nothing) + if tf.eager_mode + optional_none_eager(; name=name) + else + optional_none_graph(; name=name) + end + end end @@ -11828,7 +13886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "VariableV2") do desc = tf.NodeDescription("VariableV2") @@ -11847,7 +13905,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + function variable_v2_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("VariableV2") if shape !== nothing desc["shape"] = Base.identity(shape) @@ -11863,6 +13921,13 @@ begin end (tf.execute(desc))[1] end + function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end + end end @@ -11872,7 +13937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function elu(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function elu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Elu") do desc = tf.NodeDescription("Elu") @@ -11882,12 +13947,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function elu(features_::tf.TensorHandle; name=nothing) + function elu_eager(features_; name=nothing) desc = tf.EagerOp("Elu") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function elu(features_; name=nothing) + if tf.eager_mode + elu_eager(features_; name=name) + else + elu_graph(features_; name=name) + end + end end @@ -11897,7 +13969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 
=# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterUpdate") do desc = tf.NodeDescription("ScatterUpdate") @@ -11916,7 +13988,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_update(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterUpdate") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -11929,6 +14001,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -11938,7 +14017,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor_mod(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorMod") do desc = tf.NodeDescription("FloorMod") @@ -11950,7 +14029,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function floor_mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function floor_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorMod") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -11958,6 +14037,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function floor_mod(x_, y_; name=nothing) + if tf.eager_mode + floor_mod_eager(x_, y_; name=name) + else + floor_mod_graph(x_, y_; name=name) + end + end end @@ -11967,7 +14053,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") @@ -11982,7 +14068,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_ignore_errors_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -11993,6 +14079,13 @@ begin end (tf.execute(desc))[1] end + function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -12002,7 +14095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, 
counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") @@ -12023,7 +14116,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_set_stats_aggregator_dataset(input_dataset_::tf.TensorHandle, stats_aggregator_::tf.TensorHandle, tag_::tf.TensorHandle, counter_prefix_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, stats_aggregator_) @@ -12037,6 +14130,13 @@ begin end (tf.execute(desc))[1] end + function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -12046,7 +14146,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ComputeAccidentalHits") do desc = tf.NodeDescription("ComputeAccidentalHits") @@ -12071,7 +14171,7 @@ begin end out end - function compute_accidental_hits(true_classes_::tf.TensorHandle, sampled_candidates_::tf.TensorHandle; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ComputeAccidentalHits") tf.add_input(desc, true_classes_) tf.add_input(desc, sampled_candidates_) @@ -12086,6 +14186,13 @@ begin end tf.execute(desc) end + function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + else + compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + end + end end @@ -12095,7 +14202,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, 
out_type=nothing) local desc tf.with_op_name(name, "StringToNumber") do desc = tf.NodeDescription("StringToNumber") @@ -12107,7 +14214,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_to_number(string_tensor_::tf.TensorHandle; name=nothing, out_type=nothing) + function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) desc = tf.EagerOp("StringToNumber") tf.add_input(desc, string_tensor_) if out_type !== nothing @@ -12115,6 +14222,13 @@ begin end (tf.execute(desc))[1] end + function string_to_number(string_tensor_; name=nothing, out_type=nothing) + if tf.eager_mode + string_to_number_eager(string_tensor_; name=name, out_type=out_type) + else + string_to_number_graph(string_tensor_; name=name, out_type=out_type) + end + end end @@ -12124,7 +14238,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function snapshot(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function snapshot_graph(input_; name=nothing) local desc tf.with_op_name(name, "Snapshot") do desc = tf.NodeDescription("Snapshot") @@ -12134,12 +14248,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function snapshot(input_::tf.TensorHandle; name=nothing) + function snapshot_eager(input_; name=nothing) desc = tf.EagerOp("Snapshot") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function snapshot(input_; name=nothing) + if tf.eager_mode + snapshot_eager(input_; name=name) + else + snapshot_graph(input_; name=name) + end + end end @@ -12149,7 +14270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) local desc tf.with_op_name(name, "DeserializeIterator") do desc = tf.NodeDescription("DeserializeIterator") @@ -12160,12 +14281,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function deserialize_iterator(resource_handle_::tf.TensorHandle, serialized_::tf.TensorHandle; name=nothing) + function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) desc = tf.EagerOp("DeserializeIterator") tf.add_input(desc, resource_handle_) tf.add_input(desc, serialized_) (tf.execute(desc))[1] end + function deserialize_iterator(resource_handle_, serialized_; name=nothing) + if tf.eager_mode + deserialize_iterator_eager(resource_handle_, serialized_; name=name) + else + deserialize_iterator_graph(resource_handle_, serialized_; name=name) + end + end end @@ -12175,7 +14303,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atan(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atan") do desc = tf.NodeDescription("Atan") @@ -12185,12 +14313,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function atan(x_::tf.TensorHandle; name=nothing) + function atan_eager(x_; name=nothing) desc = tf.EagerOp("Atan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function atan(x_; name=nothing) + if tf.eager_mode + atan_eager(x_; name=name) + else + atan_graph(x_; name=name) + end + end end @@ -12200,7 +14335,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op 
function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "MatMul") do desc = tf.NodeDescription("MatMul") @@ -12218,7 +14353,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing) + function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("MatMul") tf.add_input(desc, a_) tf.add_input(desc, b_) @@ -12232,6 +14367,13 @@ begin desc["T"] = tf.data_type(b_) (tf.execute(desc))[1] end + function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + if tf.eager_mode + mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + else + mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + end + end end @@ -12241,7 +14383,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function erfc(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function erfc_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erfc") do desc = tf.NodeDescription("Erfc") @@ -12251,12 +14393,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function erfc(x_::tf.TensorHandle; name=nothing) + function erfc_eager(x_; name=nothing) desc = tf.EagerOp("Erfc") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function erfc(x_; name=nothing) + if tf.eager_mode + erfc_eager(x_; name=name) + else + erfc_graph(x_; name=name) + end + end end @@ -12266,7 +14415,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SigmoidGrad") do desc = tf.NodeDescription("SigmoidGrad") @@ -12278,7 +14427,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sigmoid_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + function sigmoid_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SigmoidGrad") tf.add_input(desc, y_) tf.add_input(desc, dy_) @@ -12286,6 +14435,13 @@ begin desc["T"] = tf.data_type(dy_) (tf.execute(desc))[1] end + function sigmoid_grad(y_, dy_; name=nothing) + if tf.eager_mode + sigmoid_grad_eager(y_, dy_; name=name) + else + sigmoid_grad_graph(y_, dy_; name=name) + end + end end @@ -12295,7 +14451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReaderV2") do desc = tf.NodeDescription("FixedLengthRecordReaderV2") @@ -12323,7 +14479,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fixed_length_record_reader_v2(; name=nothing, 
header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + function fixed_length_record_reader_v2_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) desc = tf.EagerOp("FixedLengthRecordReaderV2") if header_bytes !== nothing desc["header_bytes"] = Base.Int(header_bytes) @@ -12348,6 +14504,13 @@ begin end (tf.execute(desc))[1] end + function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + if tf.eager_mode + fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + else + fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + end + end end @@ -12357,7 +14520,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV3") do desc = tf.NodeDescription("NonMaxSuppressionV3") @@ -12375,7 +14538,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function non_max_suppression_v3(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing) + function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV3") tf.add_input(desc, boxes_) tf.add_input(desc, scores_) @@ -12386,6 +14549,13 @@ begin desc["T"] = tf.data_type(scores_) (tf.execute(desc))[1] end + function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + if tf.eager_mode + non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + else + non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + end + end end @@ -12395,7 +14565,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropInput") do desc = tf.NodeDescription("Dilation2DBackpropInput") @@ -12418,7 +14588,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function dilation2d_backprop_input(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, rates=nothing, padding=nothing) + function 
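# Optional keyword attributes, as in FixedLengthRecordReaderV2 above, are only
# written into the op description when the caller supplies them, so
# TensorFlow's registered defaults stay in effect otherwise. A sketch of the
# per-attribute guard; the wrapper function and its attribute names are
# hypothetical:
function set_optional_attrs_sketch(desc; header_bytes=nothing, encoding=nothing)
    if header_bytes !== nothing
        desc["header_bytes"] = Base.Int(header_bytes)  # coerce to an Int attr
    end
    if encoding !== nothing
        desc["encoding"] = Base.String(encoding)       # coerce to a string attr
    end
    return desc
end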
dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropInput") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -12437,6 +14607,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.eager_mode + dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end + end end @@ -12446,7 +14623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_or(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalOr") do desc = tf.NodeDescription("LogicalOr") @@ -12457,12 +14634,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function logical_or(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function logical_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalOr") tf.add_input(desc, x_) tf.add_input(desc, y_) (tf.execute(desc))[1] end + function logical_or(x_, y_; name=nothing) + if tf.eager_mode + logical_or_eager(x_, y_; name=name) + else + logical_or_graph(x_, y_; name=name) + end + end end @@ -12472,7 +14656,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdadelta") do desc = tf.NodeDescription("ResourceApplyAdadelta") @@ -12497,7 +14681,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdadelta") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -12515,6 +14699,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -12524,7 +14715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToSparseSetOperation") do desc = tf.NodeDescription("DenseToSparseSetOperation") @@ -12551,7 +14742,7 @@ begin end out end - function dense_to_sparse_set_operation(set1_::tf.TensorHandle, set2_indices_::tf.TensorHandle, set2_values_::tf.TensorHandle, set2_shape_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing) + function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("DenseToSparseSetOperation") tf.add_input(desc, set1_) tf.add_input(desc, set2_indices_) @@ -12567,6 +14758,13 @@ begin desc["T"] = tf.data_type(set2_values_) tf.execute(desc) end + function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.eager_mode + dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end + end end @@ -12576,7 +14774,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProduced") do desc = tf.NodeDescription("ReaderNumRecordsProduced") @@ -12585,11 +14783,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_num_records_produced(reader_handle_::tf.TensorHandle; name=nothing) + function reader_num_records_produced_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProduced") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_num_records_produced(reader_handle_; name=nothing) + if tf.eager_mode + reader_num_records_produced_eager(reader_handle_; name=name) + else + reader_num_records_produced_graph(reader_handle_; name=name) + end + end end @@ -12599,7 +14804,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_hue(images_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) local desc tf.with_op_name(name, "AdjustHue") do desc = tf.NodeDescription("AdjustHue") @@ -12611,13 +14816,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function adjust_hue(images_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing) + function adjust_hue_eager(images_, delta_; name=nothing) desc = tf.EagerOp("AdjustHue") tf.add_input(desc, images_) tf.add_input(desc, delta_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function adjust_hue(images_, delta_; name=nothing) + if tf.eager_mode + adjust_hue_eager(images_, delta_; name=name) + else + adjust_hue_graph(images_, delta_; name=name) + end + end end @@ -12627,7 +14839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
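# Return-value convention: `tf.execute(desc)` yields a vector of output
# handles. Single-output ops index with `[1]`, while multi-output ops such as
# DenseToSparseSetOperation above hand back the whole vector. A hypothetical
# helper expressing the rule (not part of the generated code):
function unwrap_outputs(desc)
    outputs = tf.execute(desc)
    return length(outputs) == 1 ? outputs[1] : outputs
end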
boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") @@ -12641,7 +14853,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_::tf.TensorHandle, num_buckets_::tf.TensorHandle; name=nothing, generate_quantiles=nothing) + function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, num_buckets_) @@ -12650,6 +14862,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + if tf.eager_mode + boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + else + boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + end + end end @@ -12659,7 +14878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") @@ -12691,7 +14910,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_map_and_batch_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, batch_size_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapAndBatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -12715,6 +14934,13 @@ begin end (tf.execute(desc))[1] end + function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.eager_mode + 
experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end + end end @@ -12724,7 +14950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function real_div(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function real_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RealDiv") do desc = tf.NodeDescription("RealDiv") @@ -12736,7 +14962,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function real_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function real_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("RealDiv") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -12744,6 +14970,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function real_div(x_, y_; name=nothing) + if tf.eager_mode + real_div_eager(x_, y_; name=name) + else + real_div_graph(x_, y_; name=name) + end + end end @@ -12753,7 +14986,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "RestoreSlice") do desc = tf.NodeDescription("RestoreSlice") @@ -12772,7 +15005,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function restore_slice(file_pattern_::tf.TensorHandle, tensor_name_::tf.TensorHandle, shape_and_slice_::tf.TensorHandle; name=nothing, dt=nothing, preferred_shard=nothing) + function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("RestoreSlice") tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) @@ -12785,6 +15018,13 @@ begin end (tf.execute(desc))[1] end + function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + if tf.eager_mode + restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + else + restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + end + end end @@ -12794,7 +15034,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPopV2") do desc = tf.NodeDescription("StackPopV2") @@ -12806,7 +15046,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack_pop_v2(handle_::tf.TensorHandle; name=nothing, elem_type=nothing) + function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) desc = 
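# For multi-input math ops such as RealDiv above, the generator writes the
# "T" attribute once per input. Both assignments target the same key, so the
# inputs are assumed to share a dtype and the last write wins. Sketch, as a
# hypothetical wrapper around the emitted lines:
function binary_eager_sketch(op_name, x_, y_)
    desc = tf.EagerOp(op_name)
    tf.add_input(desc, x_)
    tf.add_input(desc, y_)
    desc["T"] = tf.data_type(x_)
    desc["T"] = tf.data_type(y_)   # same attr key; dtypes must agree
    (tf.execute(desc))[1]
end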
tf.EagerOp("StackPopV2") tf.add_input(desc, handle_) if elem_type !== nothing @@ -12814,6 +15054,13 @@ begin end (tf.execute(desc))[1] end + function stack_pop_v2(handle_; name=nothing, elem_type=nothing) + if tf.eager_mode + stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_v2_graph(handle_; name=name, elem_type=elem_type) + end + end end @@ -12823,7 +15070,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse(tensor_, dims_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) local desc tf.with_op_name(name, "Reverse") do desc = tf.NodeDescription("Reverse") @@ -12835,13 +15082,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reverse(tensor_::tf.TensorHandle, dims_::tf.TensorHandle; name=nothing) + function reverse_eager(tensor_, dims_; name=nothing) desc = tf.EagerOp("Reverse") tf.add_input(desc, tensor_) tf.add_input(desc, dims_) desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function reverse(tensor_, dims_; name=nothing) + if tf.eager_mode + reverse_eager(tensor_, dims_; name=name) + else + reverse_graph(tensor_, dims_; name=name) + end + end end @@ -12851,7 +15105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) local desc tf.with_op_name(name, "DecodePng") do desc = tf.NodeDescription("DecodePng") @@ -12866,7 +15120,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_png(contents_::tf.TensorHandle; name=nothing, channels=nothing, dtype=nothing) + function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) desc = tf.EagerOp("DecodePng") tf.add_input(desc, contents_) if channels !== nothing @@ -12877,6 +15131,13 @@ begin end (tf.execute(desc))[1] end + function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) + if tf.eager_mode + decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) + else + decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) + end + end end @@ -12886,7 +15147,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV2") do desc = tf.NodeDescription("NonMaxSuppressionV2") @@ -12902,7 +15163,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function non_max_suppression_v2(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle; name=nothing) + function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV2") tf.add_input(desc, boxes_) tf.add_input(desc, scores_) @@ -12912,6 +15173,13 @@ begin desc["T"] = tf.data_type(scores_) (tf.execute(desc))[1] end + function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + if tf.eager_mode + 
non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + else + non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + end + end end @@ -12921,7 +15189,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igamma(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igamma") do desc = tf.NodeDescription("Igamma") @@ -12933,7 +15201,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function igamma(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function igamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igamma") tf.add_input(desc, a_) tf.add_input(desc, x_) @@ -12941,6 +15209,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function igamma(a_, x_; name=nothing) + if tf.eager_mode + igamma_eager(a_, x_; name=name) + else + igamma_graph(a_, x_; name=name) + end + end end @@ -12950,7 +15225,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function digamma(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function digamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Digamma") do desc = tf.NodeDescription("Digamma") @@ -12960,12 +15235,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function digamma(x_::tf.TensorHandle; name=nothing) + function digamma_eager(x_; name=nothing) desc = tf.EagerOp("Digamma") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function digamma(x_; name=nothing) + if tf.eager_mode + digamma_eager(x_; name=name) + else + digamma_graph(x_; name=name) + end + end end @@ -12975,7 +15257,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdaMax") do desc = tf.NodeDescription("ResourceApplyAdaMax") @@ -13004,7 +15286,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_ada_max(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdaMax") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -13026,16 +15308,23 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end """ - space_to_depth(input; 
data_format=NHWC) + space_to_depth(input; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "SpaceToDepth") do desc = tf.NodeDescription("SpaceToDepth") @@ -13051,7 +15340,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function space_to_depth(input_::tf.TensorHandle; name=nothing, block_size=nothing, data_format=nothing) + function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("SpaceToDepth") tf.add_input(desc, input_) if block_size !== nothing @@ -13063,6 +15352,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) + if tf.eager_mode + space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) + else + space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) + end + end end @@ -13072,7 +15368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sqrt_grad(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SqrtGrad") do desc = tf.NodeDescription("SqrtGrad") @@ -13084,7 +15380,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sqrt_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + function sqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SqrtGrad") tf.add_input(desc, y_) tf.add_input(desc, dy_) @@ -13092,6 +15388,13 @@ begin desc["T"] = tf.data_type(dy_) (tf.execute(desc))[1] end + function sqrt_grad(y_, dy_; name=nothing) + if tf.eager_mode + sqrt_grad_eager(y_, dy_; name=name) + else + sqrt_grad_graph(y_, dy_; name=name) + end + end end @@ -13101,7 +15404,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstage") do desc = tf.NodeDescription("MapUnstage") @@ -13127,7 +15430,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_unstage(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstage") tf.add_input(desc, key_) tf.add_input(desc, indices_) @@ -13148,6 +15451,13 @@ begin end (tf.execute(desc))[1] end + function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, 
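# List-valued attributes (such as `dtypes` in MapUnstage above) are passed
# through `map(Base.identity, ...)`, which normalizes any iterable into a
# plain Vector before it is stored on the op description. Illustrative only;
# the helper name is hypothetical:
function set_list_attr_sketch(desc, dtypes)
    # e.g. a tuple (Int32, Float64) becomes the vector [Int32, Float64]
    desc["dtypes"] = map(Base.identity, dtypes)
end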
container=container, shared_name=shared_name) + else + map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -13157,7 +15467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Qr") do desc = tf.NodeDescription("Qr") @@ -13175,7 +15485,7 @@ begin end out end - function qr(input_::tf.TensorHandle; name=nothing, full_matrices=nothing) + function qr_eager(input_; name=nothing, full_matrices=nothing) desc = tf.EagerOp("Qr") tf.add_input(desc, input_) if full_matrices !== nothing @@ -13184,6 +15494,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function qr(input_; name=nothing, full_matrices=nothing) + if tf.eager_mode + qr_eager(input_; name=name, full_matrices=full_matrices) + else + qr_graph(input_; name=name, full_matrices=full_matrices) + end + end end @@ -13193,7 +15510,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") @@ -13223,7 +15540,7 @@ begin end out end - function boosted_trees_calculate_best_gains_per_feature(node_id_range_::tf.TensorHandle, stats_summary_list_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, tree_complexity_::tf.TensorHandle, min_node_weight_::tf.TensorHandle; name=nothing, max_splits=nothing, num_features=nothing) + function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") tf.add_input(desc, node_id_range_) tf.add_input(desc, stats_summary_list_) @@ -13239,6 +15556,13 @@ begin end tf.execute(desc) end + function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + else + boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + end + end end @@ -13248,7 +15572,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "UnbatchGrad") do desc = tf.NodeDescription("UnbatchGrad") @@ -13270,7 +15594,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unbatch_grad(original_input_::tf.TensorHandle, batch_index_::tf.TensorHandle, grad_::tf.TensorHandle, id_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("UnbatchGrad") tf.add_input(desc, original_input_) tf.add_input(desc, batch_index_) @@ -13286,6 +15610,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + else + unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + end + end end @@ -13295,7 +15626,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_softmax(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "LogSoftmax") do desc = tf.NodeDescription("LogSoftmax") @@ -13305,12 +15636,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function log_softmax(logits_::tf.TensorHandle; name=nothing) + function log_softmax_eager(logits_; name=nothing) desc = tf.EagerOp("LogSoftmax") tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) (tf.execute(desc))[1] end + function log_softmax(logits_; name=nothing) + if tf.eager_mode + log_softmax_eager(logits_; name=name) + else + log_softmax_graph(logits_; name=name) + end + end end @@ -13320,7 +15658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "ResourceCountUpTo") do desc = tf.NodeDescription("ResourceCountUpTo") @@ -13332,7 +15670,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_count_up_to(resource_::tf.TensorHandle; name=nothing, limit=nothing) + function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) desc = tf.EagerOp("ResourceCountUpTo") tf.add_input(desc, resource_) if limit !== nothing @@ -13340,6 +15678,13 @@ begin end (tf.execute(desc))[1] end + function resource_count_up_to(resource_; name=nothing, limit=nothing) + if tf.eager_mode + resource_count_up_to_eager(resource_; name=name, limit=limit) + else + resource_count_up_to_graph(resource_; name=name, limit=limit) + end + end end @@ -13349,7 +15694,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) local desc 
tf.with_op_name(name, "AccumulateNV2") do desc = tf.NodeDescription("AccumulateNV2") @@ -13365,7 +15710,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function accumulate_nv2(inputs_::tf.TensorHandle; name=nothing, N=nothing, shape=nothing) + function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("AccumulateNV2") tf.add_input(desc, inputs_) if N !== nothing @@ -13377,6 +15722,13 @@ begin desc["T"] = tf.data_type(inputs_) (tf.execute(desc))[1] end + function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) + if tf.eager_mode + accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) + else + accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) + end + end end @@ -13386,7 +15738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ParallelMapDataset") do desc = tf.NodeDescription("ParallelMapDataset") @@ -13420,7 +15772,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parallel_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ParallelMapDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -13448,6 +15800,13 @@ begin end (tf.execute(desc))[1] end + function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + if tf.eager_mode + parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + else + parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + end + end end @@ -13457,7 +15816,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_uniform_graph(shape_; 
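# With the dispatchers in place, call sites keep using one name per op and the
# mode flag alone picks the path. A hedged usage sketch for AccumulateNV2
# above (the wrapper is hypothetical; it assumes `tf.eager_mode` is the Bool
# flag this series introduces):
function accumulate_sketch(inputs)
    # tf.eager_mode == true  -> accumulate_nv2_eager, returns a tensor handle
    # tf.eager_mode == false -> accumulate_nv2_graph, returns a graph Tensor
    accumulate_nv2(inputs; N=length(inputs))
end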
name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomUniform") do desc = tf.NodeDescription("RandomUniform") @@ -13476,7 +15835,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_uniform(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomUniform") tf.add_input(desc, shape_) if seed !== nothing @@ -13491,16 +15850,23 @@ begin desc["T"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.eager_mode + random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end + end end """ - unicode_transcode(input; errors=replace, replacement_char=65533, replace_control_characters=false) + unicode_transcode(input; errors=, replacement_char=65533, replace_control_characters=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeTranscode") do desc = tf.NodeDescription("UnicodeTranscode") @@ -13524,7 +15890,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unicode_transcode(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeTranscode") tf.add_input(desc, input_) if input_encoding !== nothing @@ -13544,6 +15910,13 @@ begin end (tf.execute(desc))[1] end + function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.eager_mode + unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end + end end @@ -13553,7 +15926,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_reset(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReset") do desc = tf.NodeDescription("ReaderReset") @@ -13562,11 +15935,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_reset(reader_handle_::tf.TensorHandle; name=nothing) + function reader_reset_eager(reader_handle_; 
name=nothing) desc = tf.EagerOp("ReaderReset") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_reset(reader_handle_; name=nothing) + if tf.eager_mode + reader_reset_eager(reader_handle_; name=name) + else + reader_reset_graph(reader_handle_; name=name) + end + end end @@ -13576,7 +15956,7 @@ end Replacement node for NcclBroadcast. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastSend") do desc = tf.NodeDescription("_NcclBroadcastSend") @@ -13592,7 +15972,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _nccl_broadcast_send(input_::tf.TensorHandle; name=nothing, num_devices=nothing, shared_name=nothing) + function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastSend") tf.add_input(desc, input_) if num_devices !== nothing @@ -13604,6 +15984,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) + if tf.eager_mode + _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name) + else + _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) + end + end end @@ -13613,7 +16000,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_determinant(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDeterminant") do desc = tf.NodeDescription("BatchMatrixDeterminant") @@ -13623,12 +16010,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_determinant(input_::tf.TensorHandle; name=nothing) + function batch_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function batch_matrix_determinant(input_; name=nothing) + if tf.eager_mode + batch_matrix_determinant_eager(input_; name=name) + else + batch_matrix_determinant_graph(input_; name=name) + end + end end @@ -13638,7 +16032,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function less_equal(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function less_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LessEqual") do desc = tf.NodeDescription("LessEqual") @@ -13650,7 +16044,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function less_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function less_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("LessEqual") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -13658,6 +16052,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function less_equal(x_, y_; name=nothing) + if tf.eager_mode + less_equal_eager(x_, y_; name=name) + else + less_equal_graph(x_, y_; name=name) + end + end end @@ -13667,7 +16068,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyGradientDescent") do desc = tf.NodeDescription("ApplyGradientDescent") @@ -13684,7 +16085,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -13697,6 +16098,13 @@ begin desc["T"] = tf.data_type(delta_) (tf.execute(desc))[1] end + function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) + else + apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + end + end end @@ -13706,7 +16114,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtN") do desc = tf.NodeDescription("SparseSegmentSqrtN") @@ -13722,7 +16130,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_sqrt_n(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtN") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -13731,6 +16139,13 @@ begin desc["Tidx"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) + if tf.eager_mode + sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) + end + end end @@ -13740,7 +16155,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_logarithm(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixLogarithm") do desc = tf.NodeDescription("MatrixLogarithm") @@ -13750,12 +16165,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_logarithm(input_::tf.TensorHandle; name=nothing) + function matrix_logarithm_eager(input_; name=nothing) desc = tf.EagerOp("MatrixLogarithm") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_logarithm(input_; name=nothing) + if tf.eager_mode + matrix_logarithm_eager(input_; name=name) + else + matrix_logarithm_graph(input_; name=name) + end + end end @@ -13765,7 +16187,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= 
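# Ops with several type parameters get one assignment per attribute:
# SparseSegmentSqrtN above records the element dtype under "T" and the index
# dtype under "Tidx". Sketch of the emitted pair, wrapped in a hypothetical
# helper for illustration:
function dual_dtype_sketch(desc, data_, indices_)
    desc["T"]    = tf.data_type(data_)     # element type of the data input
    desc["Tidx"] = tf.data_type(indices_)  # Int32/Int64 index type
end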
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMul") do desc = tf.NodeDescription("ScatterMul") @@ -13784,7 +16206,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_mul(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMul") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -13797,6 +16219,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -13806,7 +16235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeJpeg") do desc = tf.NodeDescription("DecodeJpeg") @@ -13833,7 +16262,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_jpeg(contents_::tf.TensorHandle; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeJpeg") tf.add_input(desc, contents_) if channels !== nothing @@ -13856,6 +16285,13 @@ begin end (tf.execute(desc))[1] end + function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.eager_mode + decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end + end end @@ -13865,7 +16301,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc 
tf.with_op_name(name, "RandomShuffleQueueV2") do desc = tf.NodeDescription("RandomShuffleQueueV2") @@ -13896,7 +16332,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + function random_shuffle_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("RandomShuffleQueueV2") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -13924,6 +16360,13 @@ begin end (tf.execute(desc))[1] end + function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end + end end @@ -13933,7 +16376,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueManyV2") do desc = tf.NodeDescription("QueueEnqueueManyV2") @@ -13950,7 +16393,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_enqueue_many_v2(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueManyV2") tf.add_input(desc, handle_) tf.add_input(desc, components_) @@ -13962,6 +16405,13 @@ begin end (tf.execute(desc))[1] end + function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end + end end @@ -13971,7 +16421,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") @@ -14004,7 +16454,7 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") tf.add_input(desc, var_) tf.add_input(desc, mg_) @@ -14027,6 +16477,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -14036,7 +16493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "InterleaveDataset") do desc = tf.NodeDescription("InterleaveDataset") @@ -14063,7 +16520,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function interleave_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("InterleaveDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -14083,6 +16540,13 @@ begin end (tf.execute(desc))[1] end + function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -14092,7 +16556,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPop") do desc = 
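# Dataset ops such as InterleaveDataset above also carry function-valued and
# shape-list attributes; the generator appears to thread them through with the
# same `!== nothing` guards used for scalar attrs. Illustrative only, with a
# hypothetical helper name:
function set_fn_attrs_sketch(desc; f=nothing, output_shapes=nothing)
    f === nothing || (desc["f"] = Base.identity(f))
    output_shapes === nothing ||
        (desc["output_shapes"] = map(Base.identity, output_shapes))
    return desc
end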
tf.NodeDescription("StackPop") @@ -14104,7 +16568,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack_pop(handle_::tf.TensorHandle; name=nothing, elem_type=nothing) + function stack_pop_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPop") tf.add_input(desc, handle_) if elem_type !== nothing @@ -14112,16 +16576,23 @@ begin end (tf.execute(desc))[1] end + function stack_pop(handle_; name=nothing, elem_type=nothing) + if tf.eager_mode + stack_pop_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_graph(handle_; name=name, elem_type=elem_type) + end + end end """ - max_pool_v2(input, ksize, strides; data_format=NHWC) + max_pool_v2(input, ksize, strides; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolV2") do desc = tf.NodeDescription("MaxPoolV2") @@ -14141,7 +16612,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_v2(input_::tf.TensorHandle, ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing) + function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolV2") tf.add_input(desc, input_) tf.add_input(desc, ksize_) @@ -14155,6 +16626,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end + end end @@ -14164,7 +16642,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") @@ -14177,13 +16655,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_deserialize_ensemble(tree_ensemble_handle_::tf.TensorHandle, stamp_token_::tf.TensorHandle, tree_ensemble_serialized_::tf.TensorHandle; name=nothing) + function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) (tf.execute(desc))[1] end + function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.eager_mode + boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; 
name=name) + end + end end @@ -14193,7 +16678,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) local desc tf.with_op_name(name, "LoadAndRemapMatrix") do desc = tf.NodeDescription("LoadAndRemapMatrix") @@ -14219,7 +16704,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_and_remap_matrix(ckpt_path_::tf.TensorHandle, old_tensor_name_::tf.TensorHandle, row_remapping_::tf.TensorHandle, col_remapping_::tf.TensorHandle, initializing_values_::tf.TensorHandle; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) desc = tf.EagerOp("LoadAndRemapMatrix") tf.add_input(desc, ckpt_path_) tf.add_input(desc, old_tensor_name_) @@ -14237,6 +16722,13 @@ begin end (tf.execute(desc))[1] end + function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + if tf.eager_mode + load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + else + load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + end + end end @@ -14246,7 +16738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalGradientDescent") do desc = tf.NodeDescription("SparseApplyProximalGradientDescent") @@ -14271,7 +16763,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -14290,6 +16782,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, 
use_locking=use_locking) + else + sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -14299,7 +16798,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFuncStateless") do desc = tf.NodeDescription("PyFuncStateless") @@ -14317,7 +16816,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function py_func_stateless(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFuncStateless") tf.add_input(desc, input_) if token !== nothing @@ -14331,6 +16830,13 @@ begin end (tf.execute(desc))[1] end + function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.eager_mode + py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end + end end @@ -14340,7 +16846,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function where(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function where_graph(input_; name=nothing) local desc tf.with_op_name(name, "Where") do desc = tf.NodeDescription("Where") @@ -14350,12 +16856,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function where(input_::tf.TensorHandle; name=nothing) + function where_eager(input_; name=nothing) desc = tf.EagerOp("Where") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function where(input_; name=nothing) + if tf.eager_mode + where_eager(input_; name=name) + else + where_graph(input_; name=name) + end + end end @@ -14365,7 +16878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) local desc tf.with_op_name(name, "Mfcc") do desc = tf.NodeDescription("Mfcc") @@ -14388,7 +16901,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mfcc(spectrogram_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) desc = tf.EagerOp("Mfcc") tf.add_input(desc, spectrogram_) tf.add_input(desc, sample_rate_) @@ -14406,6 +16919,13 @@ begin end (tf.execute(desc))[1] end + function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, 
filterbank_channel_count=nothing, dct_coefficient_count=nothing) + if tf.eager_mode + mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + else + mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + end + end end @@ -14415,7 +16935,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function check_numerics(tensor_; name=nothing, message=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) local desc tf.with_op_name(name, "CheckNumerics") do desc = tf.NodeDescription("CheckNumerics") @@ -14428,7 +16948,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function check_numerics(tensor_::tf.TensorHandle; name=nothing, message=nothing) + function check_numerics_eager(tensor_; name=nothing, message=nothing) desc = tf.EagerOp("CheckNumerics") tf.add_input(desc, tensor_) if message !== nothing @@ -14437,6 +16957,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function check_numerics(tensor_; name=nothing, message=nothing) + if tf.eager_mode + check_numerics_eager(tensor_; name=name, message=message) + else + check_numerics_graph(tensor_; name=name, message=message) + end + end end @@ -14446,7 +16973,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_compilation_result(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_compilation_result_graph(; name=nothing) local desc tf.with_op_name(name, "TPUCompilationResult") do desc @@ -14454,10 +16981,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_compilation_result(; name=nothing) + function tpu_compilation_result_eager(; name=nothing) desc = tf.EagerOp("TPUCompilationResult") (tf.execute(desc))[1] end + function tpu_compilation_result(; name=nothing) + if tf.eager_mode + tpu_compilation_result_eager(; name=name) + else + tpu_compilation_result_graph(; name=name) + end + end end @@ -14467,7 +17001,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") @@ -14486,7 +17020,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = 
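# Optional keyword arguments only become node attributes when the caller
# supplies them, and each goes through an explicit `Base.*` conversion
# (`Base.Int`, `Base.String`, `Base.Bool`, or `map(Base.identity, ...)` for
# list attrs); the same guard appears in both the graph and eager builders.
# An isolated sketch of that pattern, mirroring
# retrieve_tpu_embedding_stochastic_gradient_descent_parameters above;
# `set_table_attrs!` is a hypothetical helper, not generated code:
function set_table_attrs!(desc; table_id=nothing, table_name=nothing)
    if table_id !== nothing
        desc["table_id"] = Base.Int(table_id)         # integer attr
    end
    if table_name !== nothing
        desc["table_name"] = Base.String(table_name)  # string attr
    end
    return desc
end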
tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -14502,6 +17036,13 @@ begin end (tf.execute(desc))[1] end + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -14511,7 +17052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanGrad") do desc = tf.NodeDescription("SparseSegmentMeanGrad") @@ -14529,7 +17070,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_mean_grad(grad_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, output_dim0_::tf.TensorHandle; name=nothing) + function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanGrad") tf.add_input(desc, grad_) tf.add_input(desc, indices_) @@ -14539,6 +17080,13 @@ begin desc["Tidx"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.eager_mode + sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) + else + sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end + end end @@ -14548,7 +17096,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "TryRpc") do desc = tf.NodeDescription("TryRpc") @@ -14575,7 +17123,7 @@ begin end out end - function try_rpc(address_::tf.TensorHandle, method_::tf.TensorHandle, request_::tf.TensorHandle; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("TryRpc") tf.add_input(desc, address_) tf.add_input(desc, method_) @@ -14591,6 +17139,13 @@ begin end tf.execute(desc) end + function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.eager_mode + try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end + end end @@ -14600,7 +17155,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixTriangularSolve") do desc = tf.NodeDescription("BatchMatrixTriangularSolve") @@ -14618,7 +17173,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_triangular_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, lower=nothing, adjoint=nothing) + function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixTriangularSolve") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -14632,6 +17187,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + if tf.eager_mode + batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + else + batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end + end end @@ -14641,7 +17203,7 @@ end A graph node which represents a return value of a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _retval(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Retval") do desc = tf.NodeDescription("_Retval") @@ -14654,7 +17216,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _retval(input_::tf.TensorHandle; name=nothing, index=nothing) + function _retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_Retval") tf.add_input(desc, input_) if index !== nothing @@ -14663,6 +17225,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _retval(input_; name=nothing, index=nothing) + if tf.eager_mode + _retval_eager(input_; name=name, index=index) + else + _retval_graph(input_; name=name, index=index) + end + end end @@ -14672,7 +17241,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCounts") do desc = tf.NodeDescription("UniqueWithCounts") @@ -14690,7 +17259,7 @@ begin end out end - function unique_with_counts(x_::tf.TensorHandle; name=nothing, out_idx=nothing) + function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCounts") tf.add_input(desc, x_) if out_idx !== nothing @@ -14699,6 +17268,13 @@ begin desc["T"] = tf.data_type(x_) tf.execute(desc) end + function unique_with_counts(x_; name=nothing, out_idx=nothing) + if tf.eager_mode + unique_with_counts_eager(x_; name=name, out_idx=out_idx) + else + unique_with_counts_graph(x_; name=name, out_idx=out_idx) + end + end end @@ -14708,7 +17284,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op 
function add_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Add") do desc = tf.NodeDescription("Add") @@ -14720,7 +17296,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function add(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function add_eager(x_, y_; name=nothing) desc = tf.EagerOp("Add") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -14728,6 +17304,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function add(x_, y_; name=nothing) + if tf.eager_mode + add_eager(x_, y_; name=name) + else + add_graph(x_, y_; name=name) + end + end end @@ -14737,7 +17320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalScanDataset") do desc = tf.NodeDescription("ExperimentalScanDataset") @@ -14768,7 +17351,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_scan_dataset(input_dataset_::tf.TensorHandle, initial_state_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalScanDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) @@ -14793,6 +17376,13 @@ begin end (tf.execute(desc))[1] end + function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.eager_mode + experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end + end end @@ -14802,7 +17392,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignAddVariableOp") do desc = tf.NodeDescription("AssignAddVariableOp") @@ -14817,7 +17407,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign_add_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + function assign_add_variable_op_eager(resource_, value_; 
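# With the dispatcher in place, callers never choose a mode explicitly. A
# usage sketch from inside the module where these ops are defined, assuming
# eager mode is switched on (however the final API ends up toggling
# `tf.eager_mode`) and reusing the TensorHandle/RawTensor plumbing from
# src/eager.jl:
x = tf.TensorHandle(tf.RawTensor([1, 2]))
y = tf.TensorHandle(tf.RawTensor([3, 4]))
res = add(x, y)    # dispatches to add_eager; "Add" runs immediately
tf.resolve(res)    # copies the result back into a RawTensor: [4, 6]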
name=nothing, dtype=nothing) desc = tf.EagerOp("AssignAddVariableOp") tf.add_input(desc, resource_) tf.add_input(desc, value_) @@ -14827,6 +17417,13 @@ begin desc["dtype"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.eager_mode + assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end + end end @@ -14836,7 +17433,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SplitV") do desc = tf.NodeDescription("SplitV") @@ -14860,7 +17457,7 @@ begin end out end - function split_v(value_::tf.TensorHandle, size_splits_::tf.TensorHandle, split_dim_::tf.TensorHandle; name=nothing, num_split=nothing) + function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) desc = tf.EagerOp("SplitV") tf.add_input(desc, value_) tf.add_input(desc, size_splits_) @@ -14872,6 +17469,13 @@ begin desc["Tlen"] = tf.data_type(size_splits_) tf.execute(desc) end + function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + if tf.eager_mode + split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) + else + split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) + end + end end @@ -14881,7 +17485,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) local desc tf.with_op_name(name, "Assign") do desc = tf.NodeDescription("Assign") @@ -14899,7 +17503,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, validate_shape=nothing, use_locking=nothing) + function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) desc = tf.EagerOp("Assign") tf.add_input(desc, ref_) tf.add_input(desc, value_) @@ -14913,6 +17517,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + if tf.eager_mode + assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + else + assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + end + end end @@ -14922,7 +17533,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolWithArgmax") do desc = tf.NodeDescription("MaxPoolWithArgmax") @@ -14946,7 +17557,7 @@ begin end out end - function max_pool_with_argmax(input_::tf.TensorHandle; name=nothing, ksize=nothing, 
strides=nothing, padding=nothing) + function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolWithArgmax") tf.add_input(desc, input_) if ksize !== nothing @@ -14961,6 +17572,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) + end + end end @@ -14970,7 +17588,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedReluX") do desc = tf.NodeDescription("QuantizedReluX") @@ -14994,7 +17612,7 @@ begin end out end - function quantized_relu_x(features_::tf.TensorHandle, max_value_::tf.TensorHandle, min_features_::tf.TensorHandle, max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedReluX") tf.add_input(desc, features_) tf.add_input(desc, max_value_) @@ -15006,6 +17624,13 @@ begin desc["Tinput"] = tf.data_type(features_) tf.execute(desc) end + function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.eager_mode + quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + end + end end @@ -15015,7 +17640,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueue") do desc = tf.NodeDescription("RandomShuffleQueue") @@ -15046,7 +17671,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + function random_shuffle_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("RandomShuffleQueue") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -15074,6 +17699,13 @@ begin end (tf.execute(desc))[1] end + function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, 
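# Multi-output ops such as max_pool_with_argmax and quantized_relu_x above
# return the whole tuple from tf.execute instead of indexing `[1]`, matching
# the multi-tensor `out` their graph builders collect. A sketch for a
# two-output op ("Unique" yields values and indices), under the same
# assumptions as the earlier sketches; `unique_eager_sketch` is hypothetical,
# not generated code:
function unique_eager_sketch(x_; name=nothing)
    desc = tf.EagerOp("Unique")
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)
    tf.execute(desc)  # every output handle, in op-definition order
end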
min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end + end end @@ -15083,7 +17715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft2d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT2D") do desc = tf.NodeDescription("FFT2D") @@ -15093,12 +17725,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fft2d(input_::tf.TensorHandle; name=nothing) + function fft2d_eager(input_; name=nothing) desc = tf.EagerOp("FFT2D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) (tf.execute(desc))[1] end + function fft2d(input_; name=nothing) + if tf.eager_mode + fft2d_eager(input_; name=name) + else + fft2d_graph(input_; name=name) + end + end end @@ -15108,7 +17747,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalThreadPoolDataset") @@ -15125,7 +17764,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_thread_pool_dataset(input_dataset_::tf.TensorHandle, thread_pool_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalThreadPoolDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, thread_pool_) @@ -15137,6 +17776,13 @@ begin end (tf.execute(desc))[1] end + function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -15146,7 +17792,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") 
do desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") @@ -15166,7 +17812,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_directed_interleave_dataset(selector_input_dataset_::tf.TensorHandle, data_input_datasets_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") tf.add_input(desc, selector_input_dataset_) tf.add_input(desc, data_input_datasets_) @@ -15181,6 +17827,13 @@ begin end (tf.execute(desc))[1] end + function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.eager_mode + experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end + end end @@ -15190,7 +17843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNGrad") do desc = tf.NodeDescription("SparseSegmentSqrtNGrad") @@ -15208,7 +17861,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_sqrt_n_grad(grad_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, output_dim0_::tf.TensorHandle; name=nothing) + function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNGrad") tf.add_input(desc, grad_) tf.add_input(desc, indices_) @@ -15218,6 +17871,13 @@ begin desc["Tidx"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.eager_mode + sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) + else + sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end + end end @@ -15227,7 +17887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function real(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function real_graph(input_; name=nothing) local desc tf.with_op_name(name, "Real") do desc = tf.NodeDescription("Real") @@ -15237,12 +17897,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function real(input_::tf.TensorHandle; name=nothing) + function real_eager(input_; name=nothing) desc = tf.EagerOp("Real") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function real(input_; name=nothing) + if tf.eager_mode + real_eager(input_; name=name) + else + real_graph(input_; name=name) + end + end end @@ -15252,7 +17919,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_unstage(key_, indices_; 
name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstage") do desc = tf.NodeDescription("OrderedMapUnstage") @@ -15278,7 +17945,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_unstage(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstage") tf.add_input(desc, key_) tf.add_input(desc, indices_) @@ -15299,6 +17966,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -15308,7 +17982,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft2d(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT2D") do desc = tf.NodeDescription("RFFT2D") @@ -15319,12 +17993,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rfft2d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function rfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT2D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function rfft2d(input_, fft_length_; name=nothing) + if tf.eager_mode + rfft2d_eager(input_, fft_length_; name=name) + else + rfft2d_graph(input_, fft_length_; name=name) + end + end end @@ -15334,7 +18015,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function var_is_initialized_op(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) local desc tf.with_op_name(name, "VarIsInitializedOp") do desc = tf.NodeDescription("VarIsInitializedOp") @@ -15343,11 +18024,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function var_is_initialized_op(resource_::tf.TensorHandle; name=nothing) + function var_is_initialized_op_eager(resource_; name=nothing) desc = tf.EagerOp("VarIsInitializedOp") tf.add_input(desc, resource_) (tf.execute(desc))[1] end + function var_is_initialized_op(resource_; name=nothing) + if tf.eager_mode + var_is_initialized_op_eager(resource_; name=name) + else + var_is_initialized_op_graph(resource_; name=name) + end + end end @@ -15357,7 +18045,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; 
name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") @@ -15370,7 +18058,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + function boosted_trees_quantile_stream_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp") if container !== nothing desc["container"] = Base.String(container) @@ -15380,6 +18068,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -15389,7 +18084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atan2(y_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atan2_graph(y_, x_; name=nothing) local desc tf.with_op_name(name, "Atan2") do desc = tf.NodeDescription("Atan2") @@ -15401,7 +18096,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function atan2(y_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function atan2_eager(y_, x_; name=nothing) desc = tf.EagerOp("Atan2") tf.add_input(desc, y_) tf.add_input(desc, x_) @@ -15409,6 +18104,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function atan2(y_, x_; name=nothing) + if tf.eager_mode + atan2_eager(y_, x_; name=name) + else + atan2_graph(y_, x_; name=name) + end + end end @@ -15418,7 +18120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoisson") do desc = tf.NodeDescription("RandomPoisson") @@ -15443,7 +18145,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_poisson(shape_::tf.TensorHandle, rate_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoisson") tf.add_input(desc, shape_) tf.add_input(desc, rate_) @@ -15463,6 +18165,13 @@ begin desc["dtype"] = tf.data_type(rate_) (tf.execute(desc))[1] end + function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + if tf.eager_mode + random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + else + random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + end + end end @@ -15472,7 +18181,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) local desc tf.with_op_name(name, "ReverseSequence") do desc = tf.NodeDescription("ReverseSequence") @@ -15491,7 +18200,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reverse_sequence(input_::tf.TensorHandle, seq_lengths_::tf.TensorHandle; name=nothing, seq_dim=nothing, batch_dim=nothing) + function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) desc = tf.EagerOp("ReverseSequence") tf.add_input(desc, input_) tf.add_input(desc, seq_lengths_) @@ -15505,6 +18214,13 @@ begin desc["Tlen"] = tf.data_type(seq_lengths_) (tf.execute(desc))[1] end + function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + if tf.eager_mode + reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + else + reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + end + end end @@ -15514,7 +18230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "OutfeedEnqueue") do desc = tf.NodeDescription("OutfeedEnqueue") @@ -15527,7 +18243,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function outfeed_enqueue(input_::tf.TensorHandle; name=nothing, dtype=nothing) + function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) desc = tf.EagerOp("OutfeedEnqueue") tf.add_input(desc, input_) if dtype !== nothing @@ -15536,6 +18252,13 @@ begin desc["dtype"] = tf.data_type(input_) (tf.execute(desc))[1] end + function outfeed_enqueue(input_; name=nothing, dtype=nothing) + if tf.eager_mode + outfeed_enqueue_eager(input_; name=name, dtype=dtype) + else + outfeed_enqueue_graph(input_; name=name, dtype=dtype) + end + end end @@ -15545,7 +18268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sub(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sub_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Sub") do desc = tf.NodeDescription("Sub") @@ -15557,7 +18280,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sub(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function sub_eager(x_, y_; name=nothing) desc = tf.EagerOp("Sub") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -15565,6 +18288,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function sub(x_, y_; name=nothing) + if tf.eager_mode + sub_eager(x_, y_; name=name) + else + sub_graph(x_, y_; name=name) + end + end end @@ -15574,7 +18304,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) local desc tf.with_op_name(name, "StringSplit") do desc = 
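# Eager execution cannot lean on graph-time type inference, so every typed
# attr is read directly off the corresponding input handle; ReverseSequence
# above sets "T" from the data tensor and "Tlen" from the lengths tensor.
# Isolated sketch of that inference step (`infer_typed_attrs!` is a
# hypothetical helper, not generated code):
function infer_typed_attrs!(desc, input_, seq_lengths_)
    desc["T"] = tf.data_type(input_)           # element dtype of the data
    desc["Tlen"] = tf.data_type(seq_lengths_)  # integer dtype of the lengths
    return desc
end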
tf.NodeDescription("StringSplit") @@ -15593,7 +18323,7 @@ begin end out end - function string_split(input_::tf.TensorHandle, delimiter_::tf.TensorHandle; name=nothing, skip_empty=nothing) + function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) desc = tf.EagerOp("StringSplit") tf.add_input(desc, input_) tf.add_input(desc, delimiter_) @@ -15602,6 +18332,13 @@ begin end tf.execute(desc) end + function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) + if tf.eager_mode + string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) + else + string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) + end + end end @@ -15611,7 +18348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumprod") do desc = tf.NodeDescription("Cumprod") @@ -15631,7 +18368,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cumprod(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, exclusive=nothing, reverse=nothing) + function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumprod") tf.add_input(desc, x_) tf.add_input(desc, axis_) @@ -15645,6 +18382,13 @@ begin desc["Tidx"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.eager_mode + cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end + end end @@ -15654,7 +18398,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "QuantizedResizeBilinear") do desc = tf.NodeDescription("QuantizedResizeBilinear") @@ -15678,7 +18422,7 @@ begin end out end - function quantized_resize_bilinear(images_::tf.TensorHandle, size_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, align_corners=nothing) + function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) desc = tf.EagerOp("QuantizedResizeBilinear") tf.add_input(desc, images_) tf.add_input(desc, size_) @@ -15690,6 +18434,13 @@ begin desc["T"] = tf.data_type(images_) tf.execute(desc) end + function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) + if tf.eager_mode + quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) + else + quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) + end + end end @@ -15699,7 +18450,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleExample") do desc = tf.NodeDescription("ParseSingleExample") @@ -15733,7 +18484,7 @@ begin end out end - function parse_single_example(serialized_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseSingleExample") tf.add_input(desc, serialized_) tf.add_input(desc, dense_defaults_) @@ -15757,6 +18508,13 @@ begin end tf.execute(desc) end + function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + if tf.eager_mode + parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + else + parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + end + end end @@ -15766,7 +18524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "IsVariableInitialized") do desc = tf.NodeDescription("IsVariableInitialized") @@ -15779,7 +18537,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_variable_initialized(ref_::tf.TensorHandle; name=nothing, dtype=nothing) + function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) desc = tf.EagerOp("IsVariableInitialized") tf.add_input(desc, ref_) if dtype !== nothing @@ -15788,6 +18546,13 @@ begin desc["dtype"] = tf.data_type(ref_) (tf.execute(desc))[1] end + function is_variable_initialized(ref_; name=nothing, dtype=nothing) + if tf.eager_mode + is_variable_initialized_eager(ref_; name=name, dtype=dtype) + else + is_variable_initialized_graph(ref_; name=name, dtype=dtype) + end + end end @@ -15797,7 +18562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") @@ -15810,7 +18575,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + function experimental_stats_aggregator_handle_eager(; name=nothing, 
container=nothing, shared_name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") if container !== nothing desc["container"] = Base.String(container) @@ -15820,6 +18585,13 @@ begin end (tf.execute(desc))[1] end + function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) + else + experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -15829,7 +18601,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListConcatV2") do desc = tf.NodeDescription("TensorListConcatV2") @@ -15854,7 +18626,7 @@ begin end out end - function tensor_list_concat_v2(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle, leading_dims_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListConcatV2") tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) @@ -15868,16 +18640,23 @@ begin desc["shape_type"] = tf.data_type(element_shape_) tf.execute(desc) end + function tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end """ - cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV2") do desc = tf.NodeDescription("CudnnRNNV2") @@ -15919,7 +18698,7 @@ begin end out end - function cudnn_rnnv2(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, 
direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV2") tf.add_input(desc, input_) tf.add_input(desc, input_h_) @@ -15952,6 +18731,13 @@ begin desc["T"] = tf.data_type(params_) tf.execute(desc) end + function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.eager_mode + cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end + end end @@ -15961,7 +18747,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterSub") do desc = tf.NodeDescription("ResourceScatterSub") @@ -15980,7 +18766,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_sub(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterSub") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -15992,6 +18778,13 @@ begin desc["dtype"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.eager_mode + resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end + end end @@ -16001,7 +18794,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignAdd") do desc = tf.NodeDescription("AssignAdd") @@ -16016,7 +18809,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign_add(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, use_locking=nothing) + function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignAdd") tf.add_input(desc, ref_) tf.add_input(desc, value_) @@ -16027,6 +18820,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign_add(ref_, value_; name=nothing, use_locking=nothing) + if tf.eager_mode + assign_add_eager(ref_, value_; name=name, use_locking=use_locking) + else + assign_add_graph(ref_, value_; name=name, use_locking=use_locking) + end + end end @@ -16036,7 +18836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 
=# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorDataset") do desc = tf.NodeDescription("TensorDataset") @@ -16051,7 +18851,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_dataset(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing) + function tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorDataset") tf.add_input(desc, components_) if Toutput_types !== nothing @@ -16062,6 +18862,13 @@ begin end (tf.execute(desc))[1] end + function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.eager_mode + tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end + end end @@ -16071,7 +18878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) local desc tf.with_op_name(name, "Bucketize") do desc = tf.NodeDescription("Bucketize") @@ -16084,7 +18891,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bucketize(input_::tf.TensorHandle; name=nothing, boundaries=nothing) + function bucketize_eager(input_; name=nothing, boundaries=nothing) desc = tf.EagerOp("Bucketize") tf.add_input(desc, input_) if boundaries !== nothing @@ -16093,6 +18900,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function bucketize(input_; name=nothing, boundaries=nothing) + if tf.eager_mode + bucketize_eager(input_; name=name, boundaries=boundaries) + else + bucketize_graph(input_; name=name, boundaries=boundaries) + end + end end @@ -16102,7 +18916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMax") do desc = tf.NodeDescription("SparseReduceMax") @@ -16121,7 +18935,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_reduce_max(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMax") tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) @@ -16133,6 +18947,13 @@ begin desc["T"] = tf.data_type(input_values_) (tf.execute(desc))[1] end + function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.eager_mode + sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + 
end + end end @@ -16142,7 +18963,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") @@ -16166,7 +18987,7 @@ begin end out end - function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -16182,6 +19003,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -16191,7 +19019,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradWithShape") do desc = tf.NodeDescription("TensorArrayGradWithShape") @@ -16212,7 +19040,7 @@ begin end out end - function tensor_array_grad_with_shape(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle, shape_to_prepend_::tf.TensorHandle; name=nothing, source=nothing) + function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradWithShape") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -16222,6 +19050,13 @@ begin end tf.execute(desc) end + function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + if tf.eager_mode + tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + else + tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + end + end end @@ -16231,7 +19066,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close_v3(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV3") do desc = tf.NodeDescription("TensorArrayCloseV3") @@ -16240,11 +19075,18 @@ begin end 
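# --- A minimal, self-contained sketch (not part of this patch) of the
# three-function shape every op in this generated file is being rewritten
# into: a `*_graph` builder, a `*_eager` executor, and a thin wrapper that
# dispatches on the eager-mode flag. `my_op` and `EAGER_MODE` are
# hypothetical stand-ins for the generated names and `tf.eager_mode`.
const EAGER_MODE = Ref(false)                        # stand-in for tf.eager_mode
my_op_graph(x; name=nothing) = "graph node $(name)"  # would build a NodeDescription
my_op_eager(x; name=nothing) = "eager value $(x)"    # would build and run an EagerOp
function my_op(x; name=nothing)
    if EAGER_MODE[]
        my_op_eager(x; name=name)    # execute immediately
    else
        my_op_graph(x; name=name)    # record a node in the default graph
    end
end
# ---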
tf.Tensor(tf.Operation(desc)) end - function tensor_array_close_v3(handle_::tf.TensorHandle; name=nothing) + function tensor_array_close_v3_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV3") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function tensor_array_close_v3(handle_; name=nothing) + if tf.eager_mode + tensor_array_close_v3_eager(handle_; name=name) + else + tensor_array_close_v3_graph(handle_; name=name) + end + end end @@ -16254,7 +19096,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") @@ -16271,7 +19113,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function non_max_suppression_with_overlaps(overlaps_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle, overlap_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing) + function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") tf.add_input(desc, overlaps_) tf.add_input(desc, scores_) @@ -16280,6 +19122,13 @@ begin tf.add_input(desc, score_threshold_) (tf.execute(desc))[1] end + function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + if tf.eager_mode + non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + else + non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + end + end end @@ -16289,7 +19138,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) local desc tf.with_op_name(name, "Pack") do desc = tf.NodeDescription("Pack") @@ -16308,7 +19157,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function pack(values_::tf.TensorHandle; name=nothing, N=nothing, axis=nothing) + function pack_eager(values_; name=nothing, N=nothing, axis=nothing) desc = tf.EagerOp("Pack") tf.add_input(desc, values_) if N !== nothing @@ -16323,6 +19172,13 @@ begin desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function pack(values_; name=nothing, N=nothing, axis=nothing) + if tf.eager_mode + pack_eager(values_; name=name, N=N, axis=axis) + else + pack_graph(values_; name=name, N=N, axis=axis) + end + end end @@ -16332,7 +19188,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV2") do desc = 
tf.NodeDescription("TensorArrayGradV2") @@ -16346,7 +19202,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_grad_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV2") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -16355,6 +19211,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) + if tf.eager_mode + tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) + end + end end @@ -16364,7 +19227,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignSubVariableOp") do desc = tf.NodeDescription("AssignSubVariableOp") @@ -16379,7 +19242,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign_sub_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignSubVariableOp") tf.add_input(desc, resource_) tf.add_input(desc, value_) @@ -16389,6 +19252,13 @@ begin desc["dtype"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.eager_mode + assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end + end end @@ -16398,7 +19268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft2d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT2D") do desc = tf.NodeDescription("BatchFFT2D") @@ -16407,11 +19277,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_fft2d(input_::tf.TensorHandle; name=nothing) + function batch_fft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT2D") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_fft2d(input_; name=nothing) + if tf.eager_mode + batch_fft2d_eager(input_; name=name) + else + batch_fft2d_graph(input_; name=name) + end + end end @@ -16421,7 +19298,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function close_summary_writer(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "CloseSummaryWriter") do desc = tf.NodeDescription("CloseSummaryWriter") @@ -16430,11 +19307,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function close_summary_writer(writer_::tf.TensorHandle; name=nothing) + function close_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("CloseSummaryWriter") tf.add_input(desc, writer_) (tf.execute(desc))[1] end + function close_summary_writer(writer_; name=nothing) + if tf.eager_mode + 
close_summary_writer_eager(writer_; name=name) + else + close_summary_writer_graph(writer_; name=name) + end + end end @@ -16444,7 +19328,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rank(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rank_graph(input_; name=nothing) local desc tf.with_op_name(name, "Rank") do desc = tf.NodeDescription("Rank") @@ -16454,12 +19338,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rank(input_::tf.TensorHandle; name=nothing) + function rank_eager(input_; name=nothing) desc = tf.EagerOp("Rank") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function rank(input_; name=nothing) + if tf.eager_mode + rank_eager(input_; name=name) + else + rank_graph(input_; name=name) + end + end end @@ -16469,7 +19360,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft3d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT3D") do desc = tf.NodeDescription("FFT3D") @@ -16479,12 +19370,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fft3d(input_::tf.TensorHandle; name=nothing) + function fft3d_eager(input_; name=nothing) desc = tf.EagerOp("FFT3D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) (tf.execute(desc))[1] end + function fft3d(input_; name=nothing) + if tf.eager_mode + fft3d_eager(input_; name=name) + else + fft3d_graph(input_; name=name) + end + end end @@ -16494,7 +19392,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrl") do desc = tf.NodeDescription("ApplyFtrl") @@ -16521,7 +19419,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrl") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -16544,6 +19442,13 @@ begin desc["T"] = tf.data_type(lr_power_) (tf.execute(desc))[1] end + function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end + end end @@ -16553,7 +19458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) local desc 
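# --- A sketch of the run-time path inside one generated `*_eager` method,
# mirroring the `rank_eager`-style bodies above and using only the helpers
# they call (`tf.EagerOp`, `tf.add_input`, `tf.data_type`, `tf.execute`).
# It assumes the `tf` module from this branch is in scope and that `x` is
# already a `tf.TensorHandle`; `unary_eager_sketch` is a hypothetical name.
function unary_eager_sketch(op_name, x)
    desc = tf.EagerOp(op_name)       # e.g. "Rank"
    tf.add_input(desc, x)            # append the input handle
    desc["T"] = tf.data_type(x)      # dtype attrs are inferred from the input
    (tf.execute(desc))[1]            # run now; single-output ops index [1]
end
# ---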
tf.with_op_name(name, "Abort") do desc = tf.NodeDescription("Abort") @@ -16566,7 +19471,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + function abort_eager(; name=nothing, error_msg=nothing, exit_without_error=nothing) desc = tf.EagerOp("Abort") if error_msg !== nothing desc["error_msg"] = Base.String(error_msg) @@ -16576,6 +19481,13 @@ begin end (tf.execute(desc))[1] end + function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + if tf.eager_mode + abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + else + abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + end + end end @@ -16585,7 +19497,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) local desc tf.with_op_name(name, "AudioSpectrogram") do desc = tf.NodeDescription("AudioSpectrogram") @@ -16603,7 +19515,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function audio_spectrogram(input_::tf.TensorHandle; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) desc = tf.EagerOp("AudioSpectrogram") tf.add_input(desc, input_) if window_size !== nothing @@ -16617,6 +19529,13 @@ begin end (tf.execute(desc))[1] end + function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + if tf.eager_mode + audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + else + audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + end + end end @@ -16626,7 +19545,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "VariableShape") do desc = tf.NodeDescription("VariableShape") @@ -16638,7 +19557,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function variable_shape(input_::tf.TensorHandle; name=nothing, out_type=nothing) + function variable_shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("VariableShape") tf.add_input(desc, input_) if out_type !== nothing @@ -16646,6 +19565,13 @@ begin end (tf.execute(desc))[1] end + function variable_shape(input_; name=nothing, out_type=nothing) + if tf.eager_mode + variable_shape_eager(input_; name=name, out_type=out_type) + else + variable_shape_graph(input_; name=name, out_type=out_type) + end + end end @@ -16655,7 +19581,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fifo_queue_v2_graph(; name=nothing, 
component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueueV2") do desc = tf.NodeDescription("FIFOQueueV2") @@ -16677,7 +19603,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + function fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FIFOQueueV2") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -16696,6 +19622,13 @@ begin end (tf.execute(desc))[1] end + function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end end @@ -16705,7 +19638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Variable") do desc = tf.NodeDescription("Variable") @@ -16724,7 +19657,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + function variable_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Variable") if shape !== nothing desc["shape"] = Base.identity(shape) @@ -16740,6 +19673,13 @@ begin end (tf.execute(desc))[1] end + function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end + end end @@ -16749,7 +19689,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestCreateTreeVariable") do desc = tf.NodeDescription("TensorForestCreateTreeVariable") @@ -16760,12 +19700,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_create_tree_variable(tree_handle_::tf.TensorHandle, tree_config_::tf.TensorHandle; name=nothing) + function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestCreateTreeVariable") tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) (tf.execute(desc))[1] end + function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) + if tf.eager_mode + 
tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) + else + tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) + end + end end @@ -16775,7 +19722,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradWithArgmax") @@ -16799,7 +19746,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_grad_with_argmax(input_::tf.TensorHandle, grad_::tf.TensorHandle, argmax_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradWithArgmax") tf.add_input(desc, input_) tf.add_input(desc, grad_) @@ -16818,6 +19765,13 @@ begin desc["Targmax"] = tf.data_type(argmax_) (tf.execute(desc))[1] end + function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end + end end @@ -16827,7 +19781,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_switch(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "RefSwitch") do desc = tf.NodeDescription("RefSwitch") @@ -16844,13 +19798,20 @@ begin end out end - function ref_switch(data_::tf.TensorHandle, pred_::tf.TensorHandle; name=nothing) + function ref_switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("RefSwitch") tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) tf.execute(desc) end + function ref_switch(data_, pred_; name=nothing) + if tf.eager_mode + ref_switch_eager(data_, pred_; name=name) + else + ref_switch_graph(data_, pred_; name=name) + end + end end @@ -16860,7 +19821,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_fprint(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_fprint_graph(input_; name=nothing) local desc tf.with_op_name(name, "SdcaFprint") do desc = tf.NodeDescription("SdcaFprint") @@ -16869,11 +19830,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sdca_fprint(input_::tf.TensorHandle; name=nothing) + function sdca_fprint_eager(input_; name=nothing) desc = tf.EagerOp("SdcaFprint") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function sdca_fprint(input_; name=nothing) + if tf.eager_mode + sdca_fprint_eager(input_; name=name) + else + sdca_fprint_graph(input_; name=name) + end + end end @@ -16883,7 +19851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalChooseFastestDataset") do desc = tf.NodeDescription("ExperimentalChooseFastestDataset") @@ -16904,7 +19872,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_choose_fastest_dataset(input_datasets_::tf.TensorHandle; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + function experimental_choose_fastest_dataset_eager(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalChooseFastestDataset") tf.add_input(desc, input_datasets_) if N !== nothing @@ -16921,6 +19889,13 @@ begin end (tf.execute(desc))[1] end + function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_choose_fastest_dataset_eager(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) + else + experimental_choose_fastest_dataset_graph(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -16930,7 +19905,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyRelu") do desc = tf.NodeDescription("LeakyRelu") @@ -16943,7 +19918,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function leaky_relu(features_::tf.TensorHandle; name=nothing, alpha=nothing) + function leaky_relu_eager(features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyRelu") tf.add_input(desc, features_) if alpha !== nothing @@ -16952,6 +19927,13 @@ begin desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function leaky_relu(features_; name=nothing, alpha=nothing) + if tf.eager_mode + leaky_relu_eager(features_; name=name, alpha=alpha) + else + leaky_relu_graph(features_; name=name, alpha=alpha) + end + end end @@ -16961,7 +19943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity_n(input_; name=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) local desc tf.with_op_name(name, "IdentityN") do desc = tf.NodeDescription("IdentityN") @@ -16973,7 +19955,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function identity_n(input_::tf.TensorHandle; name=nothing, T=nothing) + function identity_n_eager(input_; name=nothing, T=nothing) desc = tf.EagerOp("IdentityN") tf.add_input(desc, input_) if T !== nothing @@ -16981,16 +19963,23 @@ begin end (tf.execute(desc))[1] end + function identity_n(input_; name=nothing, T=nothing) + if tf.eager_mode + identity_n_eager(input_; name=name, T=T) + else + identity_n_graph(input_; name=name, T=T) + end + end end """ - 
cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV2") do desc = tf.NodeDescription("CudnnRNNBackpropV2") @@ -17045,7 +20034,7 @@ begin end out end - function cudnn_rnn_backprop_v2(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, output_::tf.TensorHandle, output_h_::tf.TensorHandle, output_c_::tf.TensorHandle, output_backprop_::tf.TensorHandle, output_h_backprop_::tf.TensorHandle, output_c_backprop_::tf.TensorHandle, reserve_space_::tf.TensorHandle, host_reserved_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV2") tf.add_input(desc, input_) tf.add_input(desc, input_h_) @@ -17090,6 +20079,13 @@ begin desc["T"] = tf.data_type(reserve_space_) tf.execute(desc) end + function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end + end end @@ -17099,7 +20095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantization_range(input_, 
input_min_, input_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "RequantizationRange") do desc = tf.NodeDescription("RequantizationRange") @@ -17118,7 +20114,7 @@ begin end out end - function requantization_range(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing) + function requantization_range_eager(input_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("RequantizationRange") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -17126,6 +20122,13 @@ begin desc["Tinput"] = tf.data_type(input_) tf.execute(desc) end + function requantization_range(input_, input_min_, input_max_; name=nothing) + if tf.eager_mode + requantization_range_eager(input_, input_min_, input_max_; name=name) + else + requantization_range_graph(input_, input_min_, input_max_; name=name) + end + end end @@ -17135,7 +20138,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function maximum(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function maximum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Maximum") do desc = tf.NodeDescription("Maximum") @@ -17147,7 +20150,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function maximum(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function maximum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Maximum") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -17155,6 +20158,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function maximum(x_, y_; name=nothing) + if tf.eager_mode + maximum_eager(x_, y_; name=name) + else + maximum_graph(x_, y_; name=name) + end + end end @@ -17164,7 +20174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reshape(tensor_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) local desc tf.with_op_name(name, "Reshape") do desc = tf.NodeDescription("Reshape") @@ -17177,7 +20187,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reshape(tensor_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + function reshape_eager(tensor_, shape_; name=nothing) desc = tf.EagerOp("Reshape") tf.add_input(desc, tensor_) tf.add_input(desc, shape_) @@ -17185,6 +20195,13 @@ begin desc["Tshape"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function reshape(tensor_, shape_; name=nothing) + if tf.eager_mode + reshape_eager(tensor_, shape_; name=name) + else + reshape_graph(tensor_, shape_; name=name) + end + end end @@ -17194,7 +20211,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "MatrixSolveLs") do desc = tf.NodeDescription("MatrixSolveLs") @@ -17211,7 +20228,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_solve_ls(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle, l2_regularizer_::tf.TensorHandle; name=nothing, fast=nothing) + function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; 
name=nothing, fast=nothing) desc = tf.EagerOp("MatrixSolveLs") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -17223,6 +20240,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.eager_mode + matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end + end end @@ -17232,7 +20256,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TFRecordDataset") do desc = tf.NodeDescription("TFRecordDataset") @@ -17245,13 +20269,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tf_record_dataset(filenames_::tf.TensorHandle, compression_type_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TFRecordDataset") tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) (tf.execute(desc))[1] end + function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.eager_mode + tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end + end end @@ -17261,7 +20292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") @@ -17278,7 +20309,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_example_debug_outputs(tree_ensemble_handle_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) @@ -17290,6 +20321,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.eager_mode + boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, 
logits_dimension=logits_dimension) + end + end end @@ -17299,7 +20337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hsv_to_rgb(images_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) local desc tf.with_op_name(name, "HSVToRGB") do desc = tf.NodeDescription("HSVToRGB") @@ -17309,12 +20347,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function hsv_to_rgb(images_::tf.TensorHandle; name=nothing) + function hsv_to_rgb_eager(images_; name=nothing) desc = tf.EagerOp("HSVToRGB") tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function hsv_to_rgb(images_; name=nothing) + if tf.eager_mode + hsv_to_rgb_eager(images_; name=name) + else + hsv_to_rgb_graph(images_; name=name) + end + end end @@ -17324,7 +20369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") @@ -17341,7 +20386,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_max_intra_op_parallelism_dataset(input_dataset_::tf.TensorHandle, max_intra_op_parallelism_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, max_intra_op_parallelism_) @@ -17353,6 +20398,13 @@ begin end (tf.execute(desc))[1] end + function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -17362,7 +20414,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterDiv") do desc = tf.NodeDescription("ScatterDiv") @@ -17381,7 +20433,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_div(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterDiv") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -17394,6 +20446,13 
@@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -17403,7 +20462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) local desc tf.with_op_name(name, "DecodeWav") do desc = tf.NodeDescription("DecodeWav") @@ -17423,7 +20482,7 @@ begin end out end - function decode_wav(contents_::tf.TensorHandle; name=nothing, desired_channels=nothing, desired_samples=nothing) + function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) desc = tf.EagerOp("DecodeWav") tf.add_input(desc, contents_) if desired_channels !== nothing @@ -17434,6 +20493,13 @@ begin end tf.execute(desc) end + function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + if tf.eager_mode + decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + else + decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + end + end end @@ -17443,7 +20509,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log") do desc = tf.NodeDescription("Log") @@ -17453,12 +20519,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function log(x_::tf.TensorHandle; name=nothing) + function log_eager(x_; name=nothing) desc = tf.EagerOp("Log") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function log(x_; name=nothing) + if tf.eager_mode + log_eager(x_; name=name) + else + log_graph(x_; name=name) + end + end end @@ -17468,7 +20541,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "SaveV2") do desc = tf.NodeDescription("SaveV2") @@ -17486,7 +20559,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function save_v2(prefix_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shape_and_slices_::tf.TensorHandle, tensors_::tf.TensorHandle; name=nothing, dtypes=nothing) + function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) desc = tf.EagerOp("SaveV2") tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) @@ -17497,6 +20570,13 @@ begin end (tf.execute(desc))[1] end + function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + if tf.eager_mode + save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + else + save_v2_graph(prefix_, 
tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + end + end end @@ -17506,7 +20586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deep_copy(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deep_copy_graph(x_; name=nothing) local desc tf.with_op_name(name, "DeepCopy") do desc = tf.NodeDescription("DeepCopy") @@ -17516,12 +20596,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function deep_copy(x_::tf.TensorHandle; name=nothing) + function deep_copy_eager(x_; name=nothing) desc = tf.EagerOp("DeepCopy") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function deep_copy(x_; name=nothing) + if tf.eager_mode + deep_copy_eager(x_; name=name) + else + deep_copy_graph(x_; name=name) + end + end end @@ -17531,7 +20618,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ModelDataset") do desc = tf.NodeDescription("ModelDataset") @@ -17546,7 +20633,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function model_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ModelDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -17557,6 +20644,13 @@ begin end (tf.execute(desc))[1] end + function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -17566,7 +20660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSequenceExample") do desc = 
tf.NodeDescription("ParseSequenceExample") @@ -17629,7 +20723,7 @@ begin end out end - function parse_sequence_example(serialized_::tf.TensorHandle, debug_name_::tf.TensorHandle, context_dense_defaults_::tf.TensorHandle; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSequenceExample") tf.add_input(desc, serialized_) tf.add_input(desc, debug_name_) @@ -17681,6 +20775,13 @@ begin end tf.execute(desc) end + function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.eager_mode + parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, 
feature_list_dense_shapes=feature_list_dense_shapes) + end + end end @@ -17690,7 +20791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sinh(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sinh") do desc = tf.NodeDescription("Sinh") @@ -17700,12 +20801,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sinh(x_::tf.TensorHandle; name=nothing) + function sinh_eager(x_; name=nothing) desc = tf.EagerOp("Sinh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function sinh(x_; name=nothing) + if tf.eager_mode + sinh_eager(x_; name=name) + else + sinh_graph(x_; name=name) + end + end end @@ -17715,7 +20823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorV2") do desc = tf.NodeDescription("IteratorV2") @@ -17734,7 +20842,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + function iterator_v2_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorV2") if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) @@ -17750,6 +20858,13 @@ begin end (tf.execute(desc))[1] end + function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -17759,7 +20874,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV2") do desc = tf.NodeDescription("TensorArrayWriteV2") @@ -17775,7 +20890,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_write_v2(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWriteV2") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -17784,6 +20899,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) + end + end end @@ -17793,7 +20915,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListElementShape") do desc = tf.NodeDescription("TensorListElementShape") @@ -17805,7 +20927,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_element_shape(input_handle_::tf.TensorHandle; name=nothing, shape_type=nothing) + function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListElementShape") tf.add_input(desc, input_handle_) if shape_type !== nothing @@ -17813,6 +20935,13 @@ begin end (tf.execute(desc))[1] end + function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) + else + tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) + end + end end @@ -17822,7 +20951,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_size_v2(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSizeV2") do desc = tf.NodeDescription("QueueSizeV2") @@ -17831,11 +20960,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_size_v2(handle_::tf.TensorHandle; name=nothing) + function queue_size_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSizeV2") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function queue_size_v2(handle_; name=nothing) + if tf.eager_mode + queue_size_v2_eager(handle_; name=name) + else + queue_size_v2_graph(handle_; name=name) + end + end end @@ -17845,7 +20981,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function expm1(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function expm1_graph(x_; name=nothing) local desc tf.with_op_name(name, "Expm1") do desc = tf.NodeDescription("Expm1") @@ -17855,12 +20991,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function expm1(x_::tf.TensorHandle; name=nothing) + function expm1_eager(x_; name=nothing) desc = tf.EagerOp("Expm1") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function expm1(x_; name=nothing) + if tf.eager_mode + expm1_eager(x_; name=name) + else + expm1_graph(x_; name=name) + end + end end @@ -17870,7 +21013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixBandPart") do desc = tf.NodeDescription("BatchMatrixBandPart") @@ -17884,7 +21027,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_band_part(input_::tf.TensorHandle, num_lower_::tf.TensorHandle, num_upper_::tf.TensorHandle; name=nothing) + function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("BatchMatrixBandPart") tf.add_input(desc, input_) 
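# --- [editor's sketch, not part of the generated patch] ---------------------
# Every hunk in this file applies the same mechanical rewrite: the old single
# wrapper becomes `<op>_graph` (builds a NodeDescription under with_op_name),
# gains an `<op>_eager` twin (builds an EagerOp and runs it immediately), and
# a new public `<op>` dispatches between the two on the global `tf.eager_mode`
# flag. A minimal sketch of that shape, assuming the same `tf` helpers used
# throughout this patch; the wrapper name `my_neg` is hypothetical:
function my_neg_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "Neg") do
        desc = tf.NodeDescription("Neg")
        tf.add_input(desc, x_)
    end
    tf.Tensor(tf.Operation(desc))          # graph mode returns a symbolic Tensor
end
function my_neg_eager(x_; name=nothing)
    desc = tf.EagerOp("Neg")
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)           # eager mode sets type attrs explicitly
    (tf.execute(desc))[1]                  # runs now; returns a handle to the result
end
function my_neg(x_; name=nothing)
    if tf.eager_mode
        my_neg_eager(x_; name=name)
    else
        my_neg_graph(x_; name=name)
    end
end
# ---------------------------------------------------------------------------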
tf.add_input(desc, num_lower_) @@ -17892,6 +21035,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + if tf.eager_mode + batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) + else + batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + end + end end @@ -17901,7 +21051,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ConcatenateDataset") do desc = tf.NodeDescription("ConcatenateDataset") @@ -17918,7 +21068,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function concatenate_dataset(input_dataset_::tf.TensorHandle, another_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ConcatenateDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, another_dataset_) @@ -17930,6 +21080,13 @@ begin end (tf.execute(desc))[1] end + function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -17939,7 +21096,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_gif(contents_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_gif_graph(contents_; name=nothing) local desc tf.with_op_name(name, "DecodeGif") do desc = tf.NodeDescription("DecodeGif") @@ -17948,21 +21105,28 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_gif(contents_::tf.TensorHandle; name=nothing) + function decode_gif_eager(contents_; name=nothing) desc = tf.EagerOp("DecodeGif") tf.add_input(desc, contents_) (tf.execute(desc))[1] end + function decode_gif(contents_; name=nothing) + if tf.eager_mode + decode_gif_eager(contents_; name=name) + else + decode_gif_graph(contents_; name=name) + end + end end """ - tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) + tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, 
Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicate") do desc = tf.NodeDescription("TPUReplicate") @@ -18019,7 +21183,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_replicate(inputs_::tf.TensorHandle, broadcast_inputs_::tf.TensorHandle, variables_::tf.TensorHandle, guaranteed_constants_::tf.TensorHandle; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) desc = tf.EagerOp("TPUReplicate") tf.add_input(desc, inputs_) tf.add_input(desc, broadcast_inputs_) @@ -18069,6 +21233,13 @@ begin end (tf.execute(desc))[1] end + function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + if tf.eager_mode + tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) + else + tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) + end + end end @@ -18078,7 +21249,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEigV2") do desc = tf.NodeDescription("BatchSelfAdjointEigV2") @@ -18096,7 +21267,7 @@ begin end out end - function batch_self_adjoint_eig_v2(input_::tf.TensorHandle; name=nothing, compute_v=nothing) + function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("BatchSelfAdjointEigV2") tf.add_input(desc, input_) if compute_v !== nothing @@ -18105,6 +21276,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.eager_mode + batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end + end end @@ -18114,7 +21292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shape(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Shape") do desc = tf.NodeDescription("Shape") @@ -18127,7 +21305,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function shape(input_::tf.TensorHandle; name=nothing, out_type=nothing) + function shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Shape") tf.add_input(desc, input_) if out_type !== nothing @@ -18136,6 +21314,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function shape(input_; name=nothing, out_type=nothing) + if tf.eager_mode + shape_eager(input_; name=name, out_type=out_type) + else + shape_graph(input_; name=name, out_type=out_type) + end + end end @@ -18145,7 +21330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RepeatDataset") do desc = tf.NodeDescription("RepeatDataset") @@ -18162,7 +21347,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function repeat_dataset(input_dataset_::tf.TensorHandle, count_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RepeatDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) @@ -18174,16 +21359,23 @@ begin end (tf.execute(desc))[1] end + function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end """ - crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=bilinear) + crop_and_resize_grad_boxes(grads, image, boxes, 
box_ind; method=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradBoxes") do desc = tf.NodeDescription("CropAndResizeGradBoxes") @@ -18202,7 +21394,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function crop_and_resize_grad_boxes(grads_::tf.TensorHandle, image_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle; name=nothing, method=nothing) + function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradBoxes") tf.add_input(desc, grads_) tf.add_input(desc, image_) @@ -18214,6 +21406,13 @@ begin desc["T"] = tf.data_type(image_) (tf.execute(desc))[1] end + function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + if tf.eager_mode + crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) + else + crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method) + end + end end @@ -18223,7 +21422,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "ReciprocalGrad") do desc = tf.NodeDescription("ReciprocalGrad") @@ -18235,7 +21434,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reciprocal_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + function reciprocal_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("ReciprocalGrad") tf.add_input(desc, y_) tf.add_input(desc, dy_) @@ -18243,6 +21442,13 @@ begin desc["T"] = tf.data_type(dy_) (tf.execute(desc))[1] end + function reciprocal_grad(y_, dy_; name=nothing) + if tf.eager_mode + reciprocal_grad_eager(y_, dy_; name=name) + else + reciprocal_grad_graph(y_, dy_; name=name) + end + end end @@ -18252,7 +21458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixSolve") do desc = tf.NodeDescription("BatchMatrixSolve") @@ -18267,7 +21473,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, adjoint=nothing) + function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixSolve") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -18278,6 +21484,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.eager_mode + batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end + end end @@ -18287,7 +21500,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTableV2") do desc = tf.NodeDescription("MutableHashTableV2") @@ -18309,7 +21522,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + function mutable_hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("MutableHashTableV2") if container !== nothing desc["container"] = Base.String(container) @@ -18328,6 +21541,13 @@ begin end (tf.execute(desc))[1] end + function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.eager_mode + mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end + end end @@ -18337,7 +21557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function exit(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "Exit") do desc = tf.NodeDescription("Exit") @@ -18347,12 +21567,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function exit(data_::tf.TensorHandle; name=nothing) + function exit_eager(data_; name=nothing) desc = tf.EagerOp("Exit") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function exit(data_; name=nothing) + if tf.eager_mode + exit_eager(data_; name=name) + else + exit_graph(data_; name=name) + end + end end @@ -18362,7 +21589,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRN") do desc = tf.NodeDescription("LRN") @@ -18384,7 +21611,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lrn(input_::tf.TensorHandle; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) desc = tf.EagerOp("LRN") tf.add_input(desc, input_) if depth_radius !== nothing @@ -18402,6 +21629,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.eager_mode + lrn_eager(input_; name=name, 
depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end + end end @@ -18411,7 +21645,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "StatelessIf") do desc = tf.NodeDescription("StatelessIf") @@ -18435,7 +21669,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_if(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("StatelessIf") tf.add_input(desc, cond_) tf.add_input(desc, input_) @@ -18454,6 +21688,13 @@ begin desc["Tcond"] = tf.data_type(cond_) (tf.execute(desc))[1] end + function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.eager_mode + stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end + end end @@ -18463,7 +21704,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListSetItem") do desc = tf.NodeDescription("TensorListSetItem") @@ -18480,7 +21721,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_set_item(input_handle_::tf.TensorHandle, index_::tf.TensorHandle, item_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListSetItem") tf.add_input(desc, input_handle_) tf.add_input(desc, index_) @@ -18491,6 +21732,13 @@ begin desc["element_dtype"] = tf.data_type(item_) (tf.execute(desc))[1] end + function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + if tf.eager_mode + tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + else + tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + end + end end @@ -18500,7 +21748,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rsqrt(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rsqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rsqrt") do desc = tf.NodeDescription("Rsqrt") @@ -18510,12 +21758,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rsqrt(x_::tf.TensorHandle; name=nothing) + function rsqrt_eager(x_; name=nothing) desc = tf.EagerOp("Rsqrt") tf.add_input(desc, x_) 
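# --- [editor's sketch, not part of the generated patch] ---------------------
# In graph mode the "T" attribute is inferred when the node is finalized, but
# an EagerOp has no graph to infer from, so each eager wrapper above stamps
# the dtype attribute directly off an input handle with tf.data_type (see
# rsqrt_eager and one_hot_eager nearby, which set desc["T"] once per typed
# input). A sketch of that idiom for a hypothetical two-input wrapper, under
# the same assumptions; "AddV2" is used as the underlying op:
function my_add_eager(x_, y_; name=nothing)
    desc = tf.EagerOp("AddV2")
    tf.add_input(desc, x_)
    tf.add_input(desc, y_)
    desc["T"] = tf.data_type(x_)   # the generated code repeats this per typed input,
    desc["T"] = tf.data_type(y_)   # so the last stamped input wins
    (tf.execute(desc))[1]
end
# ---------------------------------------------------------------------------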
desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function rsqrt(x_; name=nothing) + if tf.eager_mode + rsqrt_eager(x_; name=name) + else + rsqrt_graph(x_; name=name) + end + end end @@ -18525,7 +21780,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") @@ -18577,7 +21832,7 @@ begin end out end - function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle, summand_::tf.TensorHandle, min_summand_::tf.TensorHandle, max_summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndReluAndRequantize") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -18609,6 +21864,13 @@ begin desc["Tsummand"] = tf.data_type(summand_) tf.execute(desc) end + function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -18618,7 +21880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function delete_session_tensor(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) local desc tf.with_op_name(name, "DeleteSessionTensor") do desc = 
tf.NodeDescription("DeleteSessionTensor") @@ -18627,11 +21889,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function delete_session_tensor(handle_::tf.TensorHandle; name=nothing) + function delete_session_tensor_eager(handle_; name=nothing) desc = tf.EagerOp("DeleteSessionTensor") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function delete_session_tensor(handle_; name=nothing) + if tf.eager_mode + delete_session_tensor_eager(handle_; name=name) + else + delete_session_tensor_graph(handle_; name=name) + end + end end @@ -18641,7 +21910,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) local desc tf.with_op_name(name, "OneHot") do desc = tf.NodeDescription("OneHot") @@ -18665,7 +21934,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function one_hot(indices_::tf.TensorHandle, depth_::tf.TensorHandle, on_value_::tf.TensorHandle, off_value_::tf.TensorHandle; name=nothing, axis=nothing) + function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) desc = tf.EagerOp("OneHot") tf.add_input(desc, indices_) tf.add_input(desc, depth_) @@ -18682,6 +21951,13 @@ begin desc["T"] = tf.data_type(off_value_) (tf.execute(desc))[1] end + function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + if tf.eager_mode + one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + else + one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + end + end end @@ -18691,7 +21967,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrl") do desc = tf.NodeDescription("ResourceApplyFtrl") @@ -18718,7 +21994,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrl") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -18738,6 +22014,13 @@ begin desc["T"] = tf.data_type(lr_power_) (tf.execute(desc))[1] end + function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end + end end @@ -18747,7 +22030,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, 
sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizerV2") do desc = tf.NodeDescription("SdcaOptimizerV2") @@ -18806,7 +22089,7 @@ begin end out end - function sdca_optimizer_v2(sparse_example_indices_::tf.TensorHandle, sparse_feature_indices_::tf.TensorHandle, sparse_feature_values_::tf.TensorHandle, dense_features_::tf.TensorHandle, example_weights_::tf.TensorHandle, example_labels_::tf.TensorHandle, sparse_indices_::tf.TensorHandle, sparse_weights_::tf.TensorHandle, dense_weights_::tf.TensorHandle, example_state_data_::tf.TensorHandle; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizerV2") tf.add_input(desc, sparse_example_indices_) tf.add_input(desc, sparse_feature_indices_) @@ -18847,6 +22130,13 @@ begin end tf.execute(desc) end + function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.eager_mode + sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, 
example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end + end end @@ -18856,7 +22146,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueue") do desc = tf.NodeDescription("QueueEnqueue") @@ -18873,7 +22163,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_enqueue(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueue") tf.add_input(desc, handle_) tf.add_input(desc, components_) @@ -18885,16 +22175,23 @@ begin end (tf.execute(desc))[1] end + function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end + end end """ - conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + conditional_accumulator(; container=, shared_name=, reduction_type=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "ConditionalAccumulator") do desc = tf.NodeDescription("ConditionalAccumulator") @@ -18916,7 +22213,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) desc = tf.EagerOp("ConditionalAccumulator") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -18935,6 +22232,13 @@ begin end (tf.execute(desc))[1] end + function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.eager_mode + conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end + end end @@ -18944,7 +22248,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, 
beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCBeamSearchDecoder") do desc = tf.NodeDescription("CTCBeamSearchDecoder") @@ -18969,7 +22273,7 @@ begin end out end - function ctc_beam_search_decoder(inputs_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) desc = tf.EagerOp("CTCBeamSearchDecoder") tf.add_input(desc, inputs_) tf.add_input(desc, sequence_length_) @@ -18984,6 +22288,13 @@ begin end tf.execute(desc) end + function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + if tf.eager_mode + ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + else + ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + end + end end @@ -18993,7 +22304,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReader") do desc = tf.NodeDescription("WholeFileReader") @@ -19006,7 +22317,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + function whole_file_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("WholeFileReader") if container !== nothing desc["container"] = Base.String(container) @@ -19016,6 +22327,13 @@ begin end (tf.execute(desc))[1] end + function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -19025,7 +22343,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyRMSProp") do desc = tf.NodeDescription("ApplyRMSProp") @@ -19052,7 +22370,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyRMSProp") 
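# --- [editor's sketch, not part of the generated patch] ---------------------
# tf.execute(desc) returns a vector of output handles. Single-output wrappers
# above unwrap it with `(tf.execute(desc))[1]`, while multi-output ops such as
# CTCBeamSearchDecoder or SdcaOptimizerV2 return the whole vector via a bare
# `tf.execute(desc)`. A sketch under those assumptions, using "Unique" (two
# outputs: values and indices) as the example; the wrapper name is hypothetical:
function my_unique_eager(x_; name=nothing)
    desc = tf.EagerOp("Unique")
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)
    results = tf.execute(desc)     # vector of handles, one per op output
    (results[1], results[2])       # (unique values, index of each element)
end
# ---------------------------------------------------------------------------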
tf.add_input(desc, var_) tf.add_input(desc, ms_) @@ -19075,6 +22393,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -19084,7 +22409,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_saturation(images_, scale_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) local desc tf.with_op_name(name, "AdjustSaturation") do desc = tf.NodeDescription("AdjustSaturation") @@ -19096,13 +22421,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function adjust_saturation(images_::tf.TensorHandle, scale_::tf.TensorHandle; name=nothing) + function adjust_saturation_eager(images_, scale_; name=nothing) desc = tf.EagerOp("AdjustSaturation") tf.add_input(desc, images_) tf.add_input(desc, scale_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function adjust_saturation(images_, scale_; name=nothing) + if tf.eager_mode + adjust_saturation_eager(images_, scale_; name=name) + else + adjust_saturation_graph(images_, scale_; name=name) + end + end end @@ -19112,7 +22444,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) local desc tf.with_op_name(name, "LookupTableRemoveV2") do desc = tf.NodeDescription("LookupTableRemoveV2") @@ -19124,13 +22456,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_remove_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle; name=nothing) + function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing) desc = tf.EagerOp("LookupTableRemoveV2") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) desc["Tin"] = tf.data_type(keys_) (tf.execute(desc))[1] end + function lookup_table_remove_v2(table_handle_, keys_; name=nothing) + if tf.eager_mode + lookup_table_remove_v2_eager(table_handle_, keys_; name=name) + else + lookup_table_remove_v2_graph(table_handle_, keys_; name=name) + end + end end @@ -19140,7 +22479,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueClose") do desc = tf.NodeDescription("QueueClose") @@ -19152,7 +22491,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_close(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing) + function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueClose") tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing @@ -19160,6 +22499,13 @@ begin end (tf.execute(desc))[1] end + function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + 
if tf.eager_mode + queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end + end end @@ -19169,7 +22515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "PrefetchDataset") do desc = tf.NodeDescription("PrefetchDataset") @@ -19186,7 +22532,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function prefetch_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("PrefetchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) @@ -19198,6 +22544,13 @@ begin end (tf.execute(desc))[1] end + function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -19207,7 +22560,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "MapDataset") do desc = tf.NodeDescription("MapDataset") @@ -19236,7 +22589,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("MapDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -19260,6 +22613,13 @@ begin end (tf.execute(desc))[1] end + function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.eager_mode + map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + else + 
map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end + end end @@ -19269,7 +22629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBias") do desc = tf.NodeDescription("QuantizedConv2DWithBias") @@ -19309,7 +22669,7 @@ begin end out end - function quantized_conv2d_with_bias(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBias") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -19334,6 +22694,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -19343,7 +22710,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV3") do desc = tf.NodeDescription("TensorArrayReadV3") @@ -19359,7 +22726,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_read_v3(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing) + function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV3") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -19369,6 +22736,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.eager_mode + tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end + end 
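# --- [editor's note, not part of the generated patch] -----------------------
# Because every public wrapper re-dispatches on tf.eager_mode with an
# identical keyword list, call sites such as the dataset pipeline ops above
# stay mode-agnostic. The generated dispatchers forward each keyword
# explicitly; a hand-written equivalent could simply splat them instead —
# a sketch, with the hypothetical name map_dataset_dispatch:
map_dataset_dispatch(args...; kwargs...) =
    tf.eager_mode ? map_dataset_eager(args...; kwargs...) :
                    map_dataset_graph(args...; kwargs...)
# The explicit forwarding in the generated code trades brevity for keyword
# validation at the dispatch layer, which splatting would defer to the
# inner methods.
# ---------------------------------------------------------------------------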
end @@ -19378,7 +22752,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function identity(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "Identity") do desc = tf.NodeDescription("Identity") @@ -19388,12 +22762,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function identity(input_::tf.TensorHandle; name=nothing) + function identity_eager(input_; name=nothing) desc = tf.EagerOp("Identity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function identity(input_; name=nothing) + if tf.eager_mode + identity_eager(input_; name=name) + else + identity_graph(input_; name=name) + end + end end @@ -19403,7 +22784,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) local desc tf.with_op_name(name, "Print") do desc = tf.NodeDescription("Print") @@ -19427,7 +22808,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function print(input_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) desc = tf.EagerOp("Print") tf.add_input(desc, input_) tf.add_input(desc, data_) @@ -19446,6 +22827,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + if tf.eager_mode + print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + else + print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + end + end end @@ -19455,7 +22843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastSend") do desc = tf.NodeDescription("CollectiveBcastSend") @@ -19477,7 +22865,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function collective_bcast_send(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) desc = tf.EagerOp("CollectiveBcastSend") tf.add_input(desc, input_) if group_size !== nothing @@ -19495,6 +22883,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.eager_mode + collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + 
collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end + end end @@ -19504,7 +22899,7 @@ end Converts a list of tensors to an array of tensors. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) local desc tf.with_op_name(name, "_ListToArray") do desc = tf.NodeDescription("_ListToArray") @@ -19524,7 +22919,7 @@ begin end out end - function _list_to_array(input_::tf.TensorHandle; name=nothing, Tin=nothing, N=nothing) + function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing) desc = tf.EagerOp("_ListToArray") tf.add_input(desc, input_) if Tin !== nothing @@ -19535,6 +22930,13 @@ begin end tf.execute(desc) end + function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) + if tf.eager_mode + _list_to_array_eager(input_; name=name, Tin=Tin, N=N) + else + _list_to_array_graph(input_; name=name, Tin=Tin, N=N) + end + end end @@ -19544,7 +22946,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) local desc tf.with_op_name(name, "NegTrain") do desc = tf.NodeDescription("NegTrain") @@ -19567,7 +22969,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function neg_train(w_in_::tf.TensorHandle, w_out_::tf.TensorHandle, examples_::tf.TensorHandle, labels_::tf.TensorHandle, lr_::tf.TensorHandle; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) desc = tf.EagerOp("NegTrain") tf.add_input(desc, w_in_) tf.add_input(desc, w_out_) @@ -19582,6 +22984,13 @@ begin end (tf.execute(desc))[1] end + function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + if tf.eager_mode + neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + else + neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + end + end end @@ -19591,7 +23000,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function worker_heartbeat(request_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) local desc tf.with_op_name(name, "WorkerHeartbeat") do desc = tf.NodeDescription("WorkerHeartbeat") @@ -19600,11 +23009,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function worker_heartbeat(request_::tf.TensorHandle; name=nothing) + function worker_heartbeat_eager(request_; name=nothing) desc = tf.EagerOp("WorkerHeartbeat") tf.add_input(desc, request_) (tf.execute(desc))[1] end + function worker_heartbeat(request_; name=nothing) + if tf.eager_mode + worker_heartbeat_eager(request_; name=name) + else + 
worker_heartbeat_graph(request_; name=name) + end + end end @@ -19614,7 +23030,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) local desc tf.with_op_name(name, "MergeV2Checkpoints") do desc = tf.NodeDescription("MergeV2Checkpoints") @@ -19628,7 +23044,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function merge_v2checkpoints(checkpoint_prefixes_::tf.TensorHandle, destination_prefix_::tf.TensorHandle; name=nothing, delete_old_dirs=nothing) + function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) desc = tf.EagerOp("MergeV2Checkpoints") tf.add_input(desc, checkpoint_prefixes_) tf.add_input(desc, destination_prefix_) @@ -19637,6 +23053,13 @@ begin end (tf.execute(desc))[1] end + function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + if tf.eager_mode + merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + else + merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + end + end end @@ -19646,7 +23069,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) local desc tf.with_op_name(name, "CollectivePermute") do desc = tf.NodeDescription("CollectivePermute") @@ -19658,13 +23081,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function collective_permute(input_::tf.TensorHandle, source_target_pairs_::tf.TensorHandle; name=nothing) + function collective_permute_eager(input_, source_target_pairs_; name=nothing) desc = tf.EagerOp("CollectivePermute") tf.add_input(desc, input_) tf.add_input(desc, source_target_pairs_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function collective_permute(input_, source_target_pairs_; name=nothing) + if tf.eager_mode + collective_permute_eager(input_, source_target_pairs_; name=name) + else + collective_permute_graph(input_, source_target_pairs_; name=name) + end + end end @@ -19674,7 +23104,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV3") do desc = tf.NodeDescription("QuantizeAndDequantizeV3") @@ -19696,7 +23126,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function quantize_and_dequantize_v3(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, num_bits_::tf.TensorHandle; name=nothing, signed_input=nothing, range_given=nothing) + function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; 
name=nothing, signed_input=nothing, range_given=nothing) desc = tf.EagerOp("QuantizeAndDequantizeV3") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -19713,6 +23143,13 @@ begin desc["T"] = tf.data_type(input_max_) (tf.execute(desc))[1] end + function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + if tf.eager_mode + quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + else + quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + end + end end @@ -19722,7 +23159,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTable") do desc = tf.NodeDescription("HashTable") @@ -19744,7 +23181,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + function hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("HashTable") if container !== nothing desc["container"] = Base.String(container) @@ -19763,6 +23200,13 @@ begin end (tf.execute(desc))[1] end + function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.eager_mode + hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end + end end @@ -19772,7 +23216,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softplus_grad(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftplusGrad") do desc = tf.NodeDescription("SoftplusGrad") @@ -19784,7 +23228,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function softplus_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + function softplus_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftplusGrad") tf.add_input(desc, gradients_) tf.add_input(desc, features_) @@ -19792,6 +23236,13 @@ begin desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function softplus_grad(gradients_, features_; name=nothing) + if tf.eager_mode + softplus_grad_eager(gradients_, features_; name=name) + else + softplus_grad_graph(gradients_, features_; name=name) + end + end end @@ -19801,7 +23252,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_reader(; 
name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReader") do desc = tf.NodeDescription("FixedLengthRecordReader") @@ -19826,7 +23277,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + function fixed_length_record_reader_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FixedLengthRecordReader") if header_bytes !== nothing desc["header_bytes"] = Base.Int(header_bytes) @@ -19848,6 +23299,13 @@ begin end (tf.execute(desc))[1] end + function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + else + fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + end + end end @@ -19857,7 +23315,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV2") do desc = tf.NodeDescription("TensorArrayScatterV2") @@ -19873,7 +23331,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_scatter_v2(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatterV2") tf.add_input(desc, handle_) tf.add_input(desc, indices_) @@ -19882,6 +23340,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name) + end + end end @@ -19891,7 +23356,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_json_example(json_examples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) local desc tf.with_op_name(name, "DecodeJSONExample") do desc = tf.NodeDescription("DecodeJSONExample") @@ -19900,21 +23365,28 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
decode_json_example(json_examples_::tf.TensorHandle; name=nothing) + function decode_json_example_eager(json_examples_; name=nothing) desc = tf.EagerOp("DecodeJSONExample") tf.add_input(desc, json_examples_) (tf.execute(desc))[1] end + function decode_json_example(json_examples_; name=nothing) + if tf.eager_mode + decode_json_example_eager(json_examples_; name=name) + else + decode_json_example_graph(json_examples_; name=name) + end + end end """ - fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGradV2") do desc = tf.NodeDescription("FusedBatchNormGradV2") @@ -19950,7 +23422,7 @@ begin end out end - function fused_batch_norm_grad_v2(y_backprop_::tf.TensorHandle, x_::tf.TensorHandle, scale_::tf.TensorHandle, reserve_space_1_::tf.TensorHandle, reserve_space_2_::tf.TensorHandle; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormGradV2") tf.add_input(desc, y_backprop_) tf.add_input(desc, x_) @@ -19975,6 +23447,13 @@ begin desc["U"] = tf.data_type(reserve_space_2_) tf.execute(desc) end + function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.eager_mode + fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + end + end end @@ -19984,7 +23463,7 @@ end Cast x of type SrcT to y of DstT. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "_HostCast") do desc = tf.NodeDescription("_HostCast") @@ -20003,7 +23482,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _host_cast(x_::tf.TensorHandle; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) desc = tf.EagerOp("_HostCast") tf.add_input(desc, x_) if SrcT !== nothing @@ -20018,6 +23497,13 @@ begin desc["SrcT"] = tf.data_type(x_) (tf.execute(desc))[1] end + function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.eager_mode + _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end + end end @@ -20027,7 +23513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReader") do desc = tf.NodeDescription("TFRecordReader") @@ -20043,7 +23529,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + function tf_record_reader_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) desc = tf.EagerOp("TFRecordReader") if container !== nothing desc["container"] = Base.String(container) @@ -20056,6 +23542,13 @@ begin end (tf.execute(desc))[1] end + function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.eager_mode + tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end + end end @@ -20065,7 +23558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "While") do desc = tf.NodeDescription("While") @@ -20089,7 +23582,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function while_(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) desc = tf.EagerOp("While") tf.add_input(desc, input_) if T !== nothing @@ -20109,6 +23602,13 @@ begin end (tf.execute(desc))[1] end + function while_(input_; name=nothing, 
T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + if tf.eager_mode + while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations) + else + while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations) + end + end end @@ -20118,7 +23618,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "StatelessMultinomial") do desc = tf.NodeDescription("StatelessMultinomial") @@ -20136,7 +23636,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_multinomial(logits_::tf.TensorHandle, num_samples_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, output_dtype=nothing) + function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) desc = tf.EagerOp("StatelessMultinomial") tf.add_input(desc, logits_) tf.add_input(desc, num_samples_) @@ -20148,6 +23648,13 @@ begin desc["Tseed"] = tf.data_type(seed_) (tf.execute(desc))[1] end + function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + if tf.eager_mode + stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + else + stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + end + end end @@ -20157,7 +23664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterAdd") do desc = tf.NodeDescription("ScatterAdd") @@ -20176,7 +23683,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_add(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterAdd") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -20189,6 +23696,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -20198,7 +23712,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conj(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conj_graph(input_; name=nothing) local desc tf.with_op_name(name, "Conj") do desc = tf.NodeDescription("Conj") @@ -20208,12 +23722,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conj(input_::tf.TensorHandle; name=nothing) + function conj_eager(input_; name=nothing) desc = tf.EagerOp("Conj") tf.add_input(desc, input_) desc["T"] = 
tf.data_type(input_) (tf.execute(desc))[1] end + function conj(input_; name=nothing) + if tf.eager_mode + conj_eager(input_; name=name) + else + conj_graph(input_; name=name) + end + end end @@ -20223,7 +23744,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ParallelDynamicStitch") do desc = tf.NodeDescription("ParallelDynamicStitch") @@ -20238,7 +23759,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parallel_dynamic_stitch(indices_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, N=nothing) + function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) desc = tf.EagerOp("ParallelDynamicStitch") tf.add_input(desc, indices_) tf.add_input(desc, data_) @@ -20248,6 +23769,13 @@ begin desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) + if tf.eager_mode + parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N) + else + parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N) + end + end end @@ -20257,7 +23785,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) local desc tf.with_op_name(name, "MakeIterator") do desc = tf.NodeDescription("MakeIterator") @@ -20268,12 +23796,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function make_iterator(dataset_::tf.TensorHandle, iterator_::tf.TensorHandle; name=nothing) + function make_iterator_eager(dataset_, iterator_; name=nothing) desc = tf.EagerOp("MakeIterator") tf.add_input(desc, dataset_) tf.add_input(desc, iterator_) (tf.execute(desc))[1] end + function make_iterator(dataset_, iterator_; name=nothing) + if tf.eager_mode + make_iterator_eager(dataset_, iterator_; name=name) + else + make_iterator_graph(dataset_, iterator_; name=name) + end + end end @@ -20283,7 +23818,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft3d(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT3D") do desc = tf.NodeDescription("RFFT3D") @@ -20294,12 +23829,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rfft3d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function rfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT3D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function rfft3d(input_, fft_length_; name=nothing) + if tf.eager_mode + rfft3d_eager(input_, fft_length_; name=name) + else + rfft3d_graph(input_, fft_length_; name=name) + end + end end @@ -20309,7 +23851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSumSparse") do desc = tf.NodeDescription("SparseReduceSumSparse") @@ -20333,7 +23875,7 @@ begin end out end - function sparse_reduce_sum_sparse(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSumSparse") tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) @@ -20345,6 +23887,13 @@ begin desc["T"] = tf.data_type(input_values_) tf.execute(desc) end + function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.eager_mode + sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end + end end @@ -20354,7 +23903,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveGather") do desc = tf.NodeDescription("CollectiveGather") @@ -20376,7 +23925,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function collective_gather(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + function collective_gather_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) desc = tf.EagerOp("CollectiveGather") tf.add_input(desc, input_) if group_size !== nothing @@ -20394,6 +23943,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.eager_mode + collective_gather_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + collective_gather_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end + end end @@ -20403,7 +23959,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) local desc tf.with_op_name(name, "CombinedNonMaxSuppression") do desc = tf.NodeDescription("CombinedNonMaxSuppression") @@ -20430,7 +23986,7 @@ begin end out end - function combined_non_max_suppression(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, 
max_output_size_per_class_::tf.TensorHandle, max_total_size_::tf.TensorHandle, iou_threshold_::tf.TensorHandle, score_threshold_::tf.TensorHandle; name=nothing, pad_per_class=nothing) + function combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) desc = tf.EagerOp("CombinedNonMaxSuppression") tf.add_input(desc, boxes_) tf.add_input(desc, scores_) @@ -20443,6 +23999,13 @@ begin end tf.execute(desc) end + function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + if tf.eager_mode + combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class) + else + combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class) + end + end end @@ -20452,7 +24015,7 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) local desc tf.with_op_name(name, "_ScopedAllocator") do desc = tf.NodeDescription("_ScopedAllocator") @@ -20474,7 +24037,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + function _scoped_allocator_eager(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) desc = tf.EagerOp("_ScopedAllocator") if shapes !== nothing desc["shapes"] = map(Base.identity, shapes) @@ -20493,6 +24056,13 @@ begin end (tf.execute(desc))[1] end + function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + if tf.eager_mode + _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + else + _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + end + end end @@ -20502,7 +24072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") @@ -20527,7 +24097,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adadelta_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, 
updates_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -20546,6 +24116,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -20555,7 +24132,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) local desc tf.with_op_name(name, "SparseAdd") do desc = tf.NodeDescription("SparseAdd") @@ -20583,7 +24160,7 @@ begin end out end - function sparse_add(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle, thresh_::tf.TensorHandle; name=nothing) + function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) desc = tf.EagerOp("SparseAdd") tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) @@ -20597,6 +24174,13 @@ begin desc["Treal"] = tf.data_type(thresh_) tf.execute(desc) end + function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + if tf.eager_mode + sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + else + sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + end + end end @@ -20606,7 +24190,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCGreedyDecoder") do desc = tf.NodeDescription("CTCGreedyDecoder") @@ -20625,7 +24209,7 @@ begin end out end - function ctc_greedy_decoder(inputs_::tf.TensorHandle, sequence_length_::tf.TensorHandle; name=nothing, merge_repeated=nothing) + function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) desc = tf.EagerOp("CTCGreedyDecoder") tf.add_input(desc, inputs_) tf.add_input(desc, sequence_length_) @@ -20634,6 +24218,13 @@ begin end tf.execute(desc) end + function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + if tf.eager_mode + 
ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + else + ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + end + end end @@ -20643,7 +24234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) local desc tf.with_op_name(name, "ImmutableConst") do desc = tf.NodeDescription("ImmutableConst") @@ -20659,7 +24250,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + function immutable_const_eager(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) desc = tf.EagerOp("ImmutableConst") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -20672,6 +24263,13 @@ begin end (tf.execute(desc))[1] end + function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + if tf.eager_mode + immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + else + immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + end + end end @@ -20681,7 +24279,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) local desc tf.with_op_name(name, "ConsumeMutexLock") do desc = tf.NodeDescription("ConsumeMutexLock") @@ -20690,11 +24288,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function consume_mutex_lock(mutex_lock_::tf.TensorHandle; name=nothing) + function consume_mutex_lock_eager(mutex_lock_; name=nothing) desc = tf.EagerOp("ConsumeMutexLock") tf.add_input(desc, mutex_lock_) (tf.execute(desc))[1] end + function consume_mutex_lock(mutex_lock_; name=nothing) + if tf.eager_mode + consume_mutex_lock_eager(mutex_lock_; name=name) + else + consume_mutex_lock_graph(mutex_lock_; name=name) + end + end end @@ -20704,7 +24309,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function greater_equal(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "GreaterEqual") do desc = tf.NodeDescription("GreaterEqual") @@ -20716,7 +24321,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function greater_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function greater_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("GreaterEqual") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -20724,16 +24329,23 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function greater_equal(x_, y_; name=nothing) + if tf.eager_mode + greater_equal_eager(x_, y_; name=name) + else + greater_equal_graph(x_, y_; name=name) + end + end end """ - initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter= ) + initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter=) """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFileV2") do desc = tf.NodeDescription("InitializeTableFromTextFileV2") @@ -20756,7 +24368,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function initialize_table_from_text_file_v2(table_handle_::tf.TensorHandle, filename_::tf.TensorHandle; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFileV2") tf.add_input(desc, table_handle_) tf.add_input(desc, filename_) @@ -20774,6 +24386,13 @@ begin end (tf.execute(desc))[1] end + function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + if tf.eager_mode + initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + else + initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + end + end end @@ -20783,7 +24402,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeue") do desc = tf.NodeDescription("QueueDequeue") @@ -20798,7 +24417,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_dequeue(handle_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeue") tf.add_input(desc, handle_) if component_types !== nothing @@ -20809,6 +24428,13 @@ begin end (tf.execute(desc))[1] end + function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end + end end @@ -20818,7 +24444,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function equal(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Equal") do desc = tf.NodeDescription("Equal") @@ -20830,7 +24456,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function equal_eager(x_, y_; name=nothing) desc = 
tf.EagerOp("Equal") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -20838,6 +24464,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function equal(x_, y_; name=nothing) + if tf.eager_mode + equal_eager(x_, y_; name=name) + else + equal_graph(x_, y_; name=name) + end + end end @@ -20847,7 +24480,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandle") do desc = tf.NodeDescription("IteratorFromStringHandle") @@ -20862,7 +24495,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_from_string_handle(string_handle_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandle") tf.add_input(desc, string_handle_) if output_types !== nothing @@ -20873,6 +24506,13 @@ begin end (tf.execute(desc))[1] end + function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -20882,7 +24522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListSplit") do desc = tf.NodeDescription("TensorListSplit") @@ -20903,7 +24543,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_split(tensor_::tf.TensorHandle, element_shape_::tf.TensorHandle, lengths_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListSplit") tf.add_input(desc, tensor_) tf.add_input(desc, element_shape_) @@ -20918,6 +24558,13 @@ begin desc["shape_type"] = tf.data_type(element_shape_) (tf.execute(desc))[1] end + function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end @@ -20927,7 +24574,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalMaxPool") do desc = tf.NodeDescription("FractionalMaxPool") @@ -20960,7 +24607,7 @@ begin end out end - function fractional_max_pool(value_::tf.TensorHandle; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FractionalMaxPool") tf.add_input(desc, value_) if pooling_ratio !== nothing @@ -20984,6 +24631,13 @@ begin desc["T"] = tf.data_type(value_) tf.execute(desc) end + function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + else + fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + end + end end @@ -20993,7 +24647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) local desc tf.with_op_name(name, "ScatterNd") do desc = tf.NodeDescription("ScatterNd") @@ -21009,7 +24663,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_nd(indices_::tf.TensorHandle, updates_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing) + function scatter_nd_eager(indices_, updates_, shape_; name=nothing) desc = tf.EagerOp("ScatterNd") tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -21019,6 +24673,13 @@ begin desc["Tindices"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function scatter_nd(indices_, updates_, shape_; name=nothing) + if tf.eager_mode + scatter_nd_eager(indices_, updates_, shape_; name=name) + else + scatter_nd_graph(indices_, updates_, shape_; name=name) + end + end end @@ -21028,7 +24689,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListScatterIntoExistingList") do desc = tf.NodeDescription("TensorListScatterIntoExistingList") @@ -21045,7 +24706,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_scatter_into_existing_list(input_handle_::tf.TensorHandle, tensor_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) desc = 
tf.EagerOp("TensorListScatterIntoExistingList") tf.add_input(desc, input_handle_) tf.add_input(desc, tensor_) @@ -21056,6 +24717,13 @@ begin desc["element_dtype"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + if tf.eager_mode + tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype) + else + tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype) + end + end end @@ -21065,7 +24733,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function select(condition_, t_, e_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) local desc tf.with_op_name(name, "Select") do desc = tf.NodeDescription("Select") @@ -21079,7 +24747,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function select(condition_::tf.TensorHandle, t_::tf.TensorHandle, e_::tf.TensorHandle; name=nothing) + function select_eager(condition_, t_, e_; name=nothing) desc = tf.EagerOp("Select") tf.add_input(desc, condition_) tf.add_input(desc, t_) @@ -21088,6 +24756,13 @@ begin desc["T"] = tf.data_type(e_) (tf.execute(desc))[1] end + function select(condition_, t_, e_; name=nothing) + if tf.eager_mode + select_eager(condition_, t_, e_; name=name) + else + select_graph(condition_, t_, e_; name=name) + end + end end @@ -21097,7 +24772,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Min") do desc = tf.NodeDescription("Min") @@ -21114,7 +24789,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function min(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Min") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -21125,6 +24800,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -21134,7 +24816,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRNGrad") do desc = tf.NodeDescription("LRNGrad") @@ -21160,7 +24842,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lrn_grad(input_grads_::tf.TensorHandle, input_image_::tf.TensorHandle, output_image_::tf.TensorHandle; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + function 
lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) desc = tf.EagerOp("LRNGrad") tf.add_input(desc, input_grads_) tf.add_input(desc, input_image_) @@ -21182,6 +24864,13 @@ begin desc["T"] = tf.data_type(output_image_) (tf.execute(desc))[1] end + function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.eager_mode + lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end + end end @@ -21191,7 +24880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoissonV2") do desc = tf.NodeDescription("RandomPoissonV2") @@ -21219,7 +24908,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_poisson_v2(shape_::tf.TensorHandle, rate_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoissonV2") tf.add_input(desc, shape_) tf.add_input(desc, rate_) @@ -21242,6 +24931,13 @@ begin desc["R"] = tf.data_type(rate_) (tf.execute(desc))[1] end + function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + if tf.eager_mode + random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + else + random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + end + end end @@ -21251,7 +24947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueue") do desc = tf.NodeDescription("FIFOQueue") @@ -21273,7 +24969,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + function fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FIFOQueue") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -21292,6 +24988,13 @@ begin end (tf.execute(desc))[1] end + function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, 
capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end end @@ -21301,7 +25004,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") @@ -21326,7 +25029,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -21344,6 +25047,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -21353,7 +25063,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalNonSerializableDataset") do desc = tf.NodeDescription("ExperimentalNonSerializableDataset") @@ -21368,7 +25078,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_non_serializable_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalNonSerializableDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -21379,6 +25089,13 @@ begin end (tf.execute(desc))[1] end + function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -21388,7 +25105,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") @@ -21405,7 +25122,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_bytes_produced_stats_dataset(input_dataset_::tf.TensorHandle, tag_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, tag_) @@ -21417,6 +25134,13 @@ begin end (tf.execute(desc))[1] end + function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -21426,7 +25150,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropFilter") do desc = tf.NodeDescription("Dilation2DBackpropFilter") @@ -21449,7 +25173,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function dilation2d_backprop_filter(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, rates=nothing, padding=nothing) + function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropFilter") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -21468,6 +25192,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.eager_mode + dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end + end end @@ -21477,7 +25208,7 @@ end output = cond ? 
then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "_If") do desc = tf.NodeDescription("_If") @@ -21501,7 +25232,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _if(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("_If") tf.add_input(desc, cond_) tf.add_input(desc, input_) @@ -21520,16 +25251,23 @@ begin desc["Tcond"] = tf.data_type(cond_) (tf.execute(desc))[1] end + function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.eager_mode + _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end + end end """ - bias_add_grad(out_backprop; data_format=NHWC) + bias_add_grad(out_backprop; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAddGrad") do desc = tf.NodeDescription("BiasAddGrad") @@ -21542,7 +25280,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bias_add_grad(out_backprop_::tf.TensorHandle; name=nothing, data_format=nothing) + function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAddGrad") tf.add_input(desc, out_backprop_) if data_format !== nothing @@ -21551,6 +25289,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) + if tf.eager_mode + bias_add_grad_eager(out_backprop_; name=name, data_format=data_format) + else + bias_add_grad_graph(out_backprop_; name=name, data_format=data_format) + end + end end @@ -21560,7 +25305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeStateV2") do desc = tf.NodeDescription("ReaderSerializeStateV2") @@ -21569,11 +25314,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_serialize_state_v2(reader_handle_::tf.TensorHandle; name=nothing) + function reader_serialize_state_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeStateV2") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_serialize_state_v2(reader_handle_; name=nothing) + if tf.eager_mode + reader_serialize_state_v2_eager(reader_handle_; name=name) + else + 
reader_serialize_state_v2_graph(reader_handle_; name=name) + end + end end @@ -21583,7 +25335,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "WrapDatasetVariant") do desc = tf.NodeDescription("WrapDatasetVariant") @@ -21592,11 +25344,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function wrap_dataset_variant(input_handle_::tf.TensorHandle; name=nothing) + function wrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("WrapDatasetVariant") tf.add_input(desc, input_handle_) (tf.execute(desc))[1] end + function wrap_dataset_variant(input_handle_; name=nothing) + if tf.eager_mode + wrap_dataset_variant_eager(input_handle_; name=name) + else + wrap_dataset_variant_graph(input_handle_; name=name) + end + end end @@ -21606,7 +25365,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ParallelInterleaveDatasetV2") do desc = tf.NodeDescription("ParallelInterleaveDatasetV2") @@ -21638,7 +25397,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parallel_interleave_dataset_v2(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ParallelInterleaveDatasetV2") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -21662,16 +25421,23 @@ begin end (tf.execute(desc))[1] end + function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.eager_mode + parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end + end end """ - depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; 
data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") @@ -21697,7 +25463,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function depthwise_conv2d_native_backprop_input(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) @@ -21718,6 +25484,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end + end end @@ -21727,7 +25500,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyRMSProp") do desc = tf.NodeDescription("ResourceApplyRMSProp") @@ -21754,7 +25527,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyRMSProp") tf.add_input(desc, var_) tf.add_input(desc, ms_) @@ -21774,6 +25547,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end 
+ end end @@ -21783,7 +25563,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "SparseAccumulatorTakeGradient") do desc = tf.NodeDescription("SparseAccumulatorTakeGradient") @@ -21802,7 +25582,7 @@ begin end out end - function sparse_accumulator_take_gradient(handle_::tf.TensorHandle, num_required_::tf.TensorHandle; name=nothing, dtype=nothing) + function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("SparseAccumulatorTakeGradient") tf.add_input(desc, handle_) tf.add_input(desc, num_required_) @@ -21811,6 +25591,13 @@ begin end tf.execute(desc) end + function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.eager_mode + sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end + end end @@ -21820,7 +25607,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLMDBDataset") do desc = tf.NodeDescription("ExperimentalLMDBDataset") @@ -21835,7 +25622,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_lmdb_dataset(filenames_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLMDBDataset") tf.add_input(desc, filenames_) if output_types !== nothing @@ -21846,6 +25633,13 @@ begin end (tf.execute(desc))[1] end + function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -21855,7 +25649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_close_v2(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackCloseV2") do desc = tf.NodeDescription("StackCloseV2") @@ -21864,11 +25658,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack_close_v2(handle_::tf.TensorHandle; name=nothing) + function stack_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("StackCloseV2") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function stack_close_v2(handle_; name=nothing) + if tf.eager_mode + stack_close_v2_eager(handle_; name=name) + else + stack_close_v2_graph(handle_; name=name) + end + end end @@ -21878,7 +25679,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapSize") do desc = tf.NodeDescription("MapSize") @@ -21900,7 +25701,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapSize") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -21919,6 +25720,13 @@ begin end (tf.execute(desc))[1] end + function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -21928,7 +25736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagradDA") do desc = tf.NodeDescription("ResourceApplyAdagradDA") @@ -21955,7 +25763,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdagradDA") tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) @@ -21974,6 +25782,13 @@ begin desc["T"] = tf.data_type(l2_) (tf.execute(desc))[1] end + function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end + end end @@ -21983,7 +25798,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSize") do desc = tf.NodeDescription("TensorForestTreeSize") @@ -21992,11 +25807,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_size(tree_handle_::tf.TensorHandle; name=nothing) + function tensor_forest_tree_size_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSize") tf.add_input(desc, tree_handle_) (tf.execute(desc))[1] end + function tensor_forest_tree_size(tree_handle_; name=nothing) + if tf.eager_mode + tensor_forest_tree_size_eager(tree_handle_; name=name) + else + tensor_forest_tree_size_graph(tree_handle_; name=name) + end + end end @@ -22006,7 +25828,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_diag_part(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDiagPart") do desc = tf.NodeDescription("MatrixDiagPart") @@ -22016,12 +25838,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_diag_part(input_::tf.TensorHandle; name=nothing) + function matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_diag_part(input_; name=nothing) + if tf.eager_mode + matrix_diag_part_eager(input_; name=name) + else + matrix_diag_part_graph(input_; name=name) + end + end end @@ -22031,7 +25860,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") @@ -22040,11 +25869,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_num_work_units_completed_v2(reader_handle_::tf.TensorHandle; name=nothing) + function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_num_work_units_completed_v2(reader_handle_; name=nothing) + if tf.eager_mode + reader_num_work_units_completed_v2_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_v2_graph(reader_handle_; name=name) + end + end end @@ -22054,7 +25890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV3") do desc = tf.NodeDescription("TensorArraySplitV3") @@ -22070,7 +25906,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_split_v3(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = 
tf.EagerOp("TensorArraySplitV3") tf.add_input(desc, handle_) tf.add_input(desc, value_) @@ -22079,6 +25915,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name) + end + end end @@ -22088,7 +25931,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToDense") do desc = tf.NodeDescription("SparseToDense") @@ -22110,7 +25953,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_to_dense(sparse_indices_::tf.TensorHandle, output_shape_::tf.TensorHandle, sparse_values_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing, validate_indices=nothing) + function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToDense") tf.add_input(desc, sparse_indices_) tf.add_input(desc, output_shape_) @@ -22125,6 +25968,13 @@ begin desc["T"] = tf.data_type(default_value_) (tf.execute(desc))[1] end + function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + if tf.eager_mode + sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + else + sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + end + end end @@ -22134,7 +25984,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicated_input(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "TPUReplicatedInput") do desc = tf.NodeDescription("TPUReplicatedInput") @@ -22147,7 +25997,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_replicated_input(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("TPUReplicatedInput") tf.add_input(desc, inputs_) if N !== nothing @@ -22156,6 +26006,13 @@ begin desc["T"] = tf.data_type(inputs_) (tf.execute(desc))[1] end + function tpu_replicated_input(inputs_; name=nothing, N=nothing) + if tf.eager_mode + tpu_replicated_input_eager(inputs_; name=name, N=N) + else + tpu_replicated_input_graph(inputs_; name=name, N=N) + end + end end @@ -22165,7 +26022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_close(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackClose") do desc = tf.NodeDescription("StackClose") @@ -22174,11 +26031,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - 
function stack_close(handle_::tf.TensorHandle; name=nothing) + function stack_close_eager(handle_; name=nothing) desc = tf.EagerOp("StackClose") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function stack_close(handle_; name=nothing) + if tf.eager_mode + stack_close_eager(handle_; name=name) + else + stack_close_graph(handle_; name=name) + end + end end @@ -22188,7 +26052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeManySparse") do desc = tf.NodeDescription("DeserializeManySparse") @@ -22205,7 +26069,7 @@ begin end out end - function deserialize_many_sparse(serialized_sparse_::tf.TensorHandle; name=nothing, dtype=nothing) + function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeManySparse") tf.add_input(desc, serialized_sparse_) if dtype !== nothing @@ -22213,6 +26077,13 @@ begin end tf.execute(desc) end + function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) + if tf.eager_mode + deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype) + else + deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + end + end end @@ -22222,7 +26093,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceRecv") do desc = tf.NodeDescription("_NcclReduceRecv") @@ -22241,7 +26112,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _nccl_reduce_recv(input_::tf.TensorHandle; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceRecv") tf.add_input(desc, input_) if reduction !== nothing @@ -22256,6 +26127,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.eager_mode + _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end + end end @@ -22265,7 +26143,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPadGrad") do desc = tf.NodeDescription("MirrorPadGrad") @@ -22281,7 +26159,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mirror_pad_grad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, mode=nothing) + 
function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPadGrad") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -22292,6 +26170,13 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) (tf.execute(desc))[1] end + function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) + if tf.eager_mode + mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode) + end + end end @@ -22301,7 +26186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function broadcast_args(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastArgs") do desc = tf.NodeDescription("BroadcastArgs") @@ -22313,7 +26198,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function broadcast_args(s0_::tf.TensorHandle, s1_::tf.TensorHandle; name=nothing) + function broadcast_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastArgs") tf.add_input(desc, s0_) tf.add_input(desc, s1_) @@ -22321,6 +26206,13 @@ begin desc["T"] = tf.data_type(s1_) (tf.execute(desc))[1] end + function broadcast_args(s0_, s1_; name=nothing) + if tf.eager_mode + broadcast_args_eager(s0_, s1_; name=name) + else + broadcast_args_graph(s0_, s1_; name=name) + end + end end @@ -22330,7 +26222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessTruncatedNormal") do desc = tf.NodeDescription("StatelessTruncatedNormal") @@ -22346,7 +26238,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_truncated_normal(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessTruncatedNormal") tf.add_input(desc, shape_) tf.add_input(desc, seed_) @@ -22357,6 +26249,13 @@ begin desc["Tseed"] = tf.data_type(seed_) (tf.execute(desc))[1] end + function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.eager_mode + stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype) + end + end end @@ -22366,7 +26265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function regex_full_match(input_, pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) local desc tf.with_op_name(name, "RegexFullMatch") do desc = tf.NodeDescription("RegexFullMatch") @@ -22377,12 +26276,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function regex_full_match(input_::tf.TensorHandle, pattern_::tf.TensorHandle; name=nothing) + function regex_full_match_eager(input_, pattern_; name=nothing) desc = tf.EagerOp("RegexFullMatch") tf.add_input(desc, input_) tf.add_input(desc, pattern_) (tf.execute(desc))[1] end + function regex_full_match(input_, pattern_; name=nothing) + if tf.eager_mode + regex_full_match_eager(input_, pattern_; 
name=name) + else + regex_full_match_graph(input_, pattern_; name=name) + end + end end @@ -22392,7 +26298,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "UnwrapDatasetVariant") do desc = tf.NodeDescription("UnwrapDatasetVariant") @@ -22401,11 +26307,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unwrap_dataset_variant(input_handle_::tf.TensorHandle; name=nothing) + function unwrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("UnwrapDatasetVariant") tf.add_input(desc, input_handle_) (tf.execute(desc))[1] end + function unwrap_dataset_variant(input_handle_; name=nothing) + if tf.eager_mode + unwrap_dataset_variant_eager(input_handle_; name=name) + else + unwrap_dataset_variant_graph(input_handle_; name=name) + end + end end @@ -22415,7 +26328,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) local desc tf.with_op_name(name, "Empty") do desc = tf.NodeDescription("Empty") @@ -22430,7 +26343,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function empty(shape_::tf.TensorHandle; name=nothing, dtype=nothing, init=nothing) + function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing) desc = tf.EagerOp("Empty") tf.add_input(desc, shape_) if dtype !== nothing @@ -22441,6 +26354,13 @@ begin end (tf.execute(desc))[1] end + function empty(shape_; name=nothing, dtype=nothing, init=nothing) + if tf.eager_mode + empty_eager(shape_; name=name, dtype=dtype, init=init) + else + empty_graph(shape_; name=name, dtype=dtype, init=init) + end + end end @@ -22450,7 +26370,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeueTuple") do desc = tf.NodeDescription("OutfeedDequeueTuple") @@ -22466,7 +26386,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + function outfeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) desc = tf.EagerOp("OutfeedDequeueTuple") if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -22479,6 +26399,13 @@ begin end (tf.execute(desc))[1] end + function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + if tf.eager_mode + outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + else + outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + end + end end @@ -22488,7 +26415,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function div(x_, y_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Div") do desc = tf.NodeDescription("Div") @@ -22500,7 +26427,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function div_eager(x_, y_; name=nothing) desc = tf.EagerOp("Div") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -22508,6 +26435,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function div(x_, y_; name=nothing) + if tf.eager_mode + div_eager(x_, y_; name=name) + else + div_graph(x_, y_; name=name) + end + end end @@ -22517,7 +26451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Barrier") do desc = tf.NodeDescription("Barrier") @@ -22539,7 +26473,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + function barrier_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Barrier") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -22558,6 +26492,13 @@ begin end (tf.execute(desc))[1] end + function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end end @@ -22567,7 +26508,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncate_div(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateDiv") do desc = tf.NodeDescription("TruncateDiv") @@ -22579,7 +26520,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function truncate_div(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function truncate_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateDiv") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -22587,16 +26528,23 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function truncate_div(x_, y_; name=nothing) + if tf.eager_mode + truncate_div_eager(x_, y_; name=name) + else + truncate_div_graph(x_, y_; name=name) + end + end end """ - unicode_encode(input_values, input_splits; errors=replace, replacement_char=65533) + unicode_encode(input_values, input_splits; errors=, replacement_char=65533) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) local desc tf.with_op_name(name, "UnicodeEncode") do desc = tf.NodeDescription("UnicodeEncode") @@ -22616,7 +26564,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unicode_encode(input_values_::tf.TensorHandle, input_splits_::tf.TensorHandle; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) desc = tf.EagerOp("UnicodeEncode") tf.add_input(desc, input_values_) tf.add_input(desc, input_splits_) @@ -22631,6 +26579,13 @@ begin end (tf.execute(desc))[1] end + function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + if tf.eager_mode + unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + else + unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + end + end end @@ -22640,7 +26595,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "MergeSummary") do desc = tf.NodeDescription("MergeSummary") @@ -22652,7 +26607,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function merge_summary(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function merge_summary_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("MergeSummary") tf.add_input(desc, inputs_) if N !== nothing @@ -22660,6 +26615,13 @@ begin end (tf.execute(desc))[1] end + function merge_summary(inputs_; name=nothing, N=nothing) + if tf.eager_mode + merge_summary_eager(inputs_; name=name, N=N) + else + merge_summary_graph(inputs_; name=name, N=N) + end + end end @@ -22669,7 +26631,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_queue(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_queue_graph(resource_; name=nothing) local desc tf.with_op_name(name, "FakeQueue") do desc = tf.NodeDescription("FakeQueue") @@ -22678,11 +26640,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fake_queue(resource_::tf.TensorHandle; name=nothing) + function fake_queue_eager(resource_; name=nothing) desc = tf.EagerOp("FakeQueue") tf.add_input(desc, resource_) (tf.execute(desc))[1] end + function fake_queue(resource_; name=nothing) + if tf.eager_mode + fake_queue_eager(resource_; name=name) + else + fake_queue_graph(resource_; name=name) + end + end end @@ -22692,7 +26661,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_cholesky(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchCholesky") do desc = tf.NodeDescription("BatchCholesky") @@ -22702,12 +26671,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - 
function batch_cholesky(input_::tf.TensorHandle; name=nothing) + function batch_cholesky_eager(input_; name=nothing) desc = tf.EagerOp("BatchCholesky") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function batch_cholesky(input_; name=nothing) + if tf.eager_mode + batch_cholesky_eager(input_; name=name) + else + batch_cholesky_graph(input_; name=name) + end + end end @@ -22717,7 +26693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Iterator") do desc = tf.NodeDescription("Iterator") @@ -22736,7 +26712,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + function iterator_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("Iterator") if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) @@ -22752,6 +26728,13 @@ begin end (tf.execute(desc))[1] end + function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -22761,7 +26744,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bessel_i1e(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bessel_i1e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI1e") do desc = tf.NodeDescription("BesselI1e") @@ -22771,12 +26754,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bessel_i1e(x_::tf.TensorHandle; name=nothing) + function bessel_i1e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI1e") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function bessel_i1e(x_; name=nothing) + if tf.eager_mode + bessel_i1e_eager(x_; name=name) + else + bessel_i1e_graph(x_; name=name) + end + end end @@ -22786,7 +26776,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function import_event(writer_, event_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function import_event_graph(writer_, event_; name=nothing) local desc tf.with_op_name(name, "ImportEvent") do desc = tf.NodeDescription("ImportEvent") @@ -22797,12 +26787,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function import_event(writer_::tf.TensorHandle, event_::tf.TensorHandle; name=nothing) + function import_event_eager(writer_, event_; name=nothing) desc = tf.EagerOp("ImportEvent") tf.add_input(desc, writer_) tf.add_input(desc, event_) (tf.execute(desc))[1] end + function import_event(writer_, event_; name=nothing) + if tf.eager_mode + import_event_eager(writer_, event_; name=name) + else + import_event_graph(writer_, event_; name=name) + end + end end 
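
Every op in this patch is regenerated into the same three-function shape: a "*_graph" builder, a "*_eager" executor, and a thin public dispatcher that picks between them. A minimal sketch of that pattern, using a hypothetical single-input kernel "MyOp" (the tf.* helpers: NodeDescription, EagerOp, add_input, data_type, execute, eager_mode, are the ones the surrounding hunks already use):

    function my_op_graph(x_; name=nothing)
        # Graph mode: describe the node, register it in the graph,
        # and return a symbolic Tensor.
        local desc
        tf.with_op_name(name, "MyOp") do
            desc = tf.NodeDescription("MyOp")
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))
    end

    function my_op_eager(x_; name=nothing)
        # Eager mode: build an EagerOp, infer the "T" attr from the
        # input handle's dtype, and execute immediately.
        desc = tf.EagerOp("MyOp")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        (tf.execute(desc))[1]
    end

    function my_op(x_; name=nothing)
        # Public entry point: dispatch on the global eager flag.
        if tf.eager_mode
            my_op_eager(x_; name=name)
        else
            my_op_graph(x_; name=name)
        end
    end

Single-output ops take element [1] of tf.execute's result, as above; multi-output ops such as sparse_accumulator_take_gradient and quantized_relu elsewhere in this patch return the full result of tf.execute(desc) instead.
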
@@ -22812,7 +26809,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) local desc tf.with_op_name(name, "QuantizedInstanceNorm") do desc = tf.NodeDescription("QuantizedInstanceNorm") @@ -22846,7 +26843,7 @@ begin end out end - function quantized_instance_norm(x_::tf.TensorHandle, x_min_::tf.TensorHandle, x_max_::tf.TensorHandle; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) desc = tf.EagerOp("QuantizedInstanceNorm") tf.add_input(desc, x_) tf.add_input(desc, x_min_) @@ -22869,6 +26866,13 @@ begin desc["T"] = tf.data_type(x_) tf.execute(desc) end + function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + if tf.eager_mode + quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + else + quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + end + end end @@ -22878,7 +26882,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") @@ -22901,7 +26905,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adagrad_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -22919,6 +26923,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; 
name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -22928,7 +26939,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV3") do desc = tf.NodeDescription("TensorArrayWriteV3") @@ -22944,7 +26955,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_write_v3(handle_::tf.TensorHandle, index_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWriteV3") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -22953,6 +26964,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + end + end end @@ -22962,7 +26980,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToDenseSetOperation") do desc = tf.NodeDescription("DenseToDenseSetOperation") @@ -22985,7 +27003,7 @@ begin end out end - function dense_to_dense_set_operation(set1_::tf.TensorHandle, set2_::tf.TensorHandle; name=nothing, set_operation=nothing, validate_indices=nothing) + function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("DenseToDenseSetOperation") tf.add_input(desc, set1_) tf.add_input(desc, set2_) @@ -22999,16 +27017,23 @@ begin desc["T"] = tf.data_type(set2_) tf.execute(desc) end + function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.eager_mode + dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end + end end """ - encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=) + encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=, x_density=300, y_density=300, xmp_metadata=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, 
density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) local desc tf.with_op_name(name, "EncodeJpeg") do desc = tf.NodeDescription("EncodeJpeg") @@ -23044,7 +27069,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function encode_jpeg(image_::tf.TensorHandle; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) desc = tf.EagerOp("EncodeJpeg") tf.add_input(desc, image_) if format !== nothing @@ -23076,6 +27101,13 @@ begin end (tf.execute(desc))[1] end + function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + if tf.eager_mode + encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + else + encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + end + end end @@ -23085,7 +27117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_update(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceUpdate") do desc = tf.NodeDescription("InplaceUpdate") @@ -23099,7 +27131,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function inplace_update(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + function inplace_update_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceUpdate") tf.add_input(desc, x_) tf.add_input(desc, i_) @@ -23108,6 +27140,13 @@ begin desc["T"] = tf.data_type(v_) (tf.execute(desc))[1] end + function inplace_update(x_, i_, v_; name=nothing) + if tf.eager_mode + inplace_update_eager(x_, i_, v_; name=name) + else + inplace_update_graph(x_, i_, v_; name=name) + end + end end @@ -23117,7 +27156,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedPadConv2D") do desc = tf.NodeDescription("FusedPadConv2D") @@ -23140,7 +27179,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - 
function fused_pad_conv2d(input_::tf.TensorHandle, paddings_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, mode=nothing, strides=nothing, padding=nothing) + function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("FusedPadConv2D") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -23158,6 +27197,13 @@ begin desc["T"] = tf.data_type(filter_) (tf.execute(desc))[1] end + function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + else + fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + end + end end @@ -23167,7 +27213,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu") do desc = tf.NodeDescription("QuantizedRelu") @@ -23189,7 +27235,7 @@ begin end out end - function quantized_relu(features_::tf.TensorHandle, min_features_::tf.TensorHandle, max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu") tf.add_input(desc, features_) tf.add_input(desc, min_features_) @@ -23200,6 +27246,13 @@ begin desc["Tinput"] = tf.data_type(features_) tf.execute(desc) end + function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.eager_mode + quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end + end end @@ -23209,7 +27262,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather_nd(params_, indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) local desc tf.with_op_name(name, "GatherNd") do desc = tf.NodeDescription("GatherNd") @@ -23223,7 +27276,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function gather_nd(params_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing) + function gather_nd_eager(params_, indices_; name=nothing) desc = tf.EagerOp("GatherNd") tf.add_input(desc, params_) tf.add_input(desc, indices_) @@ -23231,6 +27284,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function gather_nd(params_, indices_; name=nothing) + if tf.eager_mode + gather_nd_eager(params_, indices_; name=name) + else + gather_nd_graph(params_, indices_; name=name) + end + end end @@ -23240,7 +27300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "Placeholder") do desc = 
tf.NodeDescription("Placeholder") @@ -23253,7 +27313,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function placeholder(; name=nothing, dtype=nothing, shape=nothing) + function placeholder_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("Placeholder") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -23263,6 +27323,13 @@ begin end (tf.execute(desc))[1] end + function placeholder(; name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + placeholder_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_graph(; name=name, dtype=dtype, shape=shape) + end + end end @@ -23272,7 +27339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterByLastComponentDataset") do desc = tf.NodeDescription("FilterByLastComponentDataset") @@ -23287,7 +27354,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function filter_by_last_component_dataset(input_dataset_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterByLastComponentDataset") tf.add_input(desc, input_dataset_) if output_types !== nothing @@ -23298,6 +27365,13 @@ begin end (tf.execute(desc))[1] end + function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -23307,7 +27381,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) local desc tf.with_op_name(name, "ClipByValue") do desc = tf.NodeDescription("ClipByValue") @@ -23321,7 +27395,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function clip_by_value(t_::tf.TensorHandle, clip_value_min_::tf.TensorHandle, clip_value_max_::tf.TensorHandle; name=nothing) + function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing) desc = tf.EagerOp("ClipByValue") tf.add_input(desc, t_) tf.add_input(desc, clip_value_min_) @@ -23331,6 +27405,13 @@ begin desc["T"] = tf.data_type(clip_value_max_) (tf.execute(desc))[1] end + function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) + if tf.eager_mode + clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name) + else + clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name) + end + end end @@ -23340,7 +27421,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) local desc tf.with_op_name(name, "ImageSummary") do desc = tf.NodeDescription("ImageSummary") @@ -23358,7 +27439,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function image_summary(tag_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, max_images=nothing, bad_color=nothing) + function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) desc = tf.EagerOp("ImageSummary") tf.add_input(desc, tag_) tf.add_input(desc, tensor_) @@ -23371,6 +27452,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + if tf.eager_mode + image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + else + image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + end + end end @@ -23380,7 +27468,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") @@ -23404,7 +27492,7 @@ begin end out end - function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_adadelta_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -23420,6 +27508,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -23429,7 +27524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) local desc tf.with_op_name(name, "StringJoin") do desc = tf.NodeDescription("StringJoin") @@ -23444,7 +27539,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_join(inputs_::tf.TensorHandle; name=nothing, N=nothing, separator=nothing) + function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing) desc = tf.EagerOp("StringJoin") tf.add_input(desc, inputs_) if N !== nothing @@ -23455,6 +27550,13 @@ begin end (tf.execute(desc))[1] end + function string_join(inputs_; name=nothing, N=nothing, separator=nothing) + if tf.eager_mode + string_join_eager(inputs_; name=name, 
N=N, separator=separator) + else + string_join_graph(inputs_; name=name, N=N, separator=separator) + end + end end @@ -23464,7 +27566,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdAdd") do desc = tf.NodeDescription("ResourceScatterNdAdd") @@ -23483,7 +27585,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_nd_add(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdAdd") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -23495,6 +27597,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -23504,7 +27613,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") @@ -23518,7 +27627,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_::tf.TensorHandle, bucket_boundaries_::tf.TensorHandle; name=nothing, num_streams=nothing) + function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize") tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, bucket_boundaries_) @@ -23527,6 +27636,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + if tf.eager_mode + boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + else + boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + end + end end @@ -23536,7 +27652,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function left_shift(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function left_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LeftShift") do desc = 
tf.NodeDescription("LeftShift") @@ -23548,7 +27664,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function left_shift(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function left_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("LeftShift") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -23556,6 +27672,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function left_shift(x_, y_; name=nothing) + if tf.eager_mode + left_shift_eager(x_, y_; name=name) + else + left_shift_graph(x_, y_; name=name) + end + end end @@ -23565,7 +27688,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "RequantizePerChannel") do desc = tf.NodeDescription("RequantizePerChannel") @@ -23591,7 +27714,7 @@ begin end out end - function requantize_per_channel(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, requested_output_min_::tf.TensorHandle, requested_output_max_::tf.TensorHandle; name=nothing, out_type=nothing) + function requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("RequantizePerChannel") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -23604,6 +27727,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + if tf.eager_mode + requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + else + requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + end + end end @@ -23613,7 +27743,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterAdd") do desc = tf.NodeDescription("TensorScatterAdd") @@ -23629,7 +27759,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_scatter_add(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterAdd") tf.add_input(desc, tensor_) tf.add_input(desc, indices_) @@ -23639,6 +27769,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) + if tf.eager_mode + tensor_scatter_add_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_add_graph(tensor_, indices_, updates_; name=name) + end + end end @@ -23648,7 +27785,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, 
shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "_VarHandlesOp") do desc = tf.NodeDescription("_VarHandlesOp") @@ -23675,7 +27812,7 @@ begin end out end - function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + function _var_handles_op_eager(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) desc = tf.EagerOp("_VarHandlesOp") if containers !== nothing desc["containers"] = map(Base.identity, containers) @@ -23694,6 +27831,13 @@ begin end tf.execute(desc) end + function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + if tf.eager_mode + _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + else + _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + end + end end @@ -23703,7 +27847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft3d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT3D") do desc = tf.NodeDescription("IFFT3D") @@ -23713,12 +27857,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ifft3d(input_::tf.TensorHandle; name=nothing) + function ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("IFFT3D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) (tf.execute(desc))[1] end + function ifft3d(input_; name=nothing) + if tf.eager_mode + ifft3d_eager(input_; name=name) + else + ifft3d_graph(input_; name=name) + end + end end @@ -23728,7 +27879,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "EuclideanNorm") do desc = tf.NodeDescription("EuclideanNorm") @@ -23745,7 +27896,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function euclidean_norm(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function euclidean_norm_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("EuclideanNorm") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -23756,6 +27907,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + euclidean_norm_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + euclidean_norm_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -23765,7 +27923,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op 
function ref_select_graph(index_, inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefSelect") do desc = tf.NodeDescription("RefSelect") @@ -23780,7 +27938,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ref_select(index_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, N=nothing) + function ref_select_eager(index_, inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefSelect") tf.add_input(desc, index_) tf.add_input(desc, inputs_) @@ -23790,6 +27948,13 @@ begin desc["T"] = tf.data_type(inputs_) (tf.execute(desc))[1] end + function ref_select(index_, inputs_; name=nothing, N=nothing) + if tf.eager_mode + ref_select_eager(index_, inputs_; name=name, N=N) + else + ref_select_graph(index_, inputs_; name=name, N=N) + end + end end @@ -23799,7 +27964,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) local desc tf.with_op_name(name, "SparseTensorSliceDataset") do desc = tf.NodeDescription("SparseTensorSliceDataset") @@ -23813,7 +27978,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_tensor_slice_dataset(indices_::tf.TensorHandle, values_::tf.TensorHandle, dense_shape_::tf.TensorHandle; name=nothing) + function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing) desc = tf.EagerOp("SparseTensorSliceDataset") tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -23821,6 +27986,13 @@ begin desc["Tvalues"] = tf.data_type(values_) (tf.execute(desc))[1] end + function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) + if tf.eager_mode + sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name) + else + sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name) + end + end end @@ -23830,7 +28002,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") @@ -23854,7 +28026,7 @@ begin end out end - function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -23870,6 +28042,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, 
table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -23879,7 +28058,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft2d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT2D") do desc = tf.NodeDescription("BatchIFFT2D") @@ -23888,11 +28067,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_ifft2d(input_::tf.TensorHandle; name=nothing) + function batch_ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT2D") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_ifft2d(input_; name=nothing) + if tf.eager_mode + batch_ifft2d_eager(input_; name=name) + else + batch_ifft2d_graph(input_; name=name) + end + end end @@ -23902,7 +28088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGather") do desc = tf.NodeDescription("TensorArrayGather") @@ -23921,7 +28107,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_gather(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGather") tf.add_input(desc, handle_) tf.add_input(desc, indices_) @@ -23934,6 +28120,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.eager_mode + tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end + end end @@ -23943,7 +28136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") @@ -23962,7 +28155,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_mean_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanWithNumSegments") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -23973,6 +28166,13 @@ begin desc["Tnumsegments"] = 
tf.data_type(num_segments_) (tf.execute(desc))[1] end + function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.eager_mode + sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end + end end @@ -23982,7 +28182,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "EnsureShape") do desc = tf.NodeDescription("EnsureShape") @@ -23995,7 +28195,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ensure_shape(input_::tf.TensorHandle; name=nothing, shape=nothing) + function ensure_shape_eager(input_; name=nothing, shape=nothing) desc = tf.EagerOp("EnsureShape") tf.add_input(desc, input_) if shape !== nothing @@ -24004,6 +28204,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function ensure_shape(input_; name=nothing, shape=nothing) + if tf.eager_mode + ensure_shape_eager(input_; name=name, shape=shape) + else + ensure_shape_graph(input_; name=name, shape=shape) + end + end end @@ -24013,7 +28220,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalGradientDescent") do desc = tf.NodeDescription("ApplyProximalGradientDescent") @@ -24034,7 +28241,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -24051,6 +28258,13 @@ begin desc["T"] = tf.data_type(delta_) (tf.execute(desc))[1] end + function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end + end end @@ -24060,7 +28274,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) local desc tf.with_op_name(name, "CollectiveReduce") do desc = 
tf.NodeDescription("CollectiveReduce") @@ -24091,7 +28305,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function collective_reduce(input_::tf.TensorHandle; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) desc = tf.EagerOp("CollectiveReduce") tf.add_input(desc, input_) if group_size !== nothing @@ -24118,6 +28332,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + if tf.eager_mode + collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for) + else + collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for) + end + end end @@ -24127,7 +28348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_nan(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_nan_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsNan") do desc = tf.NodeDescription("IsNan") @@ -24137,12 +28358,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_nan(x_::tf.TensorHandle; name=nothing) + function is_nan_eager(x_; name=nothing) desc = tf.EagerOp("IsNan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function is_nan(x_; name=nothing) + if tf.eager_mode + is_nan_eager(x_; name=name) + else + is_nan_graph(x_; name=name) + end + end end @@ -24152,7 +28380,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdaMax") do desc = tf.NodeDescription("ApplyAdaMax") @@ -24181,7 +28409,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_ada_max(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta1_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdaMax") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -24206,6 +28434,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + 
apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -24215,7 +28450,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeAndCropJpeg") do desc = tf.NodeDescription("DecodeAndCropJpeg") @@ -24244,7 +28479,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_and_crop_jpeg(contents_::tf.TensorHandle, crop_window_::tf.TensorHandle; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeAndCropJpeg") tf.add_input(desc, contents_) tf.add_input(desc, crop_window_) @@ -24268,6 +28503,13 @@ begin end (tf.execute(desc))[1] end + function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.eager_mode + decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end + end end @@ -24277,7 +28519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyCenteredRMSProp") do desc = tf.NodeDescription("ApplyCenteredRMSProp") @@ -24306,7 +28548,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyCenteredRMSProp") tf.add_input(desc, var_) tf.add_input(desc, mg_) @@ -24331,16 +28573,23 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_centered_rms_prop(var_, mg_, ms_, 
mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end """ - conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilterV2") do desc = tf.NodeDescription("Conv3DBackpropFilterV2") @@ -24366,7 +28615,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv3d_backprop_filter_v2(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilterV2") tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) @@ -24387,6 +28636,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end + end end @@ -24396,7 +28652,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixTriangularSolve") do desc = tf.NodeDescription("MatrixTriangularSolve") @@ -24414,7 +28670,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_triangular_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, lower=nothing, adjoint=nothing) + function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixTriangularSolve") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -24428,6 +28684,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + if tf.eager_mode + matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, 
adjoint=adjoint) + else + matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end + end end @@ -24437,7 +28700,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") @@ -24446,11 +28709,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_num_work_units_completed(reader_handle_::tf.TensorHandle; name=nothing) + function reader_num_work_units_completed_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") tf.add_input(desc, reader_handle_) (tf.execute(desc))[1] end + function reader_num_work_units_completed(reader_handle_; name=nothing) + if tf.eager_mode + reader_num_work_units_completed_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_graph(reader_handle_; name=name) + end + end end @@ -24460,7 +28730,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "WriteAudioSummary") do desc = tf.NodeDescription("WriteAudioSummary") @@ -24480,7 +28750,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_audio_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, tensor_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, max_outputs=nothing) + function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) desc = tf.EagerOp("WriteAudioSummary") tf.add_input(desc, writer_) tf.add_input(desc, step_) @@ -24492,6 +28762,13 @@ begin end (tf.execute(desc))[1] end + function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + if tf.eager_mode + write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + else + write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + end + end end @@ -24501,7 +28778,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilespec") do desc = tf.NodeDescription("ShardedFilespec") @@ -24512,12 +28789,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sharded_filespec(basename_::tf.TensorHandle, num_shards_::tf.TensorHandle; name=nothing) + function sharded_filespec_eager(basename_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilespec") tf.add_input(desc, basename_) tf.add_input(desc, num_shards_) (tf.execute(desc))[1] end + function sharded_filespec(basename_, num_shards_; name=nothing) + if tf.eager_mode + sharded_filespec_eager(basename_, num_shards_; name=name) + else 
+ sharded_filespec_graph(basename_, num_shards_; name=name) + end + end end @@ -24527,7 +28811,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function div_no_nan(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "DivNoNan") do desc = tf.NodeDescription("DivNoNan") @@ -24539,7 +28823,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function div_no_nan(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function div_no_nan_eager(x_, y_; name=nothing) desc = tf.EagerOp("DivNoNan") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -24547,6 +28831,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function div_no_nan(x_, y_; name=nothing) + if tf.eager_mode + div_no_nan_eager(x_, y_; name=name) + else + div_no_nan_graph(x_, y_; name=name) + end + end end @@ -24556,7 +28847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) local desc tf.with_op_name(name, "SparseAccumulatorApplyGradient") do desc = tf.NodeDescription("SparseAccumulatorApplyGradient") @@ -24580,7 +28871,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_accumulator_apply_gradient(handle_::tf.TensorHandle, local_step_::tf.TensorHandle, gradient_indices_::tf.TensorHandle, gradient_values_::tf.TensorHandle, gradient_shape_::tf.TensorHandle; name=nothing, dtype=nothing, has_known_shape=nothing) + function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) desc = tf.EagerOp("SparseAccumulatorApplyGradient") tf.add_input(desc, handle_) tf.add_input(desc, local_step_) @@ -24596,6 +28887,13 @@ begin desc["dtype"] = tf.data_type(gradient_values_) (tf.execute(desc))[1] end + function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + if tf.eager_mode + sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + else + sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + end + end end @@ -24605,7 +28903,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedTensorToSparse") do desc = tf.NodeDescription("RaggedTensorToSparse") @@ -24625,7 +28923,7 @@ begin end out end - function ragged_tensor_to_sparse(rt_nested_splits_::tf.TensorHandle, 
rt_dense_values_::tf.TensorHandle; name=nothing, RAGGED_RANK=nothing) + function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedTensorToSparse") tf.add_input(desc, rt_nested_splits_) tf.add_input(desc, rt_dense_values_) @@ -24635,6 +28933,13 @@ begin desc["T"] = tf.data_type(rt_dense_values_) tf.execute(desc) end + function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + if tf.eager_mode + ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + else + ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + end + end end @@ -24644,7 +28949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractVolumePatches") do desc = tf.NodeDescription("ExtractVolumePatches") @@ -24663,7 +28968,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function extract_volume_patches(input_::tf.TensorHandle; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("ExtractVolumePatches") tf.add_input(desc, input_) if ksizes !== nothing @@ -24678,6 +28983,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + else + extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + end + end end @@ -24687,7 +28999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) local desc tf.with_op_name(name, "BarrierInsertMany") do desc = tf.NodeDescription("BarrierInsertMany") @@ -24707,7 +29019,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function barrier_insert_many(handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, component_index=nothing) + function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing) desc = tf.EagerOp("BarrierInsertMany") tf.add_input(desc, handle_) tf.add_input(desc, keys_) @@ -24721,6 +29033,13 @@ begin desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) + if tf.eager_mode + barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index) + else + barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index) + end + end end @@ -24730,7 +29049,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function const_(; 
name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "Const") do desc = tf.NodeDescription("Const") @@ -24743,7 +29062,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function const_(; name=nothing, value=nothing, dtype=nothing) + function const__eager(; name=nothing, value=nothing, dtype=nothing) desc = tf.EagerOp("Const") if value !== nothing desc["value"] = TensorFlow.RawTensor(value) @@ -24753,6 +29072,13 @@ begin end (tf.execute(desc))[1] end + function const_(; name=nothing, value=nothing, dtype=nothing) + if tf.eager_mode + const__eager(; name=name, value=value, dtype=dtype) + else + const__graph(; name=name, value=value, dtype=dtype) + end + end end @@ -24762,7 +29088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "SpaceToBatch") do desc = tf.NodeDescription("SpaceToBatch") @@ -24778,7 +29104,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function space_to_batch(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, block_size=nothing) + function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing) desc = tf.EagerOp("SpaceToBatch") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -24789,6 +29115,13 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) (tf.execute(desc))[1] end + function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) + if tf.eager_mode + space_to_batch_eager(input_, paddings_; name=name, block_size=block_size) + else + space_to_batch_graph(input_, paddings_; name=name, block_size=block_size) + end + end end @@ -24798,7 +29131,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageSize") do desc = tf.NodeDescription("StageSize") @@ -24820,7 +29153,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function stage_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StageSize") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -24839,6 +29172,13 @@ begin end (tf.execute(desc))[1] end + function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -24848,7 +29188,7 @@ end """ begin - #= 
@@ -24848,7 +29188,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
         local desc
         tf.with_op_name(name, "EmptyTensorList") do
             desc = tf.NodeDescription("EmptyTensorList")
@@ -24866,7 +29206,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function empty_tensor_list(element_shape_::tf.TensorHandle, max_num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing)
+    function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
         desc = tf.EagerOp("EmptyTensorList")
         tf.add_input(desc, element_shape_)
         tf.add_input(desc, max_num_elements_)
@@ -24879,6 +29219,13 @@ begin
         desc["shape_type"] = tf.data_type(element_shape_)
         (tf.execute(desc))[1]
     end
+    function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+        if tf.eager_mode
+            empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+        else
+            empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
+        end
+    end
 end
@@ -24888,7 +29235,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
         local desc
         tf.with_op_name(name, "QuantizedConv2DAndRequantize") do
             desc = tf.NodeDescription("QuantizedConv2DAndRequantize")
@@ -24930,7 +29277,7 @@ begin
         end
         out
     end
-    function quantized_conv2d_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+    function quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
         desc = tf.EagerOp("QuantizedConv2DAndRequantize")
         tf.add_input(desc, input_)
         tf.add_input(desc, filter_)
@@ -24956,6 +29303,13 @@ begin
         desc["Tfilter"] = tf.data_type(filter_)
         tf.execute(desc)
     end
+    function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+        if tf.eager_mode
+            quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+        else
+            quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+        end
+    end
 end
strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -24965,7 +29319,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) local desc tf.with_op_name(name, "Lu") do desc = tf.NodeDescription("Lu") @@ -24983,7 +29337,7 @@ begin end out end - function lu(input_::tf.TensorHandle; name=nothing, output_idx_type=nothing) + function lu_eager(input_; name=nothing, output_idx_type=nothing) desc = tf.EagerOp("Lu") tf.add_input(desc, input_) if output_idx_type !== nothing @@ -24992,6 +29346,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function lu(input_; name=nothing, output_idx_type=nothing) + if tf.eager_mode + lu_eager(input_; name=name, output_idx_type=output_idx_type) + else + lu_graph(input_; name=name, output_idx_type=output_idx_type) + end + end end @@ -25001,7 +29362,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "DecodeCompressed") do desc = tf.NodeDescription("DecodeCompressed") @@ -25013,7 +29374,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_compressed(bytes_::tf.TensorHandle; name=nothing, compression_type=nothing) + function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing) desc = tf.EagerOp("DecodeCompressed") tf.add_input(desc, bytes_) if compression_type !== nothing @@ -25021,6 +29382,13 @@ begin end (tf.execute(desc))[1] end + function decode_compressed(bytes_; name=nothing, compression_type=nothing) + if tf.eager_mode + decode_compressed_eager(bytes_; name=name, compression_type=compression_type) + else + decode_compressed_graph(bytes_; name=name, compression_type=compression_type) + end + end end @@ -25030,7 +29398,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "GetSessionTensor") do desc = tf.NodeDescription("GetSessionTensor") @@ -25042,7 +29410,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function get_session_tensor(handle_::tf.TensorHandle; name=nothing, dtype=nothing) + function get_session_tensor_eager(handle_; name=nothing, dtype=nothing) desc = tf.EagerOp("GetSessionTensor") tf.add_input(desc, handle_) if dtype !== nothing @@ -25050,6 +29418,13 @@ begin end (tf.execute(desc))[1] end + function get_session_tensor(handle_; name=nothing, dtype=nothing) + if tf.eager_mode + get_session_tensor_eager(handle_; name=name, dtype=dtype) + else + get_session_tensor_graph(handle_; name=name, dtype=dtype) + end + end end @@ -25059,7 +29434,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
@@ -25059,7 +29434,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
         local desc
         tf.with_op_name(name, "TensorArrayGatherV3") do
             desc = tf.NodeDescription("TensorArrayGatherV3")
@@ -25078,7 +29453,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function tensor_array_gather_v3(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing)
+    function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
         desc = tf.EagerOp("TensorArrayGatherV3")
         tf.add_input(desc, handle_)
         tf.add_input(desc, indices_)
         tf.add_input(desc, flow_in_)
         if dtype !== nothing
@@ -25091,6 +29466,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
+        if tf.eager_mode
+            tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+        else
+            tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
+        end
+    end
 end
@@ -25100,7 +29482,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do
             desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug")
@@ -25127,7 +29509,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, linears_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug")
         tf.add_input(desc, parameters_)
         tf.add_input(desc, accumulators_)
         tf.add_input(desc, linears_)
         tf.add_input(desc, gradient_accumulators_)
@@ -25147,6 +29529,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        if tf.eager_mode
+            load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        else
+            load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        end
+    end
 end
@@ -25156,7 +29545,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing)
         local desc
         tf.with_op_name(name, "DestroyResourceOp") do
             desc = tf.NodeDescription("DestroyResourceOp")
@@ -25168,7 +29557,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function destroy_resource_op(resource_::tf.TensorHandle; name=nothing, ignore_lookup_error=nothing)
+    function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing)
         desc = tf.EagerOp("DestroyResourceOp")
         tf.add_input(desc, resource_)
         if ignore_lookup_error !== nothing
@@ -25176,6 +29565,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing)
+        if tf.eager_mode
+            destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
+        else
+            destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
+        end
+    end
 end
@@ -25185,7 +29581,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "TextLineReader") do
             desc = tf.NodeDescription("TextLineReader")
@@ -25201,7 +29597,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+    function text_line_reader_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
         desc = tf.EagerOp("TextLineReader")
         if skip_header_lines !== nothing
             desc["skip_header_lines"] = Base.Int(skip_header_lines)
@@ -25214,6 +29610,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+        else
+            text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
+        end
+    end
 end
@@ -25223,7 +29626,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
         local desc
         tf.with_op_name(name, "CreateSummaryDbWriter") do
             desc = tf.NodeDescription("CreateSummaryDbWriter")
@@ -25240,7 +29643,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function create_summary_db_writer(writer_::tf.TensorHandle, db_uri_::tf.TensorHandle, experiment_name_::tf.TensorHandle, run_name_::tf.TensorHandle, user_name_::tf.TensorHandle; name=nothing)
+    function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
         desc = tf.EagerOp("CreateSummaryDbWriter")
         tf.add_input(desc, writer_)
         tf.add_input(desc, db_uri_)
@@ -25249,6 +29652,13 @@ begin
         tf.add_input(desc, user_name_)
         (tf.execute(desc))[1]
     end
+    function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
+        if tf.eager_mode
+            create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
+        else
+            create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
+        end
+    end
 end
@@ -25258,7 +29668,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tanh_grad(y_, dy_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing)
         local desc
         tf.with_op_name(name, "TanhGrad") do
             desc = tf.NodeDescription("TanhGrad")
@@ -25270,7 +29680,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function tanh_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing)
+    function tanh_grad_eager(y_, dy_; name=nothing)
         desc = tf.EagerOp("TanhGrad")
         tf.add_input(desc, y_)
         tf.add_input(desc, dy_)
         desc["T"] = tf.data_type(dy_)
         (tf.execute(desc))[1]
     end
+    function tanh_grad(y_, dy_; name=nothing)
+        if tf.eager_mode
+            tanh_grad_eager(y_, dy_; name=name)
+        else
+            tanh_grad_graph(y_, dy_; name=name)
+        end
+    end
 end
@@ -25287,7 +29704,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_base64(input_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_base64_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "DecodeBase64") do
             desc = tf.NodeDescription("DecodeBase64")
@@ -25296,21 +29713,28 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function decode_base64(input_::tf.TensorHandle; name=nothing)
+    function decode_base64_eager(input_; name=nothing)
         desc = tf.EagerOp("DecodeBase64")
         tf.add_input(desc, input_)
         (tf.execute(desc))[1]
     end
+    function decode_base64(input_; name=nothing)
+        if tf.eager_mode
+            decode_base64_eager(input_; name=name)
+        else
+            decode_base64_graph(input_; name=name)
+        end
+    end
 end
 """
-    max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC)
+    max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=)
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPoolGradGradV2") do
             desc = tf.NodeDescription("MaxPoolGradGradV2")
@@ -25334,7 +29758,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function max_pool_grad_grad_v2(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle, ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing)
+    function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
         desc = tf.EagerOp("MaxPoolGradGradV2")
         tf.add_input(desc, orig_input_)
         tf.add_input(desc, orig_output_)
         tf.add_input(desc, grad_)
@@ -25352,6 +29776,13 @@ begin
         desc["T"] = tf.data_type(grad_)
         (tf.execute(desc))[1]
     end
+    function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+        if tf.eager_mode
+            max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+        else
+            max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
+        end
+    end
 end
@@ -25361,7 +29792,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
         local desc
         tf.with_op_name(name, "AudioSummaryV2") do
             desc = tf.NodeDescription("AudioSummaryV2")
@@ -25377,7 +29808,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function audio_summary_v2(tag_::tf.TensorHandle, tensor_::tf.TensorHandle, sample_rate_::tf.TensorHandle; name=nothing, max_outputs=nothing)
+    function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
         desc = tf.EagerOp("AudioSummaryV2")
         tf.add_input(desc, tag_)
         tf.add_input(desc, tensor_)
         tf.add_input(desc, sample_rate_)
         if max_outputs !== nothing
@@ -25387,6 +29818,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
+        if tf.eager_mode
+            audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+        else
+            audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
+        end
+    end
 end
@@ -25396,7 +29834,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
         local desc
         tf.with_op_name(name, "StatefulPartitionedCall") do
             desc = tf.NodeDescription("StatefulPartitionedCall")
@@ -25423,7 +29861,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function stateful_partitioned_call(args_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+    function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
         desc = tf.EagerOp("StatefulPartitionedCall")
         tf.add_input(desc, args_)
         if Tin !== nothing
@@ -25446,6 +29884,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
+        if tf.eager_mode
+            stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+        else
+            stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type)
+        end
+    end
 end
@@ -25455,7 +29900,7 @@ end
 Acts like a Concat Op that merges multiple tensors into one, however it must
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
         local desc
         tf.with_op_name(name, "_ScopedAllocatorConcat") do
             desc = tf.NodeDescription("_ScopedAllocatorConcat")
@@ -25482,7 +29927,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function _scoped_allocator_concat(backing_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+    function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
         desc = tf.EagerOp("_ScopedAllocatorConcat")
         tf.add_input(desc, backing_)
         tf.add_input(desc, inputs_)
         if shape !== nothing
@@ -25505,6 +29950,13 @@ begin
         desc["T"] = tf.data_type(inputs_)
         (tf.execute(desc))[1]
     end
+    function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
+        if tf.eager_mode
+            _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N)
+        else
+            _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N)
+        end
+    end
 end
@@ -25514,7 +29966,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
         local desc
         tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do
             desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient")
@@ -25537,7 +29989,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function fake_quant_with_min_max_args_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+    function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
         desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient")
         tf.add_input(desc, gradients_)
         tf.add_input(desc, inputs_)
         if min !== nothing
@@ -25555,6 +30007,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+        if tf.eager_mode
+            fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+        else
+            fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+        end
+    end
 end
@@ -25564,7 +30023,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
         local desc
         tf.with_op_name(name, "BatchSvd") do
             desc = tf.NodeDescription("BatchSvd")
@@ -25585,7 +30044,7 @@ begin
         end
         out
     end
-    function batch_svd(input_::tf.TensorHandle; name=nothing, compute_uv=nothing, full_matrices=nothing)
+    function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
         desc = tf.EagerOp("BatchSvd")
         tf.add_input(desc, input_)
         if compute_uv !== nothing
@@ -25597,6 +30056,13 @@ begin
         desc["T"] = tf.data_type(input_)
         tf.execute(desc)
     end
+    function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+        if tf.eager_mode
+            batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+        else
+            batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices)
+        end
+    end
 end
@@ -25606,7 +30072,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "MapStage") do
             desc = tf.NodeDescription("MapStage")
@@ -25637,7 +30103,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function map_stage(key_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+    function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
         desc = tf.EagerOp("MapStage")
         tf.add_input(desc, key_)
         tf.add_input(desc, indices_)
         tf.add_input(desc, values_)
@@ -25662,6 +30128,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+        else
+            map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name)
+        end
+    end
 end
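Where the graph builders fix dtype attributes while constructing the node, the eager builders read them off the live inputs with `tf.data_type`, as in `batch_svd_eager` above. A condensed sketch of that eager idiom, with `input_` assumed to already be a `tf.TensorHandle`:

    # Sketch of the eager attr-inference idiom used throughout this file.
    desc = tf.EagerOp("BatchSvd")
    tf.add_input(desc, input_)           # input_ is already a TensorHandle
    desc["T"] = tf.data_type(input_)     # dtype taken from the handle at call time
    tf.execute(desc)                     # BatchSvd has multiple outputs, return all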
@@ -25671,7 +30144,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceSparseApplyFtrl") do
             desc = tf.NodeDescription("ResourceSparseApplyFtrl")
@@ -25702,7 +30175,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resource_sparse_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ResourceSparseApplyFtrl")
         tf.add_input(desc, var_)
         tf.add_input(desc, accum_)
@@ -25724,6 +30197,13 @@ begin
         desc["T"] = tf.data_type(lr_power_)
         (tf.execute(desc))[1]
     end
+    function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+        else
+            resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking)
+        end
+    end
 end
@@ -25733,7 +30213,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing)
         local desc
         tf.with_op_name(name, "ResizeNearestNeighbor") do
             desc = tf.NodeDescription("ResizeNearestNeighbor")
@@ -25748,7 +30228,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resize_nearest_neighbor(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing)
+    function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing)
         desc = tf.EagerOp("ResizeNearestNeighbor")
         tf.add_input(desc, images_)
         tf.add_input(desc, size_)
         if align_corners !== nothing
@@ -25758,6 +30238,13 @@ begin
         desc["T"] = tf.data_type(images_)
         (tf.execute(desc))[1]
     end
+    function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing)
+        if tf.eager_mode
+            resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners)
+        else
+            resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners)
+        end
+    end
 end
tf.EagerOp("ExperimentalCSVDataset") tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) @@ -25817,6 +30304,13 @@ begin end (tf.execute(desc))[1] end + function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -25826,7 +30320,7 @@ end Returns x * y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMul") do desc = tf.NodeDescription("_MklMul") @@ -25847,7 +30341,7 @@ begin end out end - function _mkl_mul(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMul") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -25857,6 +30351,13 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.eager_mode + _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end + end end @@ -25866,7 +30367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiag") do desc = tf.NodeDescription("BatchMatrixDiag") @@ -25876,12 +30377,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_diag(diagonal_::tf.TensorHandle; name=nothing) + function batch_matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixDiag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) (tf.execute(desc))[1] end + function batch_matrix_diag(diagonal_; name=nothing) + if tf.eager_mode + batch_matrix_diag_eager(diagonal_; name=name) + else + batch_matrix_diag_graph(diagonal_; name=name) + end + end end @@ -25891,7 +30399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_inf(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_inf_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsInf") do desc = tf.NodeDescription("IsInf") @@ -25901,12 +30409,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_inf(x_::tf.TensorHandle; name=nothing) + function is_inf_eager(x_; name=nothing) desc = tf.EagerOp("IsInf") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function is_inf(x_; name=nothing) + if tf.eager_mode + is_inf_eager(x_; name=name) + else + is_inf_graph(x_; 
@@ -25891,7 +30399,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_inf(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_inf_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "IsInf") do
             desc = tf.NodeDescription("IsInf")
@@ -25901,12 +30409,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function is_inf(x_::tf.TensorHandle; name=nothing)
+    function is_inf_eager(x_; name=nothing)
         desc = tf.EagerOp("IsInf")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         (tf.execute(desc))[1]
     end
+    function is_inf(x_; name=nothing)
+        if tf.eager_mode
+            is_inf_eager(x_; name=name)
+        else
+            is_inf_graph(x_; name=name)
+        end
+    end
 end
@@ -25916,7 +30431,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "FixedUnigramCandidateSampler") do
             desc = tf.NodeDescription("FixedUnigramCandidateSampler")
@@ -25966,7 +30481,7 @@ begin
         end
         out
     end
-    function fixed_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+    function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
         desc = tf.EagerOp("FixedUnigramCandidateSampler")
         tf.add_input(desc, true_classes_)
         if num_true !== nothing
@@ -26007,6 +30522,13 @@ begin
         end
         tf.execute(desc)
     end
+    function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing)
+        if tf.eager_mode
+            fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2)
+        else
+            fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2)
+        end
+    end
 end
@@ -26016,7 +30538,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "SparseApplyFtrlV2") do
             desc = tf.NodeDescription("SparseApplyFtrlV2")
@@ -26049,7 +30571,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function sparse_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("SparseApplyFtrlV2")
         tf.add_input(desc, var_)
         tf.add_input(desc, accum_)
@@ -26076,6 +30598,13 @@ begin
         desc["T"] = tf.data_type(lr_power_)
         (tf.execute(desc))[1]
     end
+    function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+        else
+            sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+        end
+    end
 end
@@ -26085,7 +30614,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unravel_index(indices_, dims_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing)
         local desc
         tf.with_op_name(name, "UnravelIndex") do
             desc = tf.NodeDescription("UnravelIndex")
@@ -26099,7 +30628,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function unravel_index(indices_::tf.TensorHandle, dims_::tf.TensorHandle; name=nothing)
+    function unravel_index_eager(indices_, dims_; name=nothing)
         desc = tf.EagerOp("UnravelIndex")
         tf.add_input(desc, indices_)
         tf.add_input(desc, dims_)
         desc["Tidx"] = tf.data_type(dims_)
         (tf.execute(desc))[1]
     end
+    function unravel_index(indices_, dims_; name=nothing)
+        if tf.eager_mode
+            unravel_index_eager(indices_, dims_; name=name)
+        else
+            unravel_index_graph(indices_, dims_; name=name)
+        end
+    end
 end
@@ -26116,7 +30652,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
         local desc
         tf.with_op_name(name, "Max") do
             desc = tf.NodeDescription("Max")
@@ -26133,7 +30669,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function max(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing)
+    function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
         desc = tf.EagerOp("Max")
         tf.add_input(desc, input_)
         tf.add_input(desc, reduction_indices_)
         if keep_dims !== nothing
@@ -26144,6 +30680,13 @@ begin
         desc["Tidx"] = tf.data_type(reduction_indices_)
         (tf.execute(desc))[1]
     end
+    function max(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+        if tf.eager_mode
+            max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+        else
+            max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
+        end
+    end
 end
@@ -26153,7 +30696,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft2d(input_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft2d_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "IFFT2D") do
             desc = tf.NodeDescription("IFFT2D")
@@ -26163,12 +30706,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function ifft2d(input_::tf.TensorHandle; name=nothing)
+    function ifft2d_eager(input_; name=nothing)
         desc = tf.EagerOp("IFFT2D")
         tf.add_input(desc, input_)
         desc["Tcomplex"] = tf.data_type(input_)
         (tf.execute(desc))[1]
     end
+    function ifft2d(input_; name=nothing)
+        if tf.eager_mode
+            ifft2d_eager(input_; name=name)
+        else
+            ifft2d_graph(input_; name=name)
+        end
+    end
 end
@@ -26178,7 +30728,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
         local desc
         tf.with_op_name(name, "SparseConcat") do
             desc = tf.NodeDescription("SparseConcat")
@@ -26206,7 +30756,7 @@ begin
         end
         out
     end
-    function sparse_concat(indices_::tf.TensorHandle, values_::tf.TensorHandle, shapes_::tf.TensorHandle; name=nothing, concat_dim=nothing, N=nothing)
+    function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
        desc = tf.EagerOp("SparseConcat")
        tf.add_input(desc, indices_)
        tf.add_input(desc, values_)
        tf.add_input(desc, shapes_)
@@ -26223,6 +30773,13 @@ begin
        desc["T"] = tf.data_type(values_)
        tf.execute(desc)
     end
+    function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing)
+        if tf.eager_mode
+            sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N)
+        else
+            sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N)
+        end
+    end
 end
@@ -26232,7 +30789,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function histogram_summary(tag_, values_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing)
         local desc
         tf.with_op_name(name, "HistogramSummary") do
             desc = tf.NodeDescription("HistogramSummary")
@@ -26244,13 +30801,20 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function histogram_summary(tag_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing)
+    function histogram_summary_eager(tag_, values_; name=nothing)
        desc = tf.EagerOp("HistogramSummary")
        tf.add_input(desc, tag_)
        tf.add_input(desc, values_)
        desc["T"] = tf.data_type(values_)
        (tf.execute(desc))[1]
     end
+    function histogram_summary(tag_, values_; name=nothing)
+        if tf.eager_mode
+            histogram_summary_eager(tag_, values_; name=name)
+        else
+            histogram_summary_graph(tag_, values_; name=name)
+        end
+    end
 end
@@ -26260,7 +30824,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing)
         local desc
         tf.with_op_name(name, "SegmentSum") do
             desc = tf.NodeDescription("SegmentSum")
@@ -26274,7 +30838,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function segment_sum(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing)
+    function segment_sum_eager(data_, segment_ids_; name=nothing)
        desc = tf.EagerOp("SegmentSum")
        tf.add_input(desc, data_)
        tf.add_input(desc, segment_ids_)
       desc["T"] = tf.data_type(data_)
        desc["Tindices"] = tf.data_type(segment_ids_)
        (tf.execute(desc))[1]
     end
+    function segment_sum(data_, segment_ids_; name=nothing)
+        if tf.eager_mode
+            segment_sum_eager(data_, segment_ids_; name=name)
+        else
+            segment_sum_graph(data_, segment_ids_; name=name)
+        end
+    end
 end
@@ -26291,7 +30862,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function exp(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function exp_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Exp") do
             desc = tf.NodeDescription("Exp")
@@ -26301,12 +30872,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function exp(x_::tf.TensorHandle; name=nothing)
+    function exp_eager(x_; name=nothing)
        desc = tf.EagerOp("Exp")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        (tf.execute(desc))[1]
     end
+    function exp(x_; name=nothing)
+        if tf.eager_mode
+            exp_eager(x_; name=name)
+        else
+            exp_graph(x_; name=name)
+        end
+    end
 end
@@ -26316,7 +30894,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
         local desc
         tf.with_op_name(name, "ConfigureDistributedTPU") do
             desc = tf.NodeDescription("ConfigureDistributedTPU")
@@ -26332,7 +30910,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+    function configure_distributed_tpu_eager(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
        desc = tf.EagerOp("ConfigureDistributedTPU")
        if embedding_config !== nothing
            desc["embedding_config"] = Base.String(embedding_config)
@@ -26345,6 +30923,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing)
+        if tf.eager_mode
+            configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init)
+        else
+            configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init)
+        end
+    end
 end
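Taken together with the wrappers above, flipping the flag is all a caller needs for ops like `exp` to run immediately. A hypothetical usage sketch, assuming `tf.eager_mode` is settable and the handle constructors introduced earlier in this series:

    # Sketch only; not code from the patch.
    x = tf.TensorHandle(TensorFlow.RawTensor([1.0, 2.0]))  # wrap a Julia array in a handle
    y = exp(x)   # routes to exp_eager, so the "Exp" kernel runs at once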
@@ -26354,7 +30939,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceScatterNdSub") do
             desc = tf.NodeDescription("ResourceScatterNdSub")
@@ -26373,7 +30958,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resource_scatter_nd_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
        desc = tf.EagerOp("ResourceScatterNdSub")
        tf.add_input(desc, ref_)
        tf.add_input(desc, indices_)
        tf.add_input(desc, updates_)
@@ -26385,6 +30970,13 @@ begin
        desc["T"] = tf.data_type(updates_)
        (tf.execute(desc))[1]
     end
+    function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        else
+            resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        end
+    end
 end
@@ -26394,7 +30986,7 @@ end
 A placeholder op for multiple values that will be sent from TensorFlow to a
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
         local desc
         tf.with_op_name(name, "_XlaSendFromHost") do
             desc = tf.NodeDescription("_XlaSendFromHost")
@@ -26414,7 +31006,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function _xla_send_from_host(inputs_::tf.TensorHandle, dynamic_key_::tf.TensorHandle; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+    function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
        desc = tf.EagerOp("_XlaSendFromHost")
        tf.add_input(desc, inputs_)
        tf.add_input(desc, dynamic_key_)
        if Tinputs !== nothing
@@ -26429,6 +31021,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing)
+        if tf.eager_mode
+            _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal)
+        else
+            _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal)
+        end
+    end
 end
@@ -26438,7 +31037,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_handle_v2(value_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing)
         local desc
         tf.with_op_name(name, "GetSessionHandleV2") do
             desc = tf.NodeDescription("GetSessionHandleV2")
@@ -26448,12 +31047,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function get_session_handle_v2(value_::tf.TensorHandle; name=nothing)
+    function get_session_handle_v2_eager(value_; name=nothing)
        desc = tf.EagerOp("GetSessionHandleV2")
        tf.add_input(desc, value_)
        desc["T"] = tf.data_type(value_)
        (tf.execute(desc))[1]
     end
+    function get_session_handle_v2(value_; name=nothing)
+        if tf.eager_mode
+            get_session_handle_v2_eager(value_; name=name)
+        else
+            get_session_handle_v2_graph(value_; name=name)
+        end
+    end
 end
@@ -26463,7 +31069,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu_grad(gradients_, features_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing)
         local desc
         tf.with_op_name(name, "ReluGrad") do
             desc = tf.NodeDescription("ReluGrad")
@@ -26475,7 +31081,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function relu_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing)
+    function relu_grad_eager(gradients_, features_; name=nothing)
        desc = tf.EagerOp("ReluGrad")
        tf.add_input(desc, gradients_)
        tf.add_input(desc, features_)
        desc["T"] = tf.data_type(features_)
        (tf.execute(desc))[1]
     end
+    function relu_grad(gradients_, features_; name=nothing)
+        if tf.eager_mode
+            relu_grad_eager(gradients_, features_; name=name)
+        else
+            relu_grad_graph(gradients_, features_; name=name)
+        end
+    end
 end
@@ -26492,7 +31105,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing)
         local desc
         tf.with_op_name(name, "UnsortedSegmentMin") do
             desc = tf.NodeDescription("UnsortedSegmentMin")
@@ -26509,7 +31122,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function unsorted_segment_min(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing)
+    function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing)
        desc = tf.EagerOp("UnsortedSegmentMin")
        tf.add_input(desc, data_)
        tf.add_input(desc, segment_ids_)
        tf.add_input(desc, num_segments_)
        desc["T"] = tf.data_type(data_)
        desc["Tnumsegments"] = tf.data_type(num_segments_)
        (tf.execute(desc))[1]
     end
+    function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing)
+        if tf.eager_mode
+            unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name)
+        else
+            unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name)
+        end
+    end
 end
@@ -26528,7 +31148,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
         local desc
         tf.with_op_name(name, "ParseExample") do
             desc = tf.NodeDescription("ParseExample")
@@ -26565,7 +31185,7 @@ begin
         end
         out
     end
-    function parse_example(serialized_::tf.TensorHandle, names_::tf.TensorHandle, sparse_keys_::tf.TensorHandle, dense_keys_::tf.TensorHandle, dense_defaults_::tf.TensorHandle; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+    function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
        desc = tf.EagerOp("ParseExample")
        tf.add_input(desc, serialized_)
        tf.add_input(desc, names_)
        tf.add_input(desc, sparse_keys_)
        tf.add_input(desc, dense_keys_)
        tf.add_input(desc, dense_defaults_)
@@ -26589,6 +31209,13 @@ begin
        end
        tf.execute(desc)
     end
+    function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing)
+        if tf.eager_mode
+            parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+        else
+            parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes)
+        end
+    end
 end
@@ -26598,7 +31225,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
         local desc
         tf.with_op_name(name, "QueueEnqueueV2") do
             desc = tf.NodeDescription("QueueEnqueueV2")
@@ -26615,7 +31242,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function queue_enqueue_v2(handle_::tf.TensorHandle, components_::tf.TensorHandle; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+    function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
        desc = tf.EagerOp("QueueEnqueueV2")
        tf.add_input(desc, handle_)
        tf.add_input(desc, components_)
        if Tcomponents !== nothing
@@ -26627,6 +31254,13 @@ begin
        end
        (tf.execute(desc))[1]
     end
+    function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+        if tf.eager_mode
+            queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+        else
+            queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
+        end
+    end
 end
@@ -26636,7 +31270,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ScatterNdAdd") do
             desc = tf.NodeDescription("ScatterNdAdd")
@@ -26655,7 +31289,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function scatter_nd_add(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
        desc = tf.EagerOp("ScatterNdAdd")
        tf.add_input(desc, ref_)
        tf.add_input(desc, indices_)
        tf.add_input(desc, updates_)
@@ -26668,6 +31302,13 @@ begin
        desc["T"] = tf.data_type(updates_)
        (tf.execute(desc))[1]
     end
+    function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        else
+            scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        end
+    end
 end
@@ -26677,7 +31318,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderNumRecordsProducedV2") do
             desc = tf.NodeDescription("ReaderNumRecordsProducedV2")
@@ -26686,11 +31327,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function reader_num_records_produced_v2(reader_handle_::tf.TensorHandle; name=nothing)
+    function reader_num_records_produced_v2_eager(reader_handle_; name=nothing)
        desc = tf.EagerOp("ReaderNumRecordsProducedV2")
        tf.add_input(desc, reader_handle_)
        (tf.execute(desc))[1]
     end
+    function reader_num_records_produced_v2(reader_handle_; name=nothing)
+        if tf.eager_mode
+            reader_num_records_produced_v2_eager(reader_handle_; name=name)
+        else
+            reader_num_records_produced_v2_graph(reader_handle_; name=name)
+        end
+    end
 end
load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") @@ -26727,7 +31375,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_centered_rms_prop_parameters(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, mg_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") tf.add_input(desc, parameters_) tf.add_input(desc, ms_) @@ -26747,6 +31395,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -26756,7 +31411,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignSub") do desc = tf.NodeDescription("AssignSub") @@ -26771,7 +31426,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign_sub(ref_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, use_locking=nothing) + function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignSub") tf.add_input(desc, ref_) tf.add_input(desc, value_) @@ -26782,6 +31437,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign_sub(ref_, value_; name=nothing, use_locking=nothing) + if tf.eager_mode + assign_sub_eager(ref_, value_; name=name, use_locking=use_locking) + else + assign_sub_graph(ref_, value_; name=name, use_locking=use_locking) + end + end end @@ -26791,7 +31453,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentSum") do desc = tf.NodeDescription("UnsortedSegmentSum") @@ -26808,7 +31470,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unsorted_segment_sum(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, 
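# NOTE: a sketch of the signature change, not generated code. The removed
# eager methods dispatched on `::tf.TensorHandle` argument types; the new
# `_eager` variants drop the annotations and instead stamp dtype attributes
# at call time with `tf.data_type`, as in this hypothetical two-input op:
#
#     function my_binary_op_eager(x_, y_; name=nothing)
#         desc = tf.EagerOp("MyBinaryOp")
#         tf.add_input(desc, x_)
#         tf.add_input(desc, y_)
#         desc["T"] = tf.data_type(x_)   # infer the "T" attr from a live handle
#         desc["T"] = tf.data_type(y_)   # written once per input; dtypes must agree
#         (tf.execute(desc))[1]
#     end
#
# Dropping the annotations also gives the graph and eager paths one shared
# public signature for the dispatcher to forward to.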
num_segments_::tf.TensorHandle; name=nothing) + function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentSum") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -26818,16 +31480,23 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) (tf.execute(desc))[1] end + function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) + if tf.eager_mode + unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name) + end + end end """ - fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGrad") do desc = tf.NodeDescription("FusedBatchNormGrad") @@ -26859,7 +31528,7 @@ begin end out end - function fused_batch_norm_grad(y_backprop_::tf.TensorHandle, x_::tf.TensorHandle, scale_::tf.TensorHandle, reserve_space_1_::tf.TensorHandle, reserve_space_2_::tf.TensorHandle; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormGrad") tf.add_input(desc, y_backprop_) tf.add_input(desc, x_) @@ -26882,16 +31551,23 @@ begin desc["T"] = tf.data_type(reserve_space_2_) tf.execute(desc) end + function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.eager_mode + fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end + end end """ - max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) + max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradV2") do desc = tf.NodeDescription("MaxPoolGradV2") @@ -26915,7 +31591,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_grad_v2(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle, 
ksize_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, padding=nothing, data_format=nothing) + function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradV2") tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) @@ -26933,6 +31609,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end + end end @@ -26942,7 +31625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") @@ -26982,7 +31665,7 @@ begin end out end - function quantized_conv2d_with_bias_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRelu") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -27007,6 +31690,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -27016,7 +31706,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateEnsemble") do desc = 
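# NOTE: a sketch, not generated code. `tf.execute(desc)` returns the op's
# outputs as a collection of handles; the generator unwraps single-output
# ops and passes multi-output results through whole, e.g. the quantized
# conv op above, which produces an output plus its min/max ranges:
#
#     res = tf.execute(desc)
#     res[1]    # single-output ops return just the first handle
#     res       # multi-output ops return every output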
tf.NodeDescription("BoostedTreesCreateEnsemble") @@ -27029,13 +31719,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_create_ensemble(tree_ensemble_handle_::tf.TensorHandle, stamp_token_::tf.TensorHandle, tree_ensemble_serialized_::tf.TensorHandle; name=nothing) + function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesCreateEnsemble") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) (tf.execute(desc))[1] end + function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.eager_mode + boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end + end end @@ -27045,7 +31742,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapIncompleteSize") do desc = tf.NodeDescription("OrderedMapIncompleteSize") @@ -27067,7 +31764,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapIncompleteSize") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -27086,6 +31783,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -27095,7 +31799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) local desc tf.with_op_name(name, "Skipgram") do desc = tf.NodeDescription("Skipgram") @@ -27122,7 +31826,7 @@ begin end out end - function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + function skipgram_eager(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) desc = tf.EagerOp("Skipgram") if 
filename !== nothing desc["filename"] = Base.String(filename) @@ -27141,6 +31845,13 @@ begin end tf.execute(desc) end + function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + if tf.eager_mode + skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + else + skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + end + end end @@ -27150,7 +31861,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMin") do desc = tf.NodeDescription("ArgMin") @@ -27167,7 +31878,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function arg_min(input_::tf.TensorHandle, dimension_::tf.TensorHandle; name=nothing, output_type=nothing) + function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMin") tf.add_input(desc, input_) tf.add_input(desc, dimension_) @@ -27178,6 +31889,13 @@ begin desc["Tidx"] = tf.data_type(dimension_) (tf.execute(desc))[1] end + function arg_min(input_, dimension_; name=nothing, output_type=nothing) + if tf.eager_mode + arg_min_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_min_graph(input_, dimension_; name=name, output_type=output_type) + end + end end @@ -27187,7 +31905,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueMany") do desc = tf.NodeDescription("QueueDequeueMany") @@ -27204,7 +31922,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_many(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueMany") tf.add_input(desc, handle_) tf.add_input(desc, n_) @@ -27216,6 +31934,13 @@ begin end (tf.execute(desc))[1] end + function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end + end end @@ -27225,7 +31950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") @@ -27239,11 +31964,18 @@ begin end out end - function 
boosted_trees_serialize_ensemble(tree_ensemble_handle_::tf.TensorHandle; name=nothing)
+    function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing)
         desc = tf.EagerOp("BoostedTreesSerializeEnsemble")
         tf.add_input(desc, tree_ensemble_handle_)
         tf.execute(desc)
     end
+    function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing)
+        if tf.eager_mode
+            boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name)
+        else
+            boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name)
+        end
+    end
 end
@@ -27253,7 +31985,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function minimum(x_, y_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function minimum_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "Minimum") do
             desc = tf.NodeDescription("Minimum")
@@ -27265,7 +31997,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function minimum(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing)
+    function minimum_eager(x_, y_; name=nothing)
         desc = tf.EagerOp("Minimum")
         tf.add_input(desc, x_)
         tf.add_input(desc, y_)
@@ -27273,16 +32005,23 @@ begin
         desc["T"] = tf.data_type(y_)
         (tf.execute(desc))[1]
     end
+    function minimum(x_, y_; name=nothing)
+        if tf.eager_mode
+            minimum_eager(x_, y_; name=name)
+        else
+            minimum_graph(x_, y_; name=name)
+        end
+    end
 end
 
 
 """
-    substr(input, pos, len; unit=BYTE)
+    substr(input, pos, len; unit=)
 
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing)
         local desc
         tf.with_op_name(name, "Substr") do
             desc = tf.NodeDescription("Substr")
@@ -27299,7 +32038,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function substr(input_::tf.TensorHandle, pos_::tf.TensorHandle, len_::tf.TensorHandle; name=nothing, unit=nothing)
+    function substr_eager(input_, pos_, len_; name=nothing, unit=nothing)
         desc = tf.EagerOp("Substr")
         tf.add_input(desc, input_)
         tf.add_input(desc, pos_)
@@ -27311,6 +32050,13 @@ begin
         desc["T"] = tf.data_type(len_)
         (tf.execute(desc))[1]
     end
+    function substr(input_, pos_, len_; name=nothing, unit=nothing)
+        if tf.eager_mode
+            substr_eager(input_, pos_, len_; name=name, unit=unit)
+        else
+            substr_graph(input_, pos_, len_; name=name, unit=unit)
+        end
+    end
 end
@@ -27320,7 +32066,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_size(handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_size_graph(handle_; name=nothing)
         local desc
         tf.with_op_name(name, "QueueSize") do
             desc = tf.NodeDescription("QueueSize")
@@ -27329,11 +32075,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function queue_size(handle_::tf.TensorHandle; name=nothing)
+    function queue_size_eager(handle_; name=nothing)
         desc = tf.EagerOp("QueueSize")
         tf.add_input(desc, handle_)
         (tf.execute(desc))[1]
     end
+    function queue_size(handle_; name=nothing)
+        if tf.eager_mode
+            queue_size_eager(handle_; name=name)
+        else
+            queue_size_graph(handle_; name=name)
+        end
+    end
 end
@@ -27343,7 +32096,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing,
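# NOTE: a usage sketch under assumptions, not generated code. With
# `tf.eager_mode` set to true, the public wrappers above run immediately;
# this assumes the TensorHandle/RawTensor construction and the `resolve`
# helper from earlier in this patch series:
#
#     x = tf.TensorHandle(tf.RawTensor([1.0, 5.0]))
#     y = tf.TensorHandle(tf.RawTensor([3.0, 2.0]))
#     h = minimum(x, y)    # dispatches to minimum_eager and runs Minimum now
#     tf.resolve(h)        # copies the result back into a RawTensor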
use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrlV2") do desc = tf.NodeDescription("ApplyFtrlV2") @@ -27372,7 +32125,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrlV2") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -27397,6 +32150,13 @@ begin desc["T"] = tf.data_type(lr_power_) (tf.execute(desc))[1] end + function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end + end end @@ -27406,7 +32166,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") @@ -27429,7 +32189,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_momentum_parameters(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) @@ -27447,6 +32207,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -27456,7 +32223,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; 
name=nothing) local desc tf.with_op_name(name, "SparseSegmentMean") do desc = tf.NodeDescription("SparseSegmentMean") @@ -27472,7 +32239,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_mean(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentMean") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -27481,6 +32248,13 @@ begin desc["Tidx"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + if tf.eager_mode + sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) + end + end end @@ -27490,7 +32264,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceApplyProximalAdagrad") @@ -27513,7 +32287,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -27530,6 +32304,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end + end end @@ -27539,7 +32320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV2") do desc = tf.NodeDescription("TensorArrayGatherV2") @@ -27558,7 +32339,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_gather_v2(handle_::tf.TensorHandle, indices_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV2") tf.add_input(desc, handle_) tf.add_input(desc, indices_) @@ -27571,6 +32352,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.eager_mode + 
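# NOTE: a sketch of the existing convention, not new code. Optional
# attributes use `nothing` as a sentinel on both backends: the attr is only
# written when the caller supplied a value, so TensorFlow's registered
# default applies otherwise, as in the `use_locking` guard above:
#
#     if use_locking !== nothing
#         desc["use_locking"] = Base.Bool(use_locking)
#     end
#
# That sentinel is what lets each dispatcher forward every keyword argument
# verbatim to either backend without inspecting it.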
tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end + end end @@ -27580,7 +32368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function less(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function less_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Less") do desc = tf.NodeDescription("Less") @@ -27592,7 +32380,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function less(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function less_eager(x_, y_; name=nothing) desc = tf.EagerOp("Less") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -27600,6 +32388,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function less(x_, y_; name=nothing) + if tf.eager_mode + less_eager(x_, y_; name=name) + else + less_graph(x_, y_; name=name) + end + end end @@ -27609,7 +32404,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "HostConst") do desc = tf.NodeDescription("HostConst") @@ -27622,7 +32417,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function host_const(; name=nothing, value=nothing, dtype=nothing) + function host_const_eager(; name=nothing, value=nothing, dtype=nothing) desc = tf.EagerOp("HostConst") if value !== nothing desc["value"] = TensorFlow.RawTensor(value) @@ -27632,6 +32427,13 @@ begin end (tf.execute(desc))[1] end + function host_const(; name=nothing, value=nothing, dtype=nothing) + if tf.eager_mode + host_const_eager(; name=name, value=value, dtype=dtype) + else + host_const_graph(; name=name, value=value, dtype=dtype) + end + end end @@ -27641,7 +32443,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "UpperBound") do desc = tf.NodeDescription("UpperBound") @@ -27656,7 +32458,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function upper_bound(sorted_inputs_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, out_type=nothing) + function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("UpperBound") tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) @@ -27667,6 +32469,13 @@ begin desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + if tf.eager_mode + upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) + else + upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + end + end end @@ -27676,7 +32485,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op 
function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGetItem") do desc = tf.NodeDescription("TensorListGetItem") @@ -27692,7 +32501,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_get_item(input_handle_::tf.TensorHandle, index_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGetItem") tf.add_input(desc, input_handle_) tf.add_input(desc, index_) @@ -27702,6 +32511,13 @@ begin end (tf.execute(desc))[1] end + function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + if tf.eager_mode + tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=name, element_dtype=element_dtype) + else + tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=name, element_dtype=element_dtype) + end + end end @@ -27711,7 +32527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVars") do desc = tf.NodeDescription("FakeQuantWithMinMaxVars") @@ -27730,7 +32546,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fake_quant_with_min_max_vars(inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVars") tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -27743,6 +32559,13 @@ begin end (tf.execute(desc))[1] end + function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.eager_mode + fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end + end end @@ -27752,7 +32575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") @@ -27761,11 +32584,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_::tf.TensorHandle; name=nothing) + function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") tf.add_input(desc, quantile_stream_resource_handle_) 
(tf.execute(desc))[1] end + function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) + if tf.eager_mode + is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name) + else + is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name) + end + end end @@ -27775,7 +32605,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpToV2") do desc = tf.NodeDescription("ReaderReadUpToV2") @@ -27793,13 +32623,20 @@ begin end out end - function reader_read_up_to_v2(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle, num_records_::tf.TensorHandle; name=nothing) + function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpToV2") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) tf.execute(desc) end + function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.eager_mode + reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name) + end + end end @@ -27809,7 +32646,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function complex(real_, imag_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function complex_graph(real_, imag_; name=nothing) local desc tf.with_op_name(name, "Complex") do desc = tf.NodeDescription("Complex") @@ -27821,7 +32658,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function complex(real_::tf.TensorHandle, imag_::tf.TensorHandle; name=nothing) + function complex_eager(real_, imag_; name=nothing) desc = tf.EagerOp("Complex") tf.add_input(desc, real_) tf.add_input(desc, imag_) @@ -27829,6 +32666,13 @@ begin desc["T"] = tf.data_type(imag_) (tf.execute(desc))[1] end + function complex(real_, imag_; name=nothing) + if tf.eager_mode + complex_eager(real_, imag_; name=name) + else + complex_graph(real_, imag_; name=name) + end + end end @@ -27838,7 +32682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListReserve") do desc = tf.NodeDescription("TensorListReserve") @@ -27856,7 +32700,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_reserve(element_shape_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListReserve") tf.add_input(desc, element_shape_) tf.add_input(desc, num_elements_) @@ -27869,6 +32713,13 @@ begin 
desc["shape_type"] = tf.data_type(element_shape_) (tf.execute(desc))[1] end + function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end @@ -27878,7 +32729,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitcast(input_; name=nothing, type_=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) local desc tf.with_op_name(name, "Bitcast") do desc = tf.NodeDescription("Bitcast") @@ -27891,7 +32742,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bitcast(input_::tf.TensorHandle; name=nothing, type_=nothing) + function bitcast_eager(input_; name=nothing, type_=nothing) desc = tf.EagerOp("Bitcast") tf.add_input(desc, input_) if type_ !== nothing @@ -27900,6 +32751,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function bitcast(input_; name=nothing, type_=nothing) + if tf.eager_mode + bitcast_eager(input_; name=name, type_=type_) + else + bitcast_graph(input_; name=name, type_=type_) + end + end end @@ -27909,7 +32767,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueue") do desc = tf.NodeDescription("PriorityQueue") @@ -27931,7 +32789,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + function priority_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PriorityQueue") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -27950,6 +32808,13 @@ begin end (tf.execute(desc))[1] end + function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end end @@ -27959,7 +32824,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, 
t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") @@ -28011,7 +32876,7 @@ begin end out end - function quantized_batch_norm_with_global_normalization(t_::tf.TensorHandle, t_min_::tf.TensorHandle, t_max_::tf.TensorHandle, m_::tf.TensorHandle, m_min_::tf.TensorHandle, m_max_::tf.TensorHandle, v_::tf.TensorHandle, v_min_::tf.TensorHandle, v_max_::tf.TensorHandle, beta_::tf.TensorHandle, beta_min_::tf.TensorHandle, beta_max_::tf.TensorHandle, gamma_::tf.TensorHandle, gamma_min_::tf.TensorHandle, gamma_max_::tf.TensorHandle; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") tf.add_input(desc, t_) tf.add_input(desc, t_min_) @@ -28044,6 +32909,13 @@ begin desc["Tinput"] = tf.data_type(gamma_) tf.execute(desc) end + function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.eager_mode + quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end + end end @@ -28053,7 +32925,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cos(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cos") do desc = tf.NodeDescription("Cos") @@ -28063,12 +32935,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cos(x_::tf.TensorHandle; name=nothing) + function cos_eager(x_; name=nothing) desc = tf.EagerOp("Cos") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function cos(x_; name=nothing) + if tf.eager_mode + cos_eager(x_; name=name) + else + cos_graph(x_; name=name) + end + end end @@ -28078,7 +32957,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizeDownAndShrinkRange") do desc = 
tf.NodeDescription("QuantizeDownAndShrinkRange") @@ -28100,7 +32979,7 @@ begin end out end - function quantize_down_and_shrink_range(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, out_type=nothing) + function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizeDownAndShrinkRange") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -28111,6 +32990,13 @@ begin desc["Tinput"] = tf.data_type(input_) tf.execute(desc) end + function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) + if tf.eager_mode + quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type) + else + quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type) + end + end end @@ -28120,7 +33006,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRandomDataset") do desc = tf.NodeDescription("ExperimentalRandomDataset") @@ -28137,7 +33023,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_random_dataset(seed_::tf.TensorHandle, seed2_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRandomDataset") tf.add_input(desc, seed_) tf.add_input(desc, seed2_) @@ -28149,6 +33035,13 @@ begin end (tf.execute(desc))[1] end + function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -28158,7 +33051,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "Rpc") do desc = tf.NodeDescription("Rpc") @@ -28180,7 +33073,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rpc(address_::tf.TensorHandle, method_::tf.TensorHandle, request_::tf.TensorHandle; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("Rpc") tf.add_input(desc, address_) tf.add_input(desc, method_) @@ -28196,6 +33089,13 @@ begin end (tf.execute(desc))[1] end + function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.eager_mode + rpc_eager(address_, method_, request_; name=name, protocol=protocol, 
fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end + end end @@ -28205,7 +33105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") @@ -28257,7 +33157,7 @@ begin end out end - function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle, summand_::tf.TensorHandle, min_summand_::tf.TensorHandle, max_summand_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -28289,6 +33189,13 @@ begin desc["Tsummand"] = tf.data_type(summand_) tf.execute(desc) end + function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -28298,7 +33205,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_length(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_length_graph(input_handle_; 
name=nothing) local desc tf.with_op_name(name, "TensorListLength") do desc = tf.NodeDescription("TensorListLength") @@ -28307,11 +33214,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_length(input_handle_::tf.TensorHandle; name=nothing) + function tensor_list_length_eager(input_handle_; name=nothing) desc = tf.EagerOp("TensorListLength") tf.add_input(desc, input_handle_) (tf.execute(desc))[1] end + function tensor_list_length(input_handle_; name=nothing) + if tf.eager_mode + tensor_list_length_eager(input_handle_; name=name) + else + tensor_list_length_graph(input_handle_; name=name) + end + end end @@ -28321,7 +33235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapIncompleteSize") do desc = tf.NodeDescription("MapIncompleteSize") @@ -28343,7 +33257,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapIncompleteSize") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -28362,6 +33276,13 @@ begin end (tf.execute(desc))[1] end + function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -28371,7 +33292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "StatelessWhile") do desc = tf.NodeDescription("StatelessWhile") @@ -28389,7 +33310,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_while(input_::tf.TensorHandle; name=nothing, T=nothing, cond=nothing, body=nothing) + function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("StatelessWhile") tf.add_input(desc, input_) if T !== nothing @@ -28403,16 +33324,23 @@ begin end (tf.execute(desc))[1] end + function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.eager_mode + stateless_while_eager(input_; name=name, T=T, cond=cond, body=body) + else + stateless_while_graph(input_; name=name, T=T, cond=cond, body=body) + end + end end """ - sparse_conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + sparse_conditional_accumulator(; container=, shared_name=, reduction_type=) """ begin - #= 
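# NOTE: a trimmed, hypothetical sketch, not generated code. Source-less ops
# such as MapIncompleteSize above take no tensor inputs; on the eager path
# the op is driven purely by its attributes:
#
#     function map_incomplete_size_sketch(; capacity=nothing)
#         desc = tf.EagerOp("MapIncompleteSize")
#         if capacity !== nothing
#             desc["capacity"] = Base.Int(capacity)
#         end
#         (tf.execute(desc))[1]   # no tf.add_input calls at all
#     end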
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "SparseConditionalAccumulator") do desc = tf.NodeDescription("SparseConditionalAccumulator") @@ -28434,7 +33362,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + function sparse_conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) desc = tf.EagerOp("SparseConditionalAccumulator") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -28453,6 +33381,13 @@ begin end (tf.execute(desc))[1] end + function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.eager_mode + sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end + end end @@ -28462,7 +33397,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_min(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMin") do desc = tf.NodeDescription("SegmentMin") @@ -28476,7 +33411,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function segment_min(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function segment_min_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMin") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -28484,6 +33419,13 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) (tf.execute(desc))[1] end + function segment_min(data_, segment_ids_; name=nothing) + if tf.eager_mode + segment_min_eager(data_, segment_ids_; name=name) + else + segment_min_graph(data_, segment_ids_; name=name) + end + end end @@ -28493,7 +33435,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) local desc tf.with_op_name(name, "WriteGraphSummary") do desc = tf.NodeDescription("WriteGraphSummary") @@ -28506,13 +33448,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_graph_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing) + function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) desc = tf.EagerOp("WriteGraphSummary") tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) (tf.execute(desc))[1] end + function write_graph_summary(writer_, step_, 
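# NOTE: a sketch, not generated code. The `name` keyword only matters on the
# graph path, where `tf.with_op_name` scopes the node name; the `_eager`
# variants accept `name=nothing` but never consult it, which keeps one
# public signature for both backends:
#
#     # graph mode: the SegmentMin node above is created under "scope"
#     segment_min(data, ids; name="scope")
#     # eager mode: the same call is accepted, but the name is ignored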
tensor_; name=nothing) + if tf.eager_mode + write_graph_summary_eager(writer_, step_, tensor_; name=name) + else + write_graph_summary_graph(writer_, step_, tensor_; name=name) + end + end end @@ -28522,7 +33471,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cholesky_grad(l_, grad_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "CholeskyGrad") do desc = tf.NodeDescription("CholeskyGrad") @@ -28534,7 +33483,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cholesky_grad(l_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing) + function cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("CholeskyGrad") tf.add_input(desc, l_) tf.add_input(desc, grad_) @@ -28542,6 +33491,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function cholesky_grad(l_, grad_; name=nothing) + if tf.eager_mode + cholesky_grad_eager(l_, grad_; name=name) + else + cholesky_grad_graph(l_, grad_; name=name) + end + end end @@ -28551,7 +33507,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LogUniformCandidateSampler") do desc = tf.NodeDescription("LogUniformCandidateSampler") @@ -28583,7 +33539,7 @@ begin end out end - function log_uniform_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LogUniformCandidateSampler") tf.add_input(desc, true_classes_) if num_true !== nothing @@ -28606,6 +33562,13 @@ begin end tf.execute(desc) end + function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end + end end @@ -28615,7 +33578,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeSparse") do desc = tf.NodeDescription("SerializeSparse") @@ -28632,7 +33595,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
serialize_sparse(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, out_type=nothing) + function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeSparse") tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) @@ -28643,6 +33606,13 @@ begin desc["T"] = tf.data_type(sparse_values_) (tf.execute(desc))[1] end + function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.eager_mode + serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end + end end @@ -28652,7 +33622,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "ScatterNdNonAliasingAdd") do desc = tf.NodeDescription("ScatterNdNonAliasingAdd") @@ -28668,7 +33638,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_nd_non_aliasing_add(input_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) desc = tf.EagerOp("ScatterNdNonAliasingAdd") tf.add_input(desc, input_) tf.add_input(desc, indices_) @@ -28678,6 +33648,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) + if tf.eager_mode + scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name) + else + scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name) + end + end end @@ -28687,7 +33664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefMerge") do desc = tf.NodeDescription("RefMerge") @@ -28705,7 +33682,7 @@ begin end out end - function ref_merge(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function ref_merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefMerge") tf.add_input(desc, inputs_) if N !== nothing @@ -28714,6 +33691,13 @@ begin desc["T"] = tf.data_type(inputs_) tf.execute(desc) end + function ref_merge(inputs_; name=nothing, N=nothing) + if tf.eager_mode + ref_merge_eager(inputs_; name=name, N=N) + else + ref_merge_graph(inputs_; name=name, N=N) + end + end end @@ -28723,7 +33707,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorListConcat") do desc = tf.NodeDescription("TensorListConcat") @@ -28743,7 +33727,7 @@ begin end out end - function 
tensor_list_concat(input_handle_::tf.TensorHandle; name=nothing, element_dtype=nothing, element_shape=nothing) + function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorListConcat") tf.add_input(desc, input_handle_) if element_dtype !== nothing @@ -28754,16 +33738,23 @@ begin end tf.execute(desc) end + function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + if tf.eager_mode + tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape) + else + tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape) + end + end end """ - cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNCanonicalToParams") do desc = tf.NodeDescription("CudnnRNNCanonicalToParams") @@ -28802,7 +33793,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cudnn_rnn_canonical_to_params(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle, weights_::tf.TensorHandle, biases_::tf.TensorHandle; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNCanonicalToParams") tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) @@ -28834,6 +33825,13 @@ begin desc["T"] = tf.data_type(biases_) (tf.execute(desc))[1] end + function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end + end end @@ -28843,7 +33841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
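
Two return conventions appear in the eager bodies above: single-output ops unwrap the first handle from tf.execute, while multi-output ops such as ref_merge and tensor_list_concat hand back the whole vector of handles. Schematically, the two tail expressions are:

    (tf.execute(desc))[1]   # single-output op: unwrap the lone TensorHandle
    tf.execute(desc)        # multi-output op: return the Vector of handles intact
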
sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdadelta") do desc = tf.NodeDescription("SparseApplyAdadelta") @@ -28872,7 +33870,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdadelta") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -28895,6 +33893,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -28904,7 +33909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayClose") do desc = tf.NodeDescription("TensorArrayClose") @@ -28913,11 +33918,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_close(handle_::tf.TensorHandle; name=nothing) + function tensor_array_close_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayClose") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function tensor_array_close(handle_; name=nothing) + if tf.eager_mode + tensor_array_close_eager(handle_; name=name) + else + tensor_array_close_graph(handle_; name=name) + end + end end @@ -28927,7 +33939,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "SeluGrad") do desc = tf.NodeDescription("SeluGrad") @@ -28939,7 +33951,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function selu_grad(gradients_::tf.TensorHandle, outputs_::tf.TensorHandle; name=nothing) + function selu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("SeluGrad") tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) @@ -28947,16 +33959,23 @@ begin desc["T"] = tf.data_type(outputs_) (tf.execute(desc))[1] end + function selu_grad(gradients_, outputs_; name=nothing) + if tf.eager_mode + selu_grad_eager(gradients_, outputs_; name=name) + else + selu_grad_graph(gradients_, outputs_; name=name) + end + end end """ - crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=bilinear) + 
crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradImage") do desc = tf.NodeDescription("CropAndResizeGradImage") @@ -28974,7 +33993,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function crop_and_resize_grad_image(grads_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle, image_size_::tf.TensorHandle; name=nothing, method=nothing) + function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradImage") tf.add_input(desc, grads_) tf.add_input(desc, boxes_) @@ -28985,6 +34004,13 @@ begin end (tf.execute(desc))[1] end + function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + if tf.eager_mode + crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + else + crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + end + end end @@ -28994,7 +34020,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rfft(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT") do desc = tf.NodeDescription("RFFT") @@ -29005,12 +34031,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rfft(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function rfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function rfft(input_, fft_length_; name=nothing) + if tf.eager_mode + rfft_eager(input_, fft_length_; name=name) + else + rfft_graph(input_, fft_length_; name=name) + end + end end @@ -29020,7 +34053,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSqlDataset") do desc = tf.NodeDescription("ExperimentalSqlDataset") @@ -29039,7 +34072,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_sql_dataset(driver_name_::tf.TensorHandle, data_source_name_::tf.TensorHandle, query_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSqlDataset") tf.add_input(desc, driver_name_) tf.add_input(desc, data_source_name_) @@ -29052,6 +34085,13 @@ begin end (tf.execute(desc))[1] end + function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, 
output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -29061,7 +34101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyPowerSign") do desc = tf.NodeDescription("ResourceApplyPowerSign") @@ -29086,7 +34126,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_power_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, logbase_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyPowerSign") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -29105,6 +34145,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end + end end @@ -29114,7 +34161,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_determinant(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDeterminant") do desc = tf.NodeDescription("MatrixDeterminant") @@ -29124,12 +34171,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_determinant(input_::tf.TensorHandle; name=nothing) + function matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_determinant(input_; name=nothing) + if tf.eager_mode + matrix_determinant_eager(input_; name=name) + else + matrix_determinant_graph(input_; name=name) + end + end end @@ -29139,7 +34193,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) local desc tf.with_op_name(name, "StaticRegexReplace") do desc = tf.NodeDescription("StaticRegexReplace") @@ -29157,7 +34211,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function static_regex_replace(input_::tf.TensorHandle; name=nothing, pattern=nothing, 
rewrite=nothing, replace_global=nothing) + function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) desc = tf.EagerOp("StaticRegexReplace") tf.add_input(desc, input_) if pattern !== nothing @@ -29171,16 +34225,23 @@ begin end (tf.execute(desc))[1] end + function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + if tf.eager_mode + static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + else + static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + end + end end """ - avg_pool(value; data_format=NHWC) + avg_pool(value; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool") do desc = tf.NodeDescription("AvgPool") @@ -29202,7 +34263,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function avg_pool(value_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool") tf.add_input(desc, value_) if ksize !== nothing @@ -29220,6 +34281,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -29229,7 +34297,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseAdd") do desc = tf.NodeDescription("SparseDenseCwiseAdd") @@ -29245,7 +34313,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_dense_cwise_add(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle, dense_::tf.TensorHandle; name=nothing) + function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseAdd") tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) @@ -29255,6 +34323,13 @@ begin desc["T"] = tf.data_type(dense_) (tf.execute(desc))[1] end + function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.eager_mode + sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end + end end @@ -29264,7 +34339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add_v1(value_, bias_; 
name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) local desc tf.with_op_name(name, "BiasAddV1") do desc = tf.NodeDescription("BiasAddV1") @@ -29276,7 +34351,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bias_add_v1(value_::tf.TensorHandle, bias_::tf.TensorHandle; name=nothing) + function bias_add_v1_eager(value_, bias_; name=nothing) desc = tf.EagerOp("BiasAddV1") tf.add_input(desc, value_) tf.add_input(desc, bias_) @@ -29284,6 +34359,13 @@ begin desc["T"] = tf.data_type(bias_) (tf.execute(desc))[1] end + function bias_add_v1(value_, bias_; name=nothing) + if tf.eager_mode + bias_add_v1_eager(value_, bias_; name=name) + else + bias_add_v1_graph(value_, bias_; name=name) + end + end end @@ -29293,7 +34375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function invert_permutation(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function invert_permutation_graph(x_; name=nothing) local desc tf.with_op_name(name, "InvertPermutation") do desc = tf.NodeDescription("InvertPermutation") @@ -29303,12 +34385,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function invert_permutation(x_::tf.TensorHandle; name=nothing) + function invert_permutation_eager(x_; name=nothing) desc = tf.EagerOp("InvertPermutation") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function invert_permutation(x_; name=nothing) + if tf.eager_mode + invert_permutation_eager(x_; name=name) + else + invert_permutation_graph(x_; name=name) + end + end end @@ -29318,7 +34407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTableV2") do desc = tf.NodeDescription("HashTableV2") @@ -29340,7 +34429,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + function hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("HashTableV2") if container !== nothing desc["container"] = Base.String(container) @@ -29359,6 +34448,13 @@ begin end (tf.execute(desc))[1] end + function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.eager_mode + hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end + end end @@ -29368,7 +34464,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, 
use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "SparseApplyMomentum") do desc = tf.NodeDescription("SparseApplyMomentum") @@ -29396,7 +34492,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("SparseApplyMomentum") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -29418,6 +34514,13 @@ begin desc["T"] = tf.data_type(momentum_) (tf.execute(desc))[1] end + function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end @@ -29427,7 +34530,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueue") do desc = tf.NodeDescription("InfeedEnqueue") @@ -29449,7 +34552,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function infeed_enqueue(input_::tf.TensorHandle; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueue") tf.add_input(desc, input_) if dtype !== nothing @@ -29467,6 +34570,13 @@ begin desc["dtype"] = tf.data_type(input_) (tf.execute(desc))[1] end + function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + if tf.eager_mode + infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal) + else + infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal) + end + end end @@ -29476,7 +34586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniformInt") do desc = tf.NodeDescription("StatelessRandomUniformInt") @@ -29497,7 +34607,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_random_uniform_int(shape_::tf.TensorHandle, seed_::tf.TensorHandle, 
minval_::tf.TensorHandle, maxval_::tf.TensorHandle; name=nothing, dtype=nothing) + function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniformInt") tf.add_input(desc, shape_) tf.add_input(desc, seed_) @@ -29512,6 +34622,13 @@ begin desc["dtype"] = tf.data_type(maxval_) (tf.execute(desc))[1] end + function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + if tf.eager_mode + stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + else + stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + end + end end @@ -29521,7 +34638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -29548,7 +34665,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, updates_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -29568,6 +34685,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -29577,7 +34701,7 @@ end Sends the named tensor from send_device to recv_device. 
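
Because there is no graph from which to infer attributes, each eager body sets every attribute explicitly before executing: optional keyword attrs are coerced with Base.Int/Base.String/Base.Bool (or Base.identity for dtypes) and only written when supplied, while tensor-type attrs such as "T", "dtype", and "Tindices" are read off the concrete inputs with tf.data_type. The attribute-handling portion of stateless_random_uniform_int_eager above follows this shape:

    desc = tf.EagerOp("StatelessRandomUniformInt")
    tf.add_input(desc, shape_)
    tf.add_input(desc, seed_)
    tf.add_input(desc, minval_)
    tf.add_input(desc, maxval_)
    if dtype !== nothing
        desc["dtype"] = Base.identity(dtype)   # keyword attr, set only when given
    end
    # type attrs are then (re)derived from the concrete input handles;
    # the last assignment wins
    desc["dtype"] = tf.data_type(minval_)
    desc["dtype"] = tf.data_type(maxval_)
    (tf.execute(desc))[1]
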
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Send") do desc = tf.NodeDescription("_Send") @@ -29602,7 +34726,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _send(tensor_::tf.TensorHandle; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Send") tf.add_input(desc, tensor_) if tensor_name !== nothing @@ -29623,6 +34747,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.eager_mode + _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end + end end @@ -29632,7 +34763,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapPeek") do desc = tf.NodeDescription("MapPeek") @@ -29658,7 +34789,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_peek(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapPeek") tf.add_input(desc, key_) tf.add_input(desc, indices_) @@ -29679,6 +34810,13 @@ begin end (tf.execute(desc))[1] end + function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -29688,7 +34826,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) local desc tf.with_op_name(name, "WriteScalarSummary") do desc = tf.NodeDescription("WriteScalarSummary") @@ -29704,7 +34842,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_scalar_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing) + function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) desc = tf.EagerOp("WriteScalarSummary") tf.add_input(desc, writer_) tf.add_input(desc, step_) @@ -29713,6 +34851,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) + if tf.eager_mode + write_scalar_summary_eager(writer_, step_, tag_, value_; name=name) + else + write_scalar_summary_graph(writer_, step_, tag_, value_; name=name) + end + end end @@ -29722,7 +34867,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstageNoKey") do desc = tf.NodeDescription("OrderedMapUnstageNoKey") @@ -29751,7 +34896,7 @@ begin end out end - function ordered_map_unstage_no_key(indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstageNoKey") tf.add_input(desc, indices_) if capacity !== nothing @@ -29771,6 +34916,13 @@ begin end tf.execute(desc) end + function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -29780,7 +34932,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyCenteredRMSProp") do desc = tf.NodeDescription("SparseApplyCenteredRMSProp") @@ -29813,7 +34965,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, 
momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyCenteredRMSProp") tf.add_input(desc, var_) tf.add_input(desc, mg_) @@ -29840,6 +34992,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -29849,7 +35008,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatterV2") do desc = tf.NodeDescription("TensorListScatterV2") @@ -29872,7 +35031,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_scatter_v2(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatterV2") tf.add_input(desc, tensor_) tf.add_input(desc, indices_) @@ -29888,16 +35047,23 @@ begin desc["shape_type"] = tf.data_type(element_shape_) (tf.execute(desc))[1] end + function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end """ - conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInputV2") do desc = tf.NodeDescription("Conv3DBackpropInputV2") @@ -29924,7 +35090,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
conv3d_backprop_input_v2(input_sizes_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInputV2") tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) @@ -29946,6 +35112,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end + end end @@ -29955,7 +35128,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") @@ -29979,7 +35152,7 @@ begin end out end - function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -29995,6 +35168,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -30004,7 +35184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomShuffle") do desc = tf.NodeDescription("RandomShuffle") @@ -30020,7 +35200,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_shuffle(value_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing) + function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomShuffle") tf.add_input(desc, 
value_) if seed !== nothing @@ -30032,6 +35212,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2) + else + random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2) + end + end end @@ -30041,7 +35228,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "UniformCandidateSampler") do desc = tf.NodeDescription("UniformCandidateSampler") @@ -30073,7 +35260,7 @@ begin end out end - function uniform_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("UniformCandidateSampler") tf.add_input(desc, true_classes_) if num_true !== nothing @@ -30096,6 +35283,13 @@ begin end tf.execute(desc) end + function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end + end end @@ -30105,7 +35299,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV2") do desc = tf.NodeDescription("TensorArraySplitV2") @@ -30121,7 +35315,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_split_v2(handle_::tf.TensorHandle, value_::tf.TensorHandle, lengths_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplitV2") tf.add_input(desc, handle_) tf.add_input(desc, value_) @@ -30130,6 +35324,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name) + end + end end @@ -30139,7 +35340,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, 
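
The practical effect of the dispatchers is that call sites need no mode check of their own. A hypothetical usage sketch (not part of the patch), assuming x already holds a tensor in whichever mode is active:

    # The same call is valid in both modes; random_shuffle above routes it.
    shuffled = random_shuffle(x; seed=42)
    # graph mode: x is a tf.Tensor, the result is a symbolic tf.Tensor
    # eager mode: x is a TensorHandle, the result is a concrete TensorHandle
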
container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTableV2") do desc = tf.NodeDescription("MutableDenseHashTableV2") @@ -30175,7 +35376,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_dense_hash_table_v2(empty_key_::tf.TensorHandle, deleted_key_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTableV2") tf.add_input(desc, empty_key_) tf.add_input(desc, deleted_key_) @@ -30207,6 +35408,13 @@ begin desc["key_dtype"] = tf.data_type(deleted_key_) (tf.execute(desc))[1] end + function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + if tf.eager_mode + mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end + end end @@ -30216,7 +35424,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) local desc tf.with_op_name(name, "DrawBoundingBoxes") do desc = tf.NodeDescription("DrawBoundingBoxes") @@ -30228,13 +35436,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function draw_bounding_boxes(images_::tf.TensorHandle, boxes_::tf.TensorHandle; name=nothing) + function draw_bounding_boxes_eager(images_, boxes_; name=nothing) desc = tf.EagerOp("DrawBoundingBoxes") tf.add_input(desc, images_) tf.add_input(desc, boxes_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function draw_bounding_boxes(images_, boxes_; name=nothing) + if tf.eager_mode + draw_bounding_boxes_eager(images_, boxes_; name=name) + else + draw_bounding_boxes_graph(images_, boxes_; name=name) + end + end end @@ -30244,7 +35459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "SparseApplyProximalAdagrad") do
             desc = tf.NodeDescription("SparseApplyProximalAdagrad")
@@ -30271,7 +35486,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function sparse_apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("SparseApplyProximalAdagrad")
         tf.add_input(desc, var_)
         tf.add_input(desc, accum_)
@@ -30292,6 +35507,13 @@ begin
         desc["Tindices"] = tf.data_type(indices_)
         (tf.execute(desc))[1]
     end
+    function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+        else
+            sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
+        end
+    end
 end


@@ -30301,7 +35523,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "RangeDataset") do
             desc = tf.NodeDescription("RangeDataset")
@@ -30320,7 +35542,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function range_dataset(start_::tf.TensorHandle, stop_::tf.TensorHandle, step_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing)
+    function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
         desc = tf.EagerOp("RangeDataset")
         tf.add_input(desc, start_)
         tf.add_input(desc, stop_)
@@ -30333,6 +35555,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.eager_mode
+            range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
+        else
+            range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
+        end
+    end
 end


@@ -30342,7 +35571,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderRestoreStateV2") do
             desc = tf.NodeDescription("ReaderRestoreStateV2")
@@ -30353,12 +35582,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function reader_restore_state_v2(reader_handle_::tf.TensorHandle, state_::tf.TensorHandle; name=nothing)
+    function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing)
         desc = tf.EagerOp("ReaderRestoreStateV2")
         tf.add_input(desc, reader_handle_)
         tf.add_input(desc, state_)
         (tf.execute(desc))[1]
     end
+    function reader_restore_state_v2(reader_handle_, state_; name=nothing)
+        if tf.eager_mode
+            reader_restore_state_v2_eager(reader_handle_, state_; name=name)
+        else
+            reader_restore_state_v2_graph(reader_handle_, state_; name=name)
+        end
+    end
 end


@@ -30368,7 +35604,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing)
         local desc
         tf.with_op_name(name, "TopKV2") do
             desc = tf.NodeDescription("TopKV2")
@@ -30388,7 +35624,7 @@ begin
         end
         out
     end
-    function top_kv2(input_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing, sorted=nothing)
+    function top_kv2_eager(input_, k_; name=nothing, sorted=nothing)
         desc = tf.EagerOp("TopKV2")
         tf.add_input(desc, input_)
         tf.add_input(desc, k_)
@@ -30398,6 +35634,13 @@ begin
         desc["T"] = tf.data_type(input_)
         tf.execute(desc)
     end
+    function top_kv2(input_, k_; name=nothing, sorted=nothing)
+        if tf.eager_mode
+            top_kv2_eager(input_, k_; name=name, sorted=sorted)
+        else
+            top_kv2_graph(input_, k_; name=name, sorted=sorted)
+        end
+    end
 end


@@ -30407,7 +35650,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function atanh(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atanh_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Atanh") do
             desc = tf.NodeDescription("Atanh")
@@ -30417,12 +35660,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function atanh(x_::tf.TensorHandle; name=nothing)
+    function atanh_eager(x_; name=nothing)
         desc = tf.EagerOp("Atanh")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         (tf.execute(desc))[1]
     end
+    function atanh(x_; name=nothing)
+        if tf.eager_mode
+            atanh_eager(x_; name=name)
+        else
+            atanh_graph(x_; name=name)
+        end
+    end
 end


@@ -30432,7 +35682,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_gradient_identity(input_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "DebugGradientIdentity") do
             desc = tf.NodeDescription("DebugGradientIdentity")
@@ -30442,12 +35692,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function debug_gradient_identity(input_::tf.TensorHandle; name=nothing)
+    function debug_gradient_identity_eager(input_; name=nothing)
         desc = tf.EagerOp("DebugGradientIdentity")
         tf.add_input(desc, input_)
         desc["T"] = tf.data_type(input_)
         (tf.execute(desc))[1]
     end
+    function debug_gradient_identity(input_; name=nothing)
+        if tf.eager_mode
+            debug_gradient_identity_eager(input_; name=name)
+        else
+            debug_gradient_identity_graph(input_; name=name)
+        end
+    end
 end


@@ -30457,7 +35714,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseAddGrad") do
             desc = tf.NodeDescription("SparseAddGrad")
@@ -30478,7 +35735,7 @@ begin
         end
         out
     end
-    function sparse_add_grad(backprop_val_grad_::tf.TensorHandle, a_indices_::tf.TensorHandle, b_indices_::tf.TensorHandle, sum_indices_::tf.TensorHandle; name=nothing)
+    function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
         desc = tf.EagerOp("SparseAddGrad")
         tf.add_input(desc, backprop_val_grad_)
         tf.add_input(desc, a_indices_)
@@ -30487,6 +35744,13 @@ begin
         desc["T"] = tf.data_type(backprop_val_grad_)
         tf.execute(desc)
     end
+    function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
+        if tf.eager_mode
+            sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
+        else
+            sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
+        end
+    end
 end


@@ -30496,7 +35760,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "ResourceScatterAdd") do
             desc = tf.NodeDescription("ResourceScatterAdd")
@@ -30515,7 +35779,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resource_scatter_add(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing)
+    function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing)
         desc = tf.EagerOp("ResourceScatterAdd")
         tf.add_input(desc, resource_)
         tf.add_input(desc, indices_)
@@ -30527,6 +35791,13 @@ begin
         desc["dtype"] = tf.data_type(updates_)
         (tf.execute(desc))[1]
     end
+    function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing)
+        if tf.eager_mode
+            resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype)
+        else
+            resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype)
+        end
+    end
 end


@@ -30536,7 +35807,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ceil(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ceil_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Ceil") do
             desc = tf.NodeDescription("Ceil")
@@ -30546,12 +35817,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function ceil(x_::tf.TensorHandle; name=nothing)
+    function ceil_eager(x_; name=nothing)
         desc = tf.EagerOp("Ceil")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         (tf.execute(desc))[1]
     end
+    function ceil(x_; name=nothing)
+        if tf.eager_mode
+            ceil_eager(x_; name=name)
+        else
+            ceil_graph(x_; name=name)
+        end
+    end
 end


@@ -30561,7 +35839,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing)
         local desc
         tf.with_op_name(name, "Save") do
             desc = tf.NodeDescription("Save")
@@ -30577,7 +35855,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function save(filename_::tf.TensorHandle, tensor_names_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing)
+    function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing)
         desc = tf.EagerOp("Save")
         tf.add_input(desc, filename_)
         tf.add_input(desc, tensor_names_)
@@ -30587,6 +35865,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function save(filename_, tensor_names_, data_; name=nothing, T=nothing)
+        if tf.eager_mode
+            save_eager(filename_, tensor_names_, data_; name=name, T=T)
+        else
+            save_graph(filename_, tensor_names_, data_; name=name, T=T)
+        end
+    end
 end


@@ -30596,7 +35881,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do
             desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters")
@@ -30620,7 +35905,7 @@ begin
         end
         out
     end
-    function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    function retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters")
         if table_id !== nothing
             desc["table_id"] = Base.Int(table_id)
@@ -30636,6 +35921,13 @@ begin
         end
         tf.execute(desc)
     end
+    function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        if tf.eager_mode
+            retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        else
+            retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        end
+    end
 end


@@ -30645,7 +35937,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
         local desc
         tf.with_op_name(name, "QuantizedConcat") do
             desc = tf.NodeDescription("QuantizedConcat")
@@ -30669,7 +35961,7 @@ begin
         end
         out
     end
-    function quantized_concat(concat_dim_::tf.TensorHandle, values_::tf.TensorHandle, input_mins_::tf.TensorHandle, input_maxes_::tf.TensorHandle; name=nothing, N=nothing)
+    function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
         desc = tf.EagerOp("QuantizedConcat")
         tf.add_input(desc, concat_dim_)
         tf.add_input(desc, values_)
@@ -30681,6 +35973,13 @@ begin
         desc["T"] = tf.data_type(values_)
         tf.execute(desc)
     end
+    function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
+        if tf.eager_mode
+            quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
+        else
+            quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
+        end
+    end
 end


@@ -30690,7 +35989,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zeros_like(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zeros_like_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "ZerosLike") do
             desc = tf.NodeDescription("ZerosLike")
@@ -30700,12 +35999,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function zeros_like(x_::tf.TensorHandle; name=nothing)
+    function zeros_like_eager(x_; name=nothing)
         desc = tf.EagerOp("ZerosLike")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         (tf.execute(desc))[1]
     end
+    function zeros_like(x_; name=nothing)
+        if tf.eager_mode
+            zeros_like_eager(x_; name=name)
+        else
+            zeros_like_graph(x_; name=name)
+        end
+    end
 end


@@ -30715,7 +36021,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "FractionalAvgPool") do
             desc = tf.NodeDescription("FractionalAvgPool")
@@ -30748,7 +36054,7 @@ begin
         end
         out
     end
-    function fractional_avg_pool(value_::tf.TensorHandle; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+    function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
         desc = tf.EagerOp("FractionalAvgPool")
         tf.add_input(desc, value_)
         if pooling_ratio !== nothing
@@ -30772,6 +36078,13 @@ begin
         desc["T"] = tf.data_type(value_)
         tf.execute(desc)
     end
+    function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
+        if tf.eager_mode
+            fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+        else
+            fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
+        end
+    end
 end


@@ -30781,7 +36094,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
         local desc
         tf.with_op_name(name, "EditDistance") do
             desc = tf.NodeDescription("EditDistance")
@@ -30804,7 +36117,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function edit_distance(hypothesis_indices_::tf.TensorHandle, hypothesis_values_::tf.TensorHandle, hypothesis_shape_::tf.TensorHandle, truth_indices_::tf.TensorHandle, truth_values_::tf.TensorHandle, truth_shape_::tf.TensorHandle; name=nothing, normalize=nothing)
+    function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
         desc = tf.EagerOp("EditDistance")
         tf.add_input(desc, hypothesis_indices_)
         tf.add_input(desc, hypothesis_values_)
@@ -30819,6 +36132,13 @@ begin
         desc["T"] = tf.data_type(truth_values_)
         (tf.execute(desc))[1]
     end
+    function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
+        if tf.eager_mode
+            edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
+        else
+            edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
+        end
+    end
 end


@@ -30828,7 +36148,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing)
         local desc
         tf.with_op_name(name, "UniqueV2") do
             desc = tf.NodeDescription("UniqueV2")
@@ -30849,7 +36169,7 @@ begin
         end
         out
     end
-    function unique_v2(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, out_idx=nothing)
+    function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing)
         desc = tf.EagerOp("UniqueV2")
         tf.add_input(desc, x_)
         tf.add_input(desc, axis_)
@@ -30860,16 +36180,23 @@ begin
         desc["Taxis"] = tf.data_type(axis_)
         tf.execute(desc)
     end
+    function unique_v2(x_, axis_; name=nothing, out_idx=nothing)
+        if tf.eager_mode
+            unique_v2_eager(x_, axis_; name=name, out_idx=out_idx)
+        else
+            unique_v2_graph(x_, axis_; name=name, out_idx=out_idx)
+        end
+    end
 end


 """
-    quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=HALF_TO_EVEN)
+    quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=)

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
         local desc
         tf.with_op_name(name, "QuantizeAndDequantizeV2") do
             desc = tf.NodeDescription("QuantizeAndDequantizeV2")
@@ -30895,7 +36222,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function quantize_and_dequantize_v2(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+    function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
         desc = tf.EagerOp("QuantizeAndDequantizeV2")
         tf.add_input(desc, input_)
         tf.add_input(desc, input_min_)
@@ -30917,6 +36244,13 @@ begin
         desc["T"] = tf.data_type(input_max_)
         (tf.execute(desc))[1]
     end
+    function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
+        if tf.eager_mode
+            quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
+        else
+            quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
+        end
+    end
 end


@@ -30926,7 +36260,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
         local desc
         tf.with_op_name(name, "QuantizeAndDequantize") do
             desc = tf.NodeDescription("QuantizeAndDequantize")
@@ -30951,7 +36285,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function quantize_and_dequantize(input_::tf.TensorHandle; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+    function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
         desc = tf.EagerOp("QuantizeAndDequantize")
         tf.add_input(desc, input_)
         if signed_input !== nothing
@@ -30972,6 +36306,13 @@ begin
         desc["T"] = tf.data_type(input_)
         (tf.execute(desc))[1]
     end
+    function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
+        if tf.eager_mode
+            quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
+        else
+            quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
+        end
+    end
 end


@@ -30981,7 +36322,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing)
         local desc
         tf.with_op_name(name, "TensorListPopBack") do
             desc = tf.NodeDescription("TensorListPopBack")
@@ -31000,7 +36341,7 @@ begin
         end
         out
     end
-    function tensor_list_pop_back(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing)
+    function tensor_list_pop_back_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing)
         desc = tf.EagerOp("TensorListPopBack")
         tf.add_input(desc, input_handle_)
         tf.add_input(desc, element_shape_)
@@ -31009,6 +36350,13 @@ begin
         end
         tf.execute(desc)
     end
+    function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing)
+        if tf.eager_mode
+            tensor_list_pop_back_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype)
+        else
+            tensor_list_pop_back_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype)
+        end
+    end
 end


@@ -31018,7 +36366,7 @@ end
 Debug NaN Value Counter Op
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
         local desc
         tf.with_op_name(name, "DebugNanCount") do
             desc = tf.NodeDescription("DebugNanCount")
@@ -31040,7 +36388,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function debug_nan_count(input_::tf.TensorHandle; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+    function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
         desc = tf.EagerOp("DebugNanCount")
         tf.add_input(desc, input_)
         if device_name !== nothing
@@ -31058,6 +36406,13 @@ begin
         desc["T"] = tf.data_type(input_)
         (tf.execute(desc))[1]
     end
+    function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing)
+        if tf.eager_mode
+            debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+        else
+            debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc)
+        end
+    end
 end


@@ -31067,7 +36422,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ApplyAdagradDA") do
             desc = tf.NodeDescription("ApplyAdagradDA")
@@ -31094,7 +36449,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ApplyAdagradDA")
         tf.add_input(desc, var_)
         tf.add_input(desc, gradient_accumulator_)
@@ -31116,16 +36471,23 @@ begin
         desc["T"] = tf.data_type(l2_)
         (tf.execute(desc))[1]
     end
+    function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+        else
+            apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+        end
+    end
 end


 """
-    depthwise_conv2d_native(input, filter; data_format=NHWC, dilations=[1, 1, 1, 1])
+    depthwise_conv2d_native(input, filter; data_format=, dilations=[1, 1, 1, 1])

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
         local desc
         tf.with_op_name(name, "DepthwiseConv2dNative") do
             desc = tf.NodeDescription("DepthwiseConv2dNative")
@@ -31149,7 +36511,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function depthwise_conv2d_native(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+    function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
         desc = tf.EagerOp("DepthwiseConv2dNative")
         tf.add_input(desc, input_)
         tf.add_input(desc, filter_)
@@ -31169,6 +36531,13 @@ begin
         desc["T"] = tf.data_type(filter_)
         (tf.execute(desc))[1]
     end
+    function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
+        if tf.eager_mode
+            depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+        else
+            depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
+        end
+    end
 end


@@ -31178,7 +36547,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function serialize_iterator(resource_handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "SerializeIterator") do
             desc = tf.NodeDescription("SerializeIterator")
@@ -31187,11 +36556,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function serialize_iterator(resource_handle_::tf.TensorHandle; name=nothing)
+    function serialize_iterator_eager(resource_handle_; name=nothing)
         desc = tf.EagerOp("SerializeIterator")
         tf.add_input(desc, resource_handle_)
         (tf.execute(desc))[1]
     end
+    function serialize_iterator(resource_handle_; name=nothing)
+        if tf.eager_mode
+            serialize_iterator_eager(resource_handle_; name=name)
+        else
+            serialize_iterator_graph(resource_handle_; name=name)
+        end
+    end
 end


@@ -31201,7 +36577,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing)
         local desc
         tf.with_op_name(name, "DatasetToGraph") do
             desc = tf.NodeDescription("DatasetToGraph")
@@ -31210,11 +36586,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function dataset_to_graph(input_dataset_::tf.TensorHandle; name=nothing)
+    function dataset_to_graph_eager(input_dataset_; name=nothing)
         desc = tf.EagerOp("DatasetToGraph")
         tf.add_input(desc, input_dataset_)
         (tf.execute(desc))[1]
     end
+    function dataset_to_graph(input_dataset_; name=nothing)
+        if tf.eager_mode
+            dataset_to_graph_eager(input_dataset_; name=name)
+        else
+            dataset_to_graph_graph(input_dataset_; name=name)
+        end
+    end
 end


@@ -31224,7 +36607,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing)
         local desc
         tf.with_op_name(name, "TopK") do
             desc = tf.NodeDescription("TopK")
@@ -31245,7 +36628,7 @@ begin
         end
         out
     end
-    function top_k(input_::tf.TensorHandle; name=nothing, k=nothing, sorted=nothing)
+    function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing)
         desc = tf.EagerOp("TopK")
         tf.add_input(desc, input_)
         if k !== nothing
@@ -31257,6 +36640,13 @@ begin
         desc["T"] = tf.data_type(input_)
         tf.execute(desc)
     end
+    function top_k(input_; name=nothing, k=nothing, sorted=nothing)
+        if tf.eager_mode
+            top_k_eager(input_; name=name, k=k, sorted=sorted)
+        else
+            top_k_graph(input_; name=name, k=k, sorted=sorted)
+        end
+    end
 end


@@ -31266,7 +36656,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceApplyFtrlV2") do
             desc = tf.NodeDescription("ResourceApplyFtrlV2")
@@ -31295,7 +36685,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resource_apply_ftrl_v2(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, l2_shrinkage_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ResourceApplyFtrlV2")
         tf.add_input(desc, var_)
         tf.add_input(desc, accum_)
@@ -31317,6 +36707,13 @@ begin
         desc["T"] = tf.data_type(lr_power_)
         (tf.execute(desc))[1]
     end
+    function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+        else
+            resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
+        end
+    end
 end


@@ -31326,7 +36723,7 @@ end
 Replacement node for NcclBroadcast.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "_NcclBroadcastRecv") do
             desc = tf.NodeDescription("_NcclBroadcastRecv")
@@ -31341,7 +36738,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function _nccl_broadcast_recv(shape_::tf.TensorHandle; name=nothing, num_devices=nothing, shared_name=nothing)
+    function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
         desc = tf.EagerOp("_NcclBroadcastRecv")
         tf.add_input(desc, shape_)
         if num_devices !== nothing
@@ -31352,6 +36749,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing)
+        if tf.eager_mode
+            _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name)
+        else
+            _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name)
+        end
+    end
 end


@@ -31361,7 +36765,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_is_closed(handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_is_closed_graph(handle_; name=nothing)
         local desc
         tf.with_op_name(name, "QueueIsClosed") do
             desc = tf.NodeDescription("QueueIsClosed")
@@ -31370,11 +36774,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function queue_is_closed(handle_::tf.TensorHandle; name=nothing)
+    function queue_is_closed_eager(handle_; name=nothing)
         desc = tf.EagerOp("QueueIsClosed")
         tf.add_input(desc, handle_)
         (tf.execute(desc))[1]
     end
+    function queue_is_closed(handle_; name=nothing)
+        if tf.eager_mode
+            queue_is_closed_eager(handle_; name=name)
+        else
+            queue_is_closed_graph(handle_; name=name)
+        end
+    end
 end


@@ -31384,7 +36795,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ShuffleDataset") do
             desc = tf.NodeDescription("ShuffleDataset")
@@ -31408,7 +36819,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function shuffle_dataset(input_dataset_::tf.TensorHandle, buffer_size_::tf.TensorHandle, seed_::tf.TensorHandle, seed2_::tf.TensorHandle; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+    function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
         desc = tf.EagerOp("ShuffleDataset")
         tf.add_input(desc, input_dataset_)
         tf.add_input(desc, buffer_size_)
@@ -31425,6 +36836,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.eager_mode
+            shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes)
+        else
+            shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes)
+        end
+    end
 end


@@ -31434,7 +36852,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "DeserializeSparse") do
             desc = tf.NodeDescription("DeserializeSparse")
@@ -31452,7 +36870,7 @@ begin
         end
         out
     end
-    function deserialize_sparse(serialized_sparse_::tf.TensorHandle; name=nothing, dtype=nothing)
+    function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing)
         desc = tf.EagerOp("DeserializeSparse")
         tf.add_input(desc, serialized_sparse_)
         if dtype !== nothing
@@ -31461,6 +36879,13 @@ begin
         desc["Tserialized"] = tf.data_type(serialized_sparse_)
         tf.execute(desc)
     end
+    function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing)
+        if tf.eager_mode
+            deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype)
+        else
+            deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype)
+        end
+    end
 end


@@ -31470,7 +36895,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "PriorityQueueV2") do
             desc = tf.NodeDescription("PriorityQueueV2")
@@ -31492,7 +36917,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+    function priority_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
         desc = tf.EagerOp("PriorityQueueV2")
         if component_types !== nothing
             desc["component_types"] = map(Base.identity, component_types)
@@ -31511,6 +36936,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+        else
+            priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
+        end
+    end
 end


@@ -31520,7 +36952,7 @@ end
 A graph node which represents an argument to a function.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _device_arg(; name=nothing, index=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing)
         local desc
         tf.with_op_name(name, "_DeviceArg") do
             desc = tf.NodeDescription("_DeviceArg")
@@ -31530,13 +36962,20 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function _device_arg(; name=nothing, index=nothing)
+    function _device_arg_eager(; name=nothing, index=nothing)
         desc = tf.EagerOp("_DeviceArg")
         if index !== nothing
             desc["index"] = Base.Int(index)
         end
         (tf.execute(desc))[1]
     end
+    function _device_arg(; name=nothing, index=nothing)
+        if tf.eager_mode
+            _device_arg_eager(; name=name, index=index)
+        else
+            _device_arg_graph(; name=name, index=index)
+        end
+    end
 end


@@ -31546,7 +36985,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "TruncatedNormal") do
             desc = tf.NodeDescription("TruncatedNormal")
@@ -31565,7 +37004,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function truncated_normal(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+    function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
         desc = tf.EagerOp("TruncatedNormal")
         tf.add_input(desc, shape_)
         if seed !== nothing
@@ -31580,6 +37019,13 @@ begin
         desc["T"] = tf.data_type(shape_)
         (tf.execute(desc))[1]
     end
+    function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+        if tf.eager_mode
+            truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+        else
+            truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+        end
+    end
 end


@@ -31589,7 +37035,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
         local desc
         tf.with_op_name(name, "TensorForestTreePredict") do
             desc = tf.NodeDescription("TensorForestTreePredict")
@@ -31603,7 +37049,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function tensor_forest_tree_predict(tree_handle_::tf.TensorHandle, dense_features_::tf.TensorHandle; name=nothing, logits_dimension=nothing)
+    function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
         desc = tf.EagerOp("TensorForestTreePredict")
         tf.add_input(desc, tree_handle_)
         tf.add_input(desc, dense_features_)
@@ -31612,6 +37058,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing)
+        if tf.eager_mode
+            tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension)
+        else
+            tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension)
+        end
+    end
 end


@@ -31621,7 +37074,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
         local desc
         tf.with_op_name(name, "StackV2") do
             desc = tf.NodeDescription("StackV2")
@@ -31636,7 +37089,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function stack_v2(max_size_::tf.TensorHandle; name=nothing, elem_type=nothing, stack_name=nothing)
+    function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
         desc = tf.EagerOp("StackV2")
         tf.add_input(desc, max_size_)
         if elem_type !== nothing
@@ -31647,6 +37100,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing)
+        if tf.eager_mode
+            stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name)
+        else
+            stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name)
+        end
+    end
 end


@@ -31656,7 +37116,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing)
         local desc
         tf.with_op_name(name, "AccumulatorNumAccumulated") do
             desc = tf.NodeDescription("AccumulatorNumAccumulated")
@@ -31665,11 +37125,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function accumulator_num_accumulated(handle_::tf.TensorHandle; name=nothing)
+    function accumulator_num_accumulated_eager(handle_; name=nothing)
        desc = tf.EagerOp("AccumulatorNumAccumulated")
        tf.add_input(desc, handle_)
        (tf.execute(desc))[1]
     end
+    function accumulator_num_accumulated(handle_; name=nothing)
+        if tf.eager_mode
+            accumulator_num_accumulated_eager(handle_; name=name)
+        else
+            accumulator_num_accumulated_graph(handle_; name=name)
+        end
+    end
 end


@@ -31679,7 +37146,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderResetV2") do
             desc = tf.NodeDescription("ReaderResetV2")
@@ -31688,11 +37155,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function reader_reset_v2(reader_handle_::tf.TensorHandle; name=nothing)
+    function reader_reset_v2_eager(reader_handle_; name=nothing)
         desc = tf.EagerOp("ReaderResetV2")
         tf.add_input(desc, reader_handle_)
         (tf.execute(desc))[1]
     end
+    function reader_reset_v2(reader_handle_; name=nothing)
+        if tf.eager_mode
+            reader_reset_v2_eager(reader_handle_; name=name)
+        else
+            reader_reset_v2_graph(reader_handle_; name=name)
+        end
+    end
 end


@@ -31702,7 +37176,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ApplyAddSign") do
             desc = tf.NodeDescription("ApplyAddSign")
@@ -31727,7 +37201,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function apply_add_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, alpha_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing)
+    function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ApplyAddSign")
         tf.add_input(desc, var_)
         tf.add_input(desc, m_)
@@ -31748,6 +37222,13 @@ begin
         desc["T"] = tf.data_type(grad_)
         (tf.execute(desc))[1]
     end
+    function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+        if tf.eager_mode
+            apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+        else
+            apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
+        end
+    end
 end


@@ -31757,7 +37238,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do
             desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")
@@ -31781,7 +37262,7 @@ begin
         end
         out
     end
-    function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug")
         if table_id !== nothing
             desc["table_id"] = Base.Int(table_id)
@@ -31797,6 +37278,13 @@ begin
         end
         tf.execute(desc)
     end
+    function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        if tf.eager_mode
+            retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        else
+            retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        end
+    end
 end


@@ -31806,7 +37294,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rint(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rint_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Rint") do
             desc = tf.NodeDescription("Rint")
@@ -31816,12 +37304,19 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function rint(x_::tf.TensorHandle; name=nothing)
+    function rint_eager(x_; name=nothing)
         desc = tf.EagerOp("Rint")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         (tf.execute(desc))[1]
     end
+    function rint(x_; name=nothing)
+        if tf.eager_mode
+            rint_eager(x_; name=name)
+        else
+            rint_graph(x_; name=name)
+        end
+    end
 end


@@ -31831,7 +37326,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do
             desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")
@@ -31855,7 +37350,7 @@ begin
         end
         out
     end
-    function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug")
         if table_id !== nothing
             desc["table_id"] = Base.Int(table_id)
@@ -31871,16 +37366,23 @@ begin
         end
         tf.execute(desc)
     end
+    function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        if tf.eager_mode
+            retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        else
+            retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+        end
+    end
 end


 """
-    extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=uniform)
+    extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=)

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing)
         local desc
         tf.with_op_name(name, "ExtractGlimpse") do
             desc = tf.NodeDescription("ExtractGlimpse")
@@ -31905,7 +37407,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function extract_glimpse(input_::tf.TensorHandle, size_::tf.TensorHandle, offsets_::tf.TensorHandle; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing)
+    function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing)
         desc = tf.EagerOp("ExtractGlimpse")
         tf.add_input(desc, input_)
         tf.add_input(desc, size_)
@@ -31924,6 +37426,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing)
+        if tf.eager_mode
+            extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise)
+        else
+            extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise)
+        end
+    end
 end


@@ -31933,7 +37442,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing)
         local desc
         tf.with_op_name(name, "StringToHashBucketStrong") do
             desc = tf.NodeDescription("StringToHashBucketStrong")
@@ -31948,7 +37457,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function string_to_hash_bucket_strong(input_::tf.TensorHandle; name=nothing, num_buckets=nothing, key=nothing)
+    function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing)
         desc = tf.EagerOp("StringToHashBucketStrong")
         tf.add_input(desc, input_)
         if num_buckets !== nothing
@@ -31959,6 +37468,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing)
+        if tf.eager_mode
+            string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key)
+        else
+            string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key)
+        end
+    end
 end


@@ -31968,7 +37484,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "OneShotIterator") do
             desc = tf.NodeDescription("OneShotIterator")
@@ -31990,7 +37506,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+    function one_shot_iterator_eager(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
         desc = tf.EagerOp("OneShotIterator")
         if dataset_factory !== nothing
             desc["dataset_factory"] = Base.identity(dataset_factory)
@@ -32009,6 +37525,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing)
+        if tf.eager_mode
+            one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name)
+        else
+            one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name)
+        end
+    end
 end


@@ -32018,7 +37541,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
         local desc
         tf.with_op_name(name, "ResourceSparseApplyMomentum") do
             desc = tf.NodeDescription("ResourceSparseApplyMomentum")
@@ -32046,7 +37569,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function resource_sparse_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing)
+    function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
         desc = tf.EagerOp("ResourceSparseApplyMomentum")
         tf.add_input(desc, var_)
         tf.add_input(desc, accum_)
@@ -32066,6 +37589,13 @@ begin
         desc["T"] = tf.data_type(momentum_)
         (tf.execute(desc))[1]
     end
+    function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+        if tf.eager_mode
+            resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+        else
+            resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+        end
+    end
 end


@@ -32075,7 +37605,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
         local desc
         tf.with_op_name(name, "SaveSlices") do
             desc = tf.NodeDescription("SaveSlices")
@@ -32093,7 +37623,7 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function save_slices(filename_::tf.TensorHandle, tensor_names_::tf.TensorHandle, shapes_and_slices_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, T=nothing)
+    function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
         desc = tf.EagerOp("SaveSlices")
         tf.add_input(desc, filename_)
         tf.add_input(desc, tensor_names_)
@@ -32104,6 +37634,13 @@ begin
         end
         (tf.execute(desc))[1]
     end
+    function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing)
+        if tf.eager_mode
+            save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T)
+        else
+            save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T)
+        end
+    end
 end


@@ -32113,7 +37650,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalDatasetCardinality") do
             desc = tf.NodeDescription("ExperimentalDatasetCardinality")
@@ -32122,11 +37659,18 @@ begin
         end
         tf.Tensor(tf.Operation(desc))
     end
-    function experimental_dataset_cardinality(input_dataset_::tf.TensorHandle; name=nothing)
+    function experimental_dataset_cardinality_eager(input_dataset_; name=nothing)
         desc = tf.EagerOp("ExperimentalDatasetCardinality")
         tf.add_input(desc, input_dataset_)
         (tf.execute(desc))[1]
     end
+    function experimental_dataset_cardinality(input_dataset_; name=nothing)
+        if tf.eager_mode
+            experimental_dataset_cardinality_eager(input_dataset_; name=name)
+        else
+            experimental_dataset_cardinality_graph(input_dataset_; name=name)
+        end
+    end
 end

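Every hunk in this generated file applies the same three-part rewrite, worth spelling out once rather than per op: the old `op(...)` definition is renamed to `op_graph(...)`, an `op_eager(...)` twin that drives a `tf.EagerOp` is added, and a new `op(...)` entry point dispatches between them on the global `tf.eager_mode` flag. Below is a minimal hand-written sketch of that pattern, assuming the `tf.*` helpers introduced earlier in this series; `ExampleOp` is a hypothetical kernel name used only for illustration, not a real TensorFlow op.

    # Graph mode: record an "ExampleOp" node via a NodeDescription and
    # return a symbolic Tensor to be run later in a session.
    function example_op_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "ExampleOp") do
            desc = tf.NodeDescription("ExampleOp")
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))
    end

    # Eager mode: build a TFE op, derive the "T" attr from the input
    # handle, and execute immediately, keeping the first output handle.
    function example_op_eager(x_; name=nothing)
        desc = tf.EagerOp("ExampleOp")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        (tf.execute(desc))[1]
    end

    # Public entry point: pick a backend on the global eager/graph switch.
    function example_op(x_; name=nothing)
        if tf.eager_mode
            example_op_eager(x_; name=name)
        else
            example_op_graph(x_; name=name)
        end
    end

Note that the eager variants drop the `::tf.TensorHandle` annotations the earlier commits used, and instead fill type attrs like `"T"` from `tf.data_type` on whatever inputs they receive.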
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_finite(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_finite_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsFinite") do desc = tf.NodeDescription("IsFinite") @@ -32146,12 +37690,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_finite(x_::tf.TensorHandle; name=nothing) + function is_finite_eager(x_; name=nothing) desc = tf.EagerOp("IsFinite") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function is_finite(x_; name=nothing) + if tf.eager_mode + is_finite_eager(x_; name=name) + else + is_finite_graph(x_; name=name) + end + end end @@ -32161,7 +37712,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") @@ -32193,7 +37744,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_numa_map_and_batch_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, batch_size_::tf.TensorHandle, num_parallel_calls_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -32217,6 +37768,13 @@ begin end (tf.execute(desc))[1] end + function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.eager_mode + experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end + end end @@ -32226,7 +37784,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) local desc tf.with_op_name(name, "AllToAll") do desc = tf.NodeDescription("AllToAll") @@ -32247,7 +37805,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function all_to_all(input_::tf.TensorHandle, group_assignment_::tf.TensorHandle; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) desc = tf.EagerOp("AllToAll") tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) @@ -32263,6 +37821,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + if tf.eager_mode + all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + else + all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + end + end end @@ -32272,7 +37837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TakeManySparseFromTensorsMap") do desc = tf.NodeDescription("TakeManySparseFromTensorsMap") @@ -32295,7 +37860,7 @@ begin end out end - function take_many_sparse_from_tensors_map(sparse_handles_::tf.TensorHandle; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TakeManySparseFromTensorsMap") tf.add_input(desc, sparse_handles_) if dtype !== nothing @@ -32309,6 +37874,13 @@ begin end tf.execute(desc) end + function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + else + take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + end + end end @@ -32318,7 +37890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_diag_part(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiagPart") do desc = tf.NodeDescription("BatchMatrixDiagPart") @@ -32328,12 +37900,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_diag_part(input_::tf.TensorHandle; name=nothing) + function batch_matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) 
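        # In the eager path the "T" attr is read off the live input handle via
        # tf.data_type (which wraps TFE_TensorHandleDataType), instead of being
        # inferred from the graph inputs as in the builder above.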
(tf.execute(desc))[1] end + function batch_matrix_diag_part(input_; name=nothing) + if tf.eager_mode + batch_matrix_diag_part_eager(input_; name=name) + else + batch_matrix_diag_part_graph(input_; name=name) + end + end end @@ -32343,7 +37922,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDataset") do desc = tf.NodeDescription("FixedLengthRecordDataset") @@ -32360,7 +37939,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fixed_length_record_dataset(filenames_::tf.TensorHandle, header_bytes_::tf.TensorHandle, record_bytes_::tf.TensorHandle, footer_bytes_::tf.TensorHandle, buffer_size_::tf.TensorHandle; name=nothing) + function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDataset") tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) @@ -32369,6 +37948,13 @@ begin tf.add_input(desc, buffer_size_) (tf.execute(desc))[1] end + function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + if tf.eager_mode + fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + else + fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + end + end end @@ -32378,7 +37964,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPush") do desc = tf.NodeDescription("StackPush") @@ -32393,7 +37979,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack_push(handle_::tf.TensorHandle, elem_::tf.TensorHandle; name=nothing, swap_memory=nothing) + function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPush") tf.add_input(desc, handle_) tf.add_input(desc, elem_) @@ -32403,6 +37989,13 @@ begin desc["T"] = tf.data_type(elem_) (tf.execute(desc))[1] end + function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.eager_mode + stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end + end end @@ -32412,7 +38005,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderV2") do desc = tf.NodeDescription("PlaceholderV2") @@ -32425,7 +38018,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + function placeholder_v2_eager(; name=nothing, dtype=nothing, shape=nothing) 
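        # Optional attrs below are written only when the caller supplied them,
        # so anything left as `nothing` falls back to the op's registered
        # default rather than being sent explicitly.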
desc = tf.EagerOp("PlaceholderV2") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -32435,6 +38028,13 @@ begin end (tf.execute(desc))[1] end + function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) + end + end end @@ -32444,7 +38044,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorInit") do desc = tf.NodeDescription("MultiDeviceIteratorInit") @@ -32457,13 +38057,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator_init(dataset_::tf.TensorHandle, multi_device_iterator_::tf.TensorHandle, max_buffer_size_::tf.TensorHandle; name=nothing) + function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorInit") tf.add_input(desc, dataset_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, max_buffer_size_) (tf.execute(desc))[1] end + function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + if tf.eager_mode + multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + else + multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + end + end end @@ -32473,7 +38080,7 @@ end Re-configures the GCS block cache with the new configuration values. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureBlockCache") do desc = tf.NodeDescription("GcsConfigureBlockCache") @@ -32486,13 +38093,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function gcs_configure_block_cache(max_cache_size_::tf.TensorHandle, block_size_::tf.TensorHandle, max_staleness_::tf.TensorHandle; name=nothing) + function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) desc = tf.EagerOp("GcsConfigureBlockCache") tf.add_input(desc, max_cache_size_) tf.add_input(desc, block_size_) tf.add_input(desc, max_staleness_) (tf.execute(desc))[1] end + function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) + if tf.eager_mode + gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) + else + gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) + end + end end @@ -32502,7 +38116,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueV2") do desc = tf.NodeDescription("QueueDequeueV2") @@ -32517,7 +38131,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_v2(handle_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueV2") tf.add_input(desc, handle_) if component_types !== nothing @@ -32528,6 +38142,13 @@ begin end (tf.execute(desc))[1] end + function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end + end end @@ -32537,7 +38158,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") @@ -32561,7 +38182,7 @@ begin end out end - function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = 
tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -32577,6 +38198,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -32586,7 +38214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function transpose(x_, perm_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "Transpose") do desc = tf.NodeDescription("Transpose") @@ -32599,7 +38227,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function transpose(x_::tf.TensorHandle, perm_::tf.TensorHandle; name=nothing) + function transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("Transpose") tf.add_input(desc, x_) tf.add_input(desc, perm_) @@ -32607,6 +38235,13 @@ begin desc["Tperm"] = tf.data_type(perm_) (tf.execute(desc))[1] end + function transpose(x_, perm_; name=nothing) + if tf.eager_mode + transpose_eager(x_, perm_; name=name) + else + transpose_graph(x_, perm_; name=name) + end + end end @@ -32616,7 +38251,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ifft(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT") do desc = tf.NodeDescription("IFFT") @@ -32626,12 +38261,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ifft(input_::tf.TensorHandle; name=nothing) + function ifft_eager(input_; name=nothing) desc = tf.EagerOp("IFFT") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) (tf.execute(desc))[1] end + function ifft(input_; name=nothing) + if tf.eager_mode + ifft_eager(input_; name=name) + else + ifft_graph(input_; name=name) + end + end end @@ -32641,7 +38283,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") @@ -32660,7 +38302,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_sum_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSumWithNumSegments") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -32671,6 +38313,13 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) (tf.execute(desc))[1] end + function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; 
name=nothing) + if tf.eager_mode + sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end + end end @@ -32680,7 +38329,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_is_closed_v2(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosedV2") do desc = tf.NodeDescription("QueueIsClosedV2") @@ -32689,11 +38338,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_is_closed_v2(handle_::tf.TensorHandle; name=nothing) + function queue_is_closed_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosedV2") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function queue_is_closed_v2(handle_; name=nothing) + if tf.eager_mode + queue_is_closed_v2_eager(handle_; name=name) + else + queue_is_closed_v2_graph(handle_; name=name) + end + end end @@ -32703,7 +38359,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "ParameterizedTruncatedNormal") do desc = tf.NodeDescription("ParameterizedTruncatedNormal") @@ -32731,7 +38387,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parameterized_truncated_normal(shape_::tf.TensorHandle, means_::tf.TensorHandle, stdevs_::tf.TensorHandle, minvals_::tf.TensorHandle, maxvals_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("ParameterizedTruncatedNormal") tf.add_input(desc, shape_) tf.add_input(desc, means_) @@ -32754,6 +38410,13 @@ begin desc["dtype"] = tf.data_type(maxvals_) (tf.execute(desc))[1] end + function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.eager_mode + parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end + end end @@ -32763,7 +38426,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function diag_part(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "DiagPart") do desc = tf.NodeDescription("DiagPart") @@ -32773,12 +38436,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function diag_part(input_::tf.TensorHandle; name=nothing) + function diag_part_eager(input_; name=nothing) desc = tf.EagerOp("DiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function diag_part(input_; name=nothing) + if 
tf.eager_mode + diag_part_eager(input_; name=name) + else + diag_part_graph(input_; name=name) + end + end end @@ -32788,7 +38458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) local desc tf.with_op_name(name, "KmeansPlusPlusInitialization") do desc = tf.NodeDescription("KmeansPlusPlusInitialization") @@ -32803,7 +38473,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function kmeans_plus_plus_initialization(points_::tf.TensorHandle, num_to_sample_::tf.TensorHandle, seed_::tf.TensorHandle, num_retries_per_sample_::tf.TensorHandle; name=nothing) + function kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) desc = tf.EagerOp("KmeansPlusPlusInitialization") tf.add_input(desc, points_) tf.add_input(desc, num_to_sample_) @@ -32811,6 +38481,13 @@ begin tf.add_input(desc, num_retries_per_sample_) (tf.execute(desc))[1] end + function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) + if tf.eager_mode + kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) + else + kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) + end + end end @@ -32820,7 +38497,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) local desc tf.with_op_name(name, "RegexReplace") do desc = tf.NodeDescription("RegexReplace") @@ -32836,7 +38513,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function regex_replace(input_::tf.TensorHandle, pattern_::tf.TensorHandle, rewrite_::tf.TensorHandle; name=nothing, replace_global=nothing) + function regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) desc = tf.EagerOp("RegexReplace") tf.add_input(desc, input_) tf.add_input(desc, pattern_) @@ -32846,6 +38523,13 @@ begin end (tf.execute(desc))[1] end + function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + if tf.eager_mode + regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global) + else + regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global) + end + end end @@ -32855,7 +38539,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) local desc tf.with_op_name(name, "SparseTensorDenseMatMul") do desc = tf.NodeDescription("SparseTensorDenseMatMul") @@ -32879,7 +38563,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
sparse_tensor_dense_mat_mul(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) desc = tf.EagerOp("SparseTensorDenseMatMul") tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) @@ -32896,6 +38580,13 @@ begin desc["T"] = tf.data_type(b_) (tf.execute(desc))[1] end + function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + if tf.eager_mode + sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) + else + sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) + end + end end @@ -32905,7 +38596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) local desc tf.with_op_name(name, "MapDefun") do desc = tf.NodeDescription("MapDefun") @@ -32931,7 +38622,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function map_defun(arguments_::tf.TensorHandle, captured_inputs_::tf.TensorHandle; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) desc = tf.EagerOp("MapDefun") tf.add_input(desc, arguments_) tf.add_input(desc, captured_inputs_) @@ -32952,6 +38643,13 @@ begin end (tf.execute(desc))[1] end + function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + if tf.eager_mode + map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) + else + map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) + end + end end @@ -32961,7 +38659,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") @@ -32993,7 +38691,7 @@ begin end out end - function thread_unsafe_unigram_candidate_sampler(true_classes_::tf.TensorHandle; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, 
range_max=nothing, seed=nothing, seed2=nothing) + function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler") tf.add_input(desc, true_classes_) if num_true !== nothing @@ -33016,6 +38714,13 @@ begin end tf.execute(desc) end + function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end + end end @@ -33025,7 +38730,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") @@ -33049,7 +38754,7 @@ begin end out end - function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -33065,6 +38770,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -33074,7 +38786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "ParallelConcat") do desc = tf.NodeDescription("ParallelConcat") @@ -33090,7 +38802,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function parallel_concat(values_::tf.TensorHandle; name=nothing, N=nothing, shape=nothing) + function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("ParallelConcat") tf.add_input(desc, values_) if N !== nothing @@ -33102,6 +38814,13 @@ begin desc["T"] = 
tf.data_type(values_) (tf.execute(desc))[1] end + function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) + if tf.eager_mode + parallel_concat_eager(values_; name=name, N=N, shape=shape) + else + parallel_concat_graph(values_; name=name, N=N, shape=shape) + end + end end @@ -33111,7 +38830,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFindV2") do desc = tf.NodeDescription("LookupTableFindV2") @@ -33126,7 +38845,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_find_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFindV2") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -33135,6 +38854,13 @@ begin desc["Tout"] = tf.data_type(default_value_) (tf.execute(desc))[1] end + function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) + if tf.eager_mode + lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name) + else + lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name) + end + end end @@ -33144,7 +38870,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeDeserialize") do desc = tf.NodeDescription("TensorForestTreeDeserialize") @@ -33155,12 +38881,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_deserialize(tree_handle_::tf.TensorHandle, tree_config_::tf.TensorHandle; name=nothing) + function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestTreeDeserialize") tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) (tf.execute(desc))[1] end + function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) + if tf.eager_mode + tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name) + else + tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name) + end + end end @@ -33170,7 +38903,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") @@ -33194,7 +38927,7 @@ begin end out end - function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function 
retrieve_tpu_embedding_momentum_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -33210,6 +38943,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -33219,7 +38959,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") @@ -33240,7 +38980,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fake_quant_with_min_max_args(inputs_::tf.TensorHandle; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgs") tf.add_input(desc, inputs_) if min !== nothing @@ -33257,6 +38997,13 @@ begin end (tf.execute(desc))[1] end + function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + if tf.eager_mode + fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + end + end end @@ -33266,7 +39013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyGradientDescent") do desc = tf.NodeDescription("ResourceApplyGradientDescent") @@ -33283,7 +39030,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -33295,6 +39042,13 @@ begin desc["T"] = tf.data_type(delta_) (tf.execute(desc))[1] end + function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) + else + 
resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + end + end end @@ -33304,7 +39058,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") @@ -33325,7 +39079,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_sliding_window_dataset(input_dataset_::tf.TensorHandle, window_size_::tf.TensorHandle, window_shift_::tf.TensorHandle, window_stride_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSlidingWindowDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, window_size_) @@ -33339,6 +39093,13 @@ begin end (tf.execute(desc))[1] end + function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -33348,7 +39109,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) local desc tf.with_op_name(name, "DecodeRaw") do desc = tf.NodeDescription("DecodeRaw") @@ -33363,7 +39124,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function decode_raw(bytes_::tf.TensorHandle; name=nothing, out_type=nothing, little_endian=nothing) + function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing) desc = tf.EagerOp("DecodeRaw") tf.add_input(desc, bytes_) if out_type !== nothing @@ -33374,6 +39135,13 @@ begin end (tf.execute(desc))[1] end + function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + if tf.eager_mode + decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian) + else + decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian) + end + end end @@ -33383,7 +39151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, 
inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") @@ -33409,7 +39177,7 @@ begin end out end - function fake_quant_with_min_max_vars_per_channel_gradient(gradients_::tf.TensorHandle, inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) @@ -33423,6 +39191,13 @@ begin end tf.execute(desc) end + function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.eager_mode + fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end + end end @@ -33432,7 +39207,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCountsV2") do desc = tf.NodeDescription("UniqueWithCountsV2") @@ -33453,7 +39228,7 @@ begin end out end - function unique_with_counts_v2(x_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing, out_idx=nothing) + function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCountsV2") tf.add_input(desc, x_) tf.add_input(desc, axis_) @@ -33464,6 +39239,13 @@ begin desc["Taxis"] = tf.data_type(axis_) tf.execute(desc) end + function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) + if tf.eager_mode + unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx) + else + unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx) + end + end end @@ -33473,7 +39255,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSleepDataset") do desc = tf.NodeDescription("ExperimentalSleepDataset") @@ -33490,7 +39272,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_sleep_dataset(input_dataset_::tf.TensorHandle, sleep_microseconds_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSleepDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, sleep_microseconds_) @@ -33502,6 +39284,13 @@ begin end (tf.execute(desc))[1] end + 
function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -33511,7 +39300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) local desc tf.with_op_name(name, "TPUReplicatedOutput") do desc = tf.NodeDescription("TPUReplicatedOutput") @@ -33529,7 +39318,7 @@ begin end out end - function tpu_replicated_output(input_::tf.TensorHandle; name=nothing, num_replicas=nothing) + function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing) desc = tf.EagerOp("TPUReplicatedOutput") tf.add_input(desc, input_) if num_replicas !== nothing @@ -33538,6 +39327,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) + if tf.eager_mode + tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas) + else + tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas) + end + end end @@ -33547,7 +39343,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "LowerBound") do desc = tf.NodeDescription("LowerBound") @@ -33562,7 +39358,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lower_bound(sorted_inputs_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing, out_type=nothing) + function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("LowerBound") tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) @@ -33573,6 +39369,13 @@ begin desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + if tf.eager_mode + lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) + else + lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + end + end end @@ -33582,7 +39385,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tan(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tan") do desc = tf.NodeDescription("Tan") @@ -33592,12 +39395,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tan(x_::tf.TensorHandle; name=nothing) + function tan_eager(x_; name=nothing) desc = tf.EagerOp("Tan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function tan(x_; name=nothing) + if tf.eager_mode + tan_eager(x_; name=name) + else + tan_graph(x_; name=name) + end + end end @@ -33607,7 +39417,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "Enter") do desc = tf.NodeDescription("Enter") @@ -33626,7 +39436,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function enter(data_::tf.TensorHandle; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("Enter") tf.add_input(desc, data_) if frame_name !== nothing @@ -33641,6 +39451,13 @@ begin desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + if tf.eager_mode + enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + else + enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + end + end end @@ -33650,7 +39467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueueTuple") do desc = tf.NodeDescription("InfeedEnqueueTuple") @@ -33671,7 +39488,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function infeed_enqueue_tuple(inputs_::tf.TensorHandle; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueueTuple") tf.add_input(desc, inputs_) if dtypes !== nothing @@ -33688,6 +39505,13 @@ begin end (tf.execute(desc))[1] end + function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + if tf.eager_mode + infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) + else + infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) + end + end end @@ -33697,7 +39521,7 @@ end An op that informs a host of the global ids of all the of TPUs in the """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _set_global_tpu_array(topology_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) local desc tf.with_op_name(name, "_SetGlobalTPUArray") do desc = tf.NodeDescription("_SetGlobalTPUArray") @@ -33706,11 +39530,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _set_global_tpu_array(topology_::tf.TensorHandle; name=nothing) + function _set_global_tpu_array_eager(topology_; name=nothing) desc = tf.EagerOp("_SetGlobalTPUArray") tf.add_input(desc, topology_) 
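        # tf.execute returns the op's output handles as a collection; the
        # generated wrappers index [1] for ops declared with a single output
        # and return the whole collection, unindexed, for multi-output ops.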
(tf.execute(desc))[1] end + function _set_global_tpu_array(topology_; name=nothing) + if tf.eager_mode + _set_global_tpu_array_eager(topology_; name=name) + else + _set_global_tpu_array_graph(topology_; name=name) + end + end end @@ -33720,7 +39551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function square(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function square_graph(x_; name=nothing) local desc tf.with_op_name(name, "Square") do desc = tf.NodeDescription("Square") @@ -33730,12 +39561,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function square(x_::tf.TensorHandle; name=nothing) + function square_eager(x_; name=nothing) desc = tf.EagerOp("Square") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function square(x_; name=nothing) + if tf.eager_mode + square_eager(x_; name=name) + else + square_graph(x_; name=name) + end + end end @@ -33745,7 +39583,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientRefIdentity") do desc = tf.NodeDescription("DebugGradientRefIdentity") @@ -33755,12 +39593,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function debug_gradient_ref_identity(input_::tf.TensorHandle; name=nothing) + function debug_gradient_ref_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientRefIdentity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function debug_gradient_ref_identity(input_; name=nothing) + if tf.eager_mode + debug_gradient_ref_identity_eager(input_; name=name) + else + debug_gradient_ref_identity_graph(input_; name=name) + end + end end @@ -33770,7 +39615,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdadelta") do desc = tf.NodeDescription("ApplyAdadelta") @@ -33795,7 +39640,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_adadelta(var_::tf.TensorHandle, accum_::tf.TensorHandle, accum_update_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdadelta") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -33816,6 +39661,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -33825,7 +39677,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") @@ -33864,7 +39716,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_group_by_window_dataset(input_dataset_::tf.TensorHandle, key_func_other_arguments_::tf.TensorHandle, reduce_func_other_arguments_::tf.TensorHandle, window_size_func_other_arguments_::tf.TensorHandle; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByWindowDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) @@ -33896,6 +39748,13 @@ begin end (tf.execute(desc))[1] end + function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + else + experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, 
output_types=output_types, output_shapes=output_shapes) + end + end end @@ -33905,7 +39764,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummary") do desc = tf.NodeDescription("AudioSummary") @@ -33922,7 +39781,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function audio_summary(tag_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, sample_rate=nothing, max_outputs=nothing) + function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) desc = tf.EagerOp("AudioSummary") tf.add_input(desc, tag_) tf.add_input(desc, tensor_) @@ -33934,6 +39793,13 @@ begin end (tf.execute(desc))[1] end + function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + if tf.eager_mode + audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) + else + audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) + end + end end @@ -33943,7 +39809,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function squared_difference(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "SquaredDifference") do desc = tf.NodeDescription("SquaredDifference") @@ -33955,7 +39821,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function squared_difference(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function squared_difference_eager(x_, y_; name=nothing) desc = tf.EagerOp("SquaredDifference") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -33963,6 +39829,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function squared_difference(x_, y_; name=nothing) + if tf.eager_mode + squared_difference_eager(x_, y_; name=name) + else + squared_difference_graph(x_, y_; name=name) + end + end end @@ -33972,7 +39845,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalTakeWhileDataset") do desc = tf.NodeDescription("ExperimentalTakeWhileDataset") @@ -33995,7 +39868,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_take_while_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + function experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalTakeWhileDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ 
-34013,6 +39886,13 @@ begin end (tf.execute(desc))[1] end + function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -34022,7 +39902,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdUpdate") do desc = tf.NodeDescription("ScatterNdUpdate") @@ -34041,7 +39921,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_nd_update(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdUpdate") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -34054,6 +39934,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -34063,7 +39950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "DynamicStitch") do desc = tf.NodeDescription("DynamicStitch") @@ -34078,7 +39965,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function dynamic_stitch(indices_::tf.TensorHandle, data_::tf.TensorHandle; name=nothing, N=nothing) + function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) desc = tf.EagerOp("DynamicStitch") tf.add_input(desc, indices_) tf.add_input(desc, data_) @@ -34088,6 +39975,13 @@ begin desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function dynamic_stitch(indices_, data_; name=nothing, N=nothing) + if tf.eager_mode + dynamic_stitch_eager(indices_, data_; name=name, N=N) + else + dynamic_stitch_graph(indices_, data_; name=name, N=N) + end + end end @@ -34097,7 +39991,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ones_like(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ones_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "OnesLike") do desc = tf.NodeDescription("OnesLike") @@ -34107,12 +40001,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ones_like(x_::tf.TensorHandle; name=nothing) + function ones_like_eager(x_; name=nothing) desc = tf.EagerOp("OnesLike") 
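# In the eager path there is no graph to carry attribute values, so dtype
# attrs such as "T" are inferred from the input handles at call time via
# `tf.data_type`. A sketch of that step (`h` is a hypothetical input handle;
# `tf.data_type` and the `desc[...] =` attr setter are the calls the
# generated `*_eager` methods use):
#
#     dtype = tf.data_type(h)  # TF_DataType carried by the tensor handle
#     desc["T"] = dtype        # recorded on the EagerOp before tf.execute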
tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function ones_like(x_; name=nothing) + if tf.eager_mode + ones_like_eager(x_; name=name) + else + ones_like_graph(x_; name=name) + end + end end @@ -34122,7 +40023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalMaxPoolGrad") do desc = tf.NodeDescription("FractionalMaxPoolGrad") @@ -34143,7 +40044,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fractional_max_pool_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, out_backprop_::tf.TensorHandle, row_pooling_sequence_::tf.TensorHandle, col_pooling_sequence_::tf.TensorHandle; name=nothing, overlapping=nothing) + function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalMaxPoolGrad") tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) @@ -34158,6 +40059,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + if tf.eager_mode + fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + else + fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + end + end end @@ -34167,7 +40075,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "RemoteCall") do desc = tf.NodeDescription("RemoteCall") @@ -34187,7 +40095,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function remote_call(target_::tf.TensorHandle, args_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("RemoteCall") tf.add_input(desc, target_) tf.add_input(desc, args_) @@ -34202,6 +40110,13 @@ begin end (tf.execute(desc))[1] end + function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + if tf.eager_mode + remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) + else + remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) + end + end end @@ -34211,7 +40126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_graph(params_, indices_; name=nothing, 
validate_indices=nothing) local desc tf.with_op_name(name, "Gather") do desc = tf.NodeDescription("Gather") @@ -34228,7 +40143,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function gather(params_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, validate_indices=nothing) + function gather_eager(params_, indices_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("Gather") tf.add_input(desc, params_) tf.add_input(desc, indices_) @@ -34239,6 +40154,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function gather(params_, indices_; name=nothing, validate_indices=nothing) + if tf.eager_mode + gather_eager(params_, indices_; name=name, validate_indices=validate_indices) + else + gather_graph(params_, indices_; name=name, validate_indices=validate_indices) + end + end end @@ -34248,7 +40170,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "QuantizedMatMul") do desc = tf.NodeDescription("QuantizedMatMul") @@ -34280,7 +40202,7 @@ begin end out end - function quantized_mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle, min_a_::tf.TensorHandle, max_a_::tf.TensorHandle, min_b_::tf.TensorHandle, max_b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing) + function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("QuantizedMatMul") tf.add_input(desc, a_) tf.add_input(desc, b_) @@ -34298,16 +40220,23 @@ begin desc["T2"] = tf.data_type(b_) tf.execute(desc) end + function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + if tf.eager_mode + quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + else + quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + end + end end """ - unicode_decode_with_offsets(input; errors=replace, replacement_char=65533, replace_control_characters=false) + unicode_decode_with_offsets(input; errors=, replacement_char=65533, replace_control_characters=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecodeWithOffsets") do desc = tf.NodeDescription("UnicodeDecodeWithOffsets") @@ -34333,7 +40262,7 @@ begin end out end - function unicode_decode_with_offsets(input_::tf.TensorHandle; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, 
replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecodeWithOffsets") tf.add_input(desc, input_) if input_encoding !== nothing @@ -34350,6 +40279,13 @@ begin end tf.execute(desc) end + function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.eager_mode + unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end + end end @@ -34359,7 +40295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") @@ -34386,7 +40322,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_::tf.TensorHandle, embedding_indices_::tf.TensorHandle, aggregation_weights_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) @@ -34406,6 +40342,13 @@ begin end (tf.execute(desc))[1] end + function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + if tf.eager_mode + enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) + else + enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) + end + end end @@ -34415,7 +40358,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorApplyGradient") do desc = tf.NodeDescription("AccumulatorApplyGradient") @@ -34432,7 +40375,7 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function accumulator_apply_gradient(handle_::tf.TensorHandle, local_step_::tf.TensorHandle, gradient_::tf.TensorHandle; name=nothing, dtype=nothing) + function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorApplyGradient") tf.add_input(desc, handle_) tf.add_input(desc, local_step_) @@ -34443,6 +40386,13 @@ begin desc["dtype"] = tf.data_type(gradient_) (tf.execute(desc))[1] end + function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + if tf.eager_mode + accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) + else + accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) + end + end end @@ -34452,7 +40402,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) local desc tf.with_op_name(name, "WriteSummary") do desc = tf.NodeDescription("WriteSummary") @@ -34470,7 +40420,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tensor_::tf.TensorHandle, tag_::tf.TensorHandle, summary_metadata_::tf.TensorHandle; name=nothing) + function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) desc = tf.EagerOp("WriteSummary") tf.add_input(desc, writer_) tf.add_input(desc, step_) @@ -34480,6 +40430,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + if tf.eager_mode + write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name) + else + write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name) + end + end end @@ -34489,7 +40446,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2D") do desc = tf.NodeDescription("QuantizedConv2D") @@ -34527,7 +40484,7 @@ begin end out end - function quantized_conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2D") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -34551,6 +40508,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, 
out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -34560,7 +40524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyMomentum") do desc = tf.NodeDescription("ResourceApplyMomentum") @@ -34584,7 +40548,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyMomentum") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -34602,6 +40566,13 @@ begin desc["T"] = tf.data_type(momentum_) (tf.execute(desc))[1] end + function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end @@ -34611,7 +40582,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log1p(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log1p_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log1p") do desc = tf.NodeDescription("Log1p") @@ -34621,12 +40592,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function log1p(x_::tf.TensorHandle; name=nothing) + function log1p_eager(x_; name=nothing) desc = tf.EagerOp("Log1p") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function log1p(x_; name=nothing) + if tf.eager_mode + log1p_eager(x_; name=name) + else + log1p_graph(x_; name=name) + end + end end @@ -34636,7 +40614,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapClear") do desc = tf.NodeDescription("OrderedMapClear") @@ -34658,7 +40636,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, 
dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapClear") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -34677,6 +40655,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -34686,7 +40671,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterUpdate") do desc = tf.NodeDescription("ResourceScatterUpdate") @@ -34705,7 +40690,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_update(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterUpdate") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -34717,6 +40702,13 @@ begin desc["dtype"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.eager_mode + resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end + end end @@ -34726,7 +40718,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "BarrierTakeMany") do desc = tf.NodeDescription("BarrierTakeMany") @@ -34754,7 +40746,7 @@ begin end out end - function barrier_take_many(handle_::tf.TensorHandle, num_elements_::tf.TensorHandle; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) desc = tf.EagerOp("BarrierTakeMany") tf.add_input(desc, handle_) tf.add_input(desc, num_elements_) @@ -34772,6 +40764,13 @@ begin end tf.execute(desc) end + function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + if tf.eager_mode + 
barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + else + barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + end + end end @@ -34781,7 +40780,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyKerasMomentum") do desc = tf.NodeDescription("ResourceApplyKerasMomentum") @@ -34805,7 +40804,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_keras_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyKerasMomentum") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -34823,6 +40822,13 @@ begin desc["T"] = tf.data_type(momentum_) (tf.execute(desc))[1] end + function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end @@ -34832,7 +40838,7 @@ end Generates serialized partition messages suitable for batch reads. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") @@ -34860,7 +40866,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + function generate_big_query_reader_partitions_eager(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) desc = tf.EagerOp("GenerateBigQueryReaderPartitions") if project_id !== nothing desc["project_id"] = Base.String(project_id) @@ -34885,6 +40891,13 @@ begin end (tf.execute(desc))[1] end + function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + if tf.eager_mode + generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + else + generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + end + end end @@ -34894,7 +40907,7 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaRecvAtHost") do desc = tf.NodeDescription("_XlaRecvAtHost") @@ -34912,7 +40925,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _xla_recv_at_host(dynamic_key_::tf.TensorHandle; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaRecvAtHost") tf.add_input(desc, dynamic_key_) if Toutputs !== nothing @@ -34926,6 +40939,13 @@ begin end (tf.execute(desc))[1] end + function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + if tf.eager_mode + _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + else + _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, 
device_ordinal=device_ordinal) + end + end end @@ -34935,7 +40955,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedAvgPool") do desc = tf.NodeDescription("QuantizedAvgPool") @@ -34963,7 +40983,7 @@ begin end out end - function quantized_avg_pool(input_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedAvgPool") tf.add_input(desc, input_) tf.add_input(desc, min_input_) @@ -34980,6 +41000,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end + end end @@ -34989,7 +41016,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") @@ -35022,7 +41049,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_adam_with_amsgrad(var_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, vhat_::tf.TensorHandle, beta1_power_::tf.TensorHandle, beta2_power_::tf.TensorHandle, lr_::tf.TensorHandle, beta1_::tf.TensorHandle, beta2_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -35047,6 +41074,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -35056,7 +41090,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_resize(input_handle_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) local desc tf.with_op_name(name, "TensorListResize") do desc = tf.NodeDescription("TensorListResize") @@ -35067,12 +41101,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_resize(input_handle_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing) + function tensor_list_resize_eager(input_handle_, size_; name=nothing) desc = tf.EagerOp("TensorListResize") tf.add_input(desc, input_handle_) tf.add_input(desc, size_) (tf.execute(desc))[1] end + function tensor_list_resize(input_handle_, size_; name=nothing) + if tf.eager_mode + tensor_list_resize_eager(input_handle_, size_; name=name) + else + tensor_list_resize_graph(input_handle_, size_; name=name) + end + end end @@ -35082,7 +41123,7 @@ end Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostRecv") do desc = tf.NodeDescription("_HostRecv") @@ -35107,7 +41148,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + function _host_recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_HostRecv") if tensor_type !== nothing desc["tensor_type"] = Base.identity(tensor_type) @@ -35129,6 +41170,13 @@ begin end (tf.execute(desc))[1] end + function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.eager_mode + _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end + end end @@ -35138,7 +41186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCenterBias") do desc = tf.NodeDescription("BoostedTreesCenterBias") @@ -35155,7 +41203,7 @@ begin end tf.Tensor(tf.Operation(desc)) end 
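# Single-output ops unwrap the result of execution with
# `(tf.execute(desc))[1]`, while ops whose registry entry declares several
# outputs (e.g. `quantized_conv2d` and `barrier_take_many` above) return
# `tf.execute(desc)` whole. The choice is made per op by the generator, not
# at run time; sketched:
#
#     res = tf.execute(desc)  # indexable collection of output handles
#     res[1]                  # emitted only when the op has exactly one output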
- function boosted_trees_center_bias(tree_ensemble_handle_::tf.TensorHandle, mean_gradients_::tf.TensorHandle, mean_hessians_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle; name=nothing) + function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) desc = tf.EagerOp("BoostedTreesCenterBias") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, mean_gradients_) @@ -35164,6 +41212,13 @@ begin tf.add_input(desc, l2_) (tf.execute(desc))[1] end + function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + if tf.eager_mode + boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + else + boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + end + end end @@ -35173,7 +41228,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSizeV2") do desc = tf.NodeDescription("LookupTableSizeV2") @@ -35182,11 +41237,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_size_v2(table_handle_::tf.TensorHandle; name=nothing) + function lookup_table_size_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSizeV2") tf.add_input(desc, table_handle_) (tf.execute(desc))[1] end + function lookup_table_size_v2(table_handle_; name=nothing) + if tf.eager_mode + lookup_table_size_v2_eager(table_handle_; name=name) + else + lookup_table_size_v2_graph(table_handle_; name=name) + end + end end @@ -35196,7 +41258,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT") do desc = tf.NodeDescription("IRFFT") @@ -35207,12 +41269,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function irfft(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function irfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function irfft(input_, fft_length_; name=nothing) + if tf.eager_mode + irfft_eager(input_, fft_length_; name=name) + else + irfft_graph(input_, fft_length_; name=name) + end + end end @@ -35222,7 +41291,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_add(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceAdd") do desc = tf.NodeDescription("InplaceAdd") @@ -35236,7 +41305,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function inplace_add(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + function inplace_add_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceAdd") tf.add_input(desc, x_) tf.add_input(desc, i_) @@ -35245,16 +41314,23 @@ begin desc["T"] = tf.data_type(v_) (tf.execute(desc))[1] end + function inplace_add(x_, i_, v_; 
name=nothing) + if tf.eager_mode + inplace_add_eager(x_, i_, v_; name=name) + else + inplace_add_graph(x_, i_, v_; name=name) + end + end end """ - bias_add(value, bias; data_format=NHWC) + bias_add(value, bias; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAdd") do desc = tf.NodeDescription("BiasAdd") @@ -35269,7 +41345,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bias_add(value_::tf.TensorHandle, bias_::tf.TensorHandle; name=nothing, data_format=nothing) + function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAdd") tf.add_input(desc, value_) tf.add_input(desc, bias_) @@ -35280,6 +41356,13 @@ begin desc["T"] = tf.data_type(bias_) (tf.execute(desc))[1] end + function bias_add(value_, bias_; name=nothing, data_format=nothing) + if tf.eager_mode + bias_add_eager(value_, bias_; name=name, data_format=data_format) + else + bias_add_graph(value_, bias_; name=name, data_format=data_format) + end + end end @@ -35289,7 +41372,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") @@ -35316,7 +41399,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_::tf.TensorHandle, momenta_::tf.TensorHandle, velocities_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) @@ -35336,6 +41419,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -35345,7 +41435,7 @@ end An op that disconnects the 
TPUs on a host from a running distributed """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) local desc tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do desc @@ -35353,10 +41443,17 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _disconnect_host_from_distributed_tpu_system(; name=nothing) + function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") (tf.execute(desc))[1] end + function _disconnect_host_from_distributed_tpu_system(; name=nothing) + if tf.eager_mode + _disconnect_host_from_distributed_tpu_system_eager(; name=name) + else + _disconnect_host_from_distributed_tpu_system_graph(; name=name) + end + end end @@ -35366,7 +41463,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) local desc tf.with_op_name(name, "RaggedRange") do desc = tf.NodeDescription("RaggedRange") @@ -35385,7 +41482,7 @@ begin end out end - function ragged_range(starts_::tf.TensorHandle, limits_::tf.TensorHandle, deltas_::tf.TensorHandle; name=nothing) + function ragged_range_eager(starts_, limits_, deltas_; name=nothing) desc = tf.EagerOp("RaggedRange") tf.add_input(desc, starts_) tf.add_input(desc, limits_) @@ -35395,6 +41492,13 @@ begin desc["T"] = tf.data_type(deltas_) tf.execute(desc) end + function ragged_range(starts_, limits_, deltas_; name=nothing) + if tf.eager_mode + ragged_range_eager(starts_, limits_, deltas_; name=name) + else + ragged_range_graph(starts_, limits_, deltas_; name=name) + end + end end @@ -35404,7 +41508,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "WindowDataset") do desc = tf.NodeDescription("WindowDataset") @@ -35427,7 +41531,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function window_dataset(input_dataset_::tf.TensorHandle, size_::tf.TensorHandle, shift_::tf.TensorHandle, stride_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("WindowDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, size_) @@ -35442,6 +41546,13 @@ begin end (tf.execute(desc))[1] end + function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + 
window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -35451,7 +41562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function diag(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "Diag") do desc = tf.NodeDescription("Diag") @@ -35461,12 +41572,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function diag(diagonal_::tf.TensorHandle; name=nothing) + function diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("Diag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) (tf.execute(desc))[1] end + function diag(diagonal_; name=nothing) + if tf.eager_mode + diag_eager(diagonal_; name=name) + else + diag_graph(diagonal_; name=name) + end + end end @@ -35476,7 +41594,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "InfeedDequeue") do desc = tf.NodeDescription("InfeedDequeue") @@ -35489,7 +41607,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + function infeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("InfeedDequeue") if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -35499,6 +41617,13 @@ begin end (tf.execute(desc))[1] end + function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + if tf.eager_mode + infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) + else + infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) + end + end end @@ -35508,7 +41633,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") @@ -35525,7 +41650,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_latency_stats_dataset(input_dataset_::tf.TensorHandle, tag_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLatencyStatsDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, tag_) @@ -35537,6 +41662,13 @@ begin end (tf.execute(desc))[1] end + function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ 
-35546,7 +41678,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddSparseToTensorsMap") do desc = tf.NodeDescription("AddSparseToTensorsMap") @@ -35566,7 +41698,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function add_sparse_to_tensors_map(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddSparseToTensorsMap") tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) @@ -35580,6 +41712,13 @@ begin desc["T"] = tf.data_type(sparse_values_) (tf.execute(desc))[1] end + function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end + end end @@ -35589,7 +41728,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedGather") do desc = tf.NodeDescription("RaggedGather") @@ -35616,7 +41755,7 @@ begin end out end - function ragged_gather(params_nested_splits_::tf.TensorHandle, params_dense_values_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedGather") tf.add_input(desc, params_nested_splits_) tf.add_input(desc, params_dense_values_) @@ -35631,6 +41770,13 @@ begin desc["Tindices"] = tf.data_type(indices_) tf.execute(desc) end + function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + if tf.eager_mode + ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + else + ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + end + end end @@ -35640,7 +41786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function rgb_to_hsv(images_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) local desc tf.with_op_name(name, "RGBToHSV") do desc = tf.NodeDescription("RGBToHSV") @@ -35650,12 +41796,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function rgb_to_hsv(images_::tf.TensorHandle; name=nothing) + function rgb_to_hsv_eager(images_; name=nothing) desc = tf.EagerOp("RGBToHSV") tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function rgb_to_hsv(images_; name=nothing) + if tf.eager_mode + rgb_to_hsv_eager(images_; name=name) + else + rgb_to_hsv_graph(images_; name=name) + end + end end @@ -35665,7 +41818,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") @@ -35674,11 +41827,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator_to_string_handle(multi_device_iterator_::tf.TensorHandle; name=nothing) + function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") tf.add_input(desc, multi_device_iterator_) (tf.execute(desc))[1] end + function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) + if tf.eager_mode + multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) + else + multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) + end + end end @@ -35688,7 +41848,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) local desc tf.with_op_name(name, "For") do desc = tf.NodeDescription("For") @@ -35709,7 +41869,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function for_(start_::tf.TensorHandle, limit_::tf.TensorHandle, delta_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, T=nothing, body=nothing) + function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) desc = tf.EagerOp("For") tf.add_input(desc, start_) tf.add_input(desc, limit_) @@ -35723,6 +41883,13 @@ begin end (tf.execute(desc))[1] end + function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + if tf.eager_mode + for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) + else + for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) + end + end end @@ -35732,7 +41899,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMaxSparse") do desc 
= tf.NodeDescription("SparseReduceMaxSparse") @@ -35756,7 +41923,7 @@ begin end out end - function sparse_reduce_max_sparse(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle, reduction_axes_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMaxSparse") tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) @@ -35768,6 +41935,13 @@ begin desc["T"] = tf.data_type(input_values_) tf.execute(desc) end + function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.eager_mode + sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end + end end @@ -35777,7 +41951,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatOffset") do desc = tf.NodeDescription("ConcatOffset") @@ -35796,7 +41970,7 @@ begin end out end - function concat_offset(concat_dim_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, N=nothing) + function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatOffset") tf.add_input(desc, concat_dim_) tf.add_input(desc, shape_) @@ -35805,6 +41979,13 @@ begin end tf.execute(desc) end + function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) + if tf.eager_mode + concat_offset_eager(concat_dim_, shape_; name=name, N=N) + else + concat_offset_graph(concat_dim_, shape_; name=name, N=N) + end + end end @@ -35814,7 +41995,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Stage") do desc = tf.NodeDescription("Stage") @@ -35838,7 +42019,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stage(values_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Stage") tf.add_input(desc, values_) if capacity !== nothing @@ -35858,6 +42039,13 @@ begin end (tf.execute(desc))[1] end + function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, 
shared_name=shared_name) + end + end end @@ -35867,7 +42055,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function switch(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "Switch") do desc = tf.NodeDescription("Switch") @@ -35884,13 +42072,20 @@ begin end out end - function switch(data_::tf.TensorHandle, pred_::tf.TensorHandle; name=nothing) + function switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("Switch") tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) tf.execute(desc) end + function switch(data_, pred_; name=nothing) + if tf.eager_mode + switch_eager(data_, pred_; name=name) + else + switch_graph(data_, pred_; name=name) + end + end end @@ -35900,7 +42095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueManyV2") do desc = tf.NodeDescription("QueueDequeueManyV2") @@ -35917,7 +42112,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_many_v2(handle_::tf.TensorHandle, n_::tf.TensorHandle; name=nothing, component_types=nothing, timeout_ms=nothing) + function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueManyV2") tf.add_input(desc, handle_) tf.add_input(desc, n_) @@ -35929,6 +42124,13 @@ begin end (tf.execute(desc))[1] end + function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.eager_mode + queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end + end end @@ -35938,7 +42140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentProd") do desc = tf.NodeDescription("SegmentProd") @@ -35952,7 +42154,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function segment_prod(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function segment_prod_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentProd") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -35960,6 +42162,13 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) (tf.execute(desc))[1] end + function segment_prod(data_, segment_ids_; name=nothing) + if tf.eager_mode + segment_prod_eager(data_, segment_ids_; name=name) + else + segment_prod_graph(data_, segment_ids_; name=name) + end + end end @@ -35969,7 +42178,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function approximate_equal_graph(x_, 
y_; name=nothing, tolerance=nothing) local desc tf.with_op_name(name, "ApproximateEqual") do desc = tf.NodeDescription("ApproximateEqual") @@ -35984,7 +42193,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function approximate_equal(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, tolerance=nothing) + function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) desc = tf.EagerOp("ApproximateEqual") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -35995,16 +42204,23 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function approximate_equal(x_, y_; name=nothing, tolerance=nothing) + if tf.eager_mode + approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) + else + approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) + end + end end """ - conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2D") do desc = tf.NodeDescription("Conv2D") @@ -36034,7 +42250,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2D") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -36060,6 +42276,13 @@ begin desc["T"] = tf.data_type(filter_) (tf.execute(desc))[1] end + function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + else + conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + end + end end @@ -36069,7 +42292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) local desc tf.with_op_name(name, "CrossReplicaSum") do desc = tf.NodeDescription("CrossReplicaSum") @@ -36081,13 +42304,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cross_replica_sum(input_::tf.TensorHandle, group_assignment_::tf.TensorHandle; name=nothing) + function cross_replica_sum_eager(input_, group_assignment_; 
name=nothing) desc = tf.EagerOp("CrossReplicaSum") tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function cross_replica_sum(input_, group_assignment_; name=nothing) + if tf.eager_mode + cross_replica_sum_eager(input_, group_assignment_; name=name) + else + cross_replica_sum_graph(input_, group_assignment_; name=name) + end + end end @@ -36097,7 +42327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) local desc tf.with_op_name(name, "SparseMatMul") do desc = tf.NodeDescription("SparseMatMul") @@ -36122,7 +42352,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_mat_mul(a_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) desc = tf.EagerOp("SparseMatMul") tf.add_input(desc, a_) tf.add_input(desc, b_) @@ -36142,6 +42372,13 @@ begin desc["Tb"] = tf.data_type(b_) (tf.execute(desc))[1] end + function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + if tf.eager_mode + sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + else + sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + end + end end @@ -36151,7 +42388,7 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorSplit") do desc = tf.NodeDescription("_ScopedAllocatorSplit") @@ -36180,7 +42417,7 @@ begin end out end - function _scoped_allocator_split(concat_::tf.TensorHandle, split_::tf.TensorHandle; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) desc = tf.EagerOp("_ScopedAllocatorSplit") tf.add_input(desc, concat_) tf.add_input(desc, split_) @@ -36200,6 +42437,13 @@ begin desc["T"] = tf.data_type(split_) tf.execute(desc) end + function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + if tf.eager_mode + _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + else + _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + end + end end @@ -36209,7 +42453,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igammac(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igammac_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igammac") do desc = tf.NodeDescription("Igammac") @@ -36221,7 +42465,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function igammac(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function igammac_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igammac") tf.add_input(desc, a_) tf.add_input(desc, x_) @@ -36229,6 +42473,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function igammac(a_, x_; name=nothing) + if tf.eager_mode + igammac_eager(a_, x_; name=name) + else + igammac_graph(a_, x_; name=name) + end + end end @@ -36238,7 +42489,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) local desc tf.with_op_name(name, "BatchMatMul") do desc = tf.NodeDescription("BatchMatMul") @@ -36256,7 +42507,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_mat_mul(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing, adj_x=nothing, adj_y=nothing) + function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) desc = tf.EagerOp("BatchMatMul") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -36270,6 +42521,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + if tf.eager_mode + batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + else + batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + end + end end @@ -36279,7 +42537,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") @@ -36303,7 +42561,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function enqueue_tpu_embedding_sparse_batch(sample_indices_::tf.TensorHandle, embedding_indices_::tf.TensorHandle, aggregation_weights_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) @@ -36320,6 +42578,13 @@ begin end (tf.execute(desc))[1] end + function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + if tf.eager_mode + 
enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + else + enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + end + end end @@ -36329,7 +42594,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueCloseV2") do desc = tf.NodeDescription("QueueCloseV2") @@ -36341,7 +42606,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function queue_close_v2(handle_::tf.TensorHandle; name=nothing, cancel_pending_enqueues=nothing) + function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueCloseV2") tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing @@ -36349,6 +42614,13 @@ begin end (tf.execute(desc))[1] end + function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.eager_mode + queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end + end end @@ -36358,7 +42630,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayPack") do desc = tf.NodeDescription("TensorArrayPack") @@ -36375,7 +42647,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_pack(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing) + function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayPack") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -36387,6 +42659,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.eager_mode + tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end + end end @@ -36396,7 +42675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreState") do desc = tf.NodeDescription("ReaderRestoreState") @@ -36407,22 +42686,29 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reader_restore_state(reader_handle_::tf.TensorHandle, state_::tf.TensorHandle; name=nothing) + function reader_restore_state_eager(reader_handle_, state_; name=nothing) desc 
= tf.EagerOp("ReaderRestoreState") tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) (tf.execute(desc))[1] end + function reader_restore_state(reader_handle_, state_; name=nothing) + if tf.eager_mode + reader_restore_state_eager(reader_handle_, state_; name=name) + else + reader_restore_state_graph(reader_handle_, state_; name=name) + end + end end """ - _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) + _fused_conv2d(input, filter, args; data_format=, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) *NOTE*: Do not invoke this operator directly in Python. Grappler is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) local desc tf.with_op_name(name, "_FusedConv2D") do desc = tf.NodeDescription("_FusedConv2D") @@ -36460,7 +42746,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _fused_conv2d(input_::tf.TensorHandle, filter_::tf.TensorHandle, args_::tf.TensorHandle; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) desc = tf.EagerOp("_FusedConv2D") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -36494,6 +42780,13 @@ begin desc["T"] = tf.data_type(args_) (tf.execute(desc))[1] end + function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + if tf.eager_mode + _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, fused_ops=fused_ops, epsilon=epsilon) + else + _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, fused_ops=fused_ops, epsilon=epsilon) + end + end end @@ -36503,7 +42796,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) local desc tf.with_op_name(name, "_ReadVariablesOp") do desc = tf.NodeDescription("_ReadVariablesOp") @@ -36518,7 +42811,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _read_variables_op(resources_::tf.TensorHandle; name=nothing, N=nothing, dtypes=nothing) + function _read_variables_op_eager(resources_; name=nothing, N=nothing, 
dtypes=nothing) desc = tf.EagerOp("_ReadVariablesOp") tf.add_input(desc, resources_) if N !== nothing @@ -36529,6 +42822,13 @@ begin end (tf.execute(desc))[1] end + function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) + if tf.eager_mode + _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) + else + _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) + end + end end @@ -36538,7 +42838,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensors") do desc = tf.NodeDescription("MutableHashTableOfTensors") @@ -36563,7 +42863,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + function mutable_hash_table_of_tensors_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) desc = tf.EagerOp("MutableHashTableOfTensors") if container !== nothing desc["container"] = Base.String(container) @@ -36585,6 +42885,13 @@ begin end (tf.execute(desc))[1] end + function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.eager_mode + mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end + end end @@ -36594,7 +42901,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function read_file(filename_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function read_file_graph(filename_; name=nothing) local desc tf.with_op_name(name, "ReadFile") do desc = tf.NodeDescription("ReadFile") @@ -36603,11 +42910,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function read_file(filename_::tf.TensorHandle; name=nothing) + function read_file_eager(filename_; name=nothing) desc = tf.EagerOp("ReadFile") tf.add_input(desc, filename_) (tf.execute(desc))[1] end + function read_file(filename_; name=nothing) + if tf.eager_mode + read_file_eager(filename_; name=name) + else + read_file_graph(filename_; name=name) + end + end end @@ -36617,7 +42931,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") @@ -36644,7 +42958,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, weights_::tf.TensorHandle, benefits_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -36664,6 +42978,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -36673,7 +42994,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalAvgPoolGrad") do desc = tf.NodeDescription("FractionalAvgPoolGrad") @@ -36692,7 +43013,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fractional_avg_pool_grad(orig_input_tensor_shape_::tf.TensorHandle, out_backprop_::tf.TensorHandle, row_pooling_sequence_::tf.TensorHandle, col_pooling_sequence_::tf.TensorHandle; name=nothing, overlapping=nothing) + function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalAvgPoolGrad") tf.add_input(desc, orig_input_tensor_shape_) tf.add_input(desc, out_backprop_) @@ -36704,6 +43025,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + if tf.eager_mode + fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + else + fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; 
name=name, overlapping=overlapping) + end + end end @@ -36713,7 +43041,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") @@ -36738,7 +43066,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -36757,6 +43085,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -36766,7 +43101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormalV2") do desc = tf.NodeDescription("StatefulStandardNormalV2") @@ -36786,7 +43121,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateful_standard_normal_v2(resource_::tf.TensorHandle, algorithm_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, dtype=nothing, shape_dtype=nothing) + function stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) desc = tf.EagerOp("StatefulStandardNormalV2") tf.add_input(desc, resource_) tf.add_input(desc, algorithm_) @@ -36800,6 +43135,13 @@ begin desc["shape_dtype"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + if tf.eager_mode + stateful_standard_normal_v2_eager(resource_, algorithm_, 
shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) + else + stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) + end + end end @@ -36809,7 +43151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bincount(arr_, size_, weights_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) local desc tf.with_op_name(name, "Bincount") do desc = tf.NodeDescription("Bincount") @@ -36823,7 +43165,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bincount(arr_::tf.TensorHandle, size_::tf.TensorHandle, weights_::tf.TensorHandle; name=nothing) + function bincount_eager(arr_, size_, weights_; name=nothing) desc = tf.EagerOp("Bincount") tf.add_input(desc, arr_) tf.add_input(desc, size_) @@ -36831,6 +43173,13 @@ begin desc["T"] = tf.data_type(weights_) (tf.execute(desc))[1] end + function bincount(arr_, size_, weights_; name=nothing) + if tf.eager_mode + bincount_eager(arr_, size_, weights_; name=name) + else + bincount_graph(arr_, size_, weights_; name=name) + end + end end @@ -36840,7 +43189,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inv(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inv_graph(x_; name=nothing) local desc tf.with_op_name(name, "Inv") do desc = tf.NodeDescription("Inv") @@ -36850,12 +43199,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function inv(x_::tf.TensorHandle; name=nothing) + function inv_eager(x_; name=nothing) desc = tf.EagerOp("Inv") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function inv(x_; name=nothing) + if tf.eager_mode + inv_eager(x_; name=name) + else + inv_graph(x_; name=name) + end + end end @@ -36865,7 +43221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalAdagrad") do desc = tf.NodeDescription("ApplyProximalAdagrad") @@ -36888,7 +43244,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_proximal_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -36907,6 +43263,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end + end end @@ -36916,7 +43279,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) local desc tf.with_op_name(name, "GatherV2") do desc = tf.NodeDescription("GatherV2") @@ -36933,7 +43296,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function gather_v2(params_::tf.TensorHandle, indices_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + function gather_v2_eager(params_, indices_, axis_; name=nothing) desc = tf.EagerOp("GatherV2") tf.add_input(desc, params_) tf.add_input(desc, indices_) @@ -36943,6 +43306,13 @@ begin desc["Taxis"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function gather_v2(params_, indices_, axis_; name=nothing) + if tf.eager_mode + gather_v2_eager(params_, indices_, axis_; name=name) + else + gather_v2_graph(params_, indices_, axis_; name=name) + end + end end @@ -36952,7 +43322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_file(filename_, contents_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) local desc tf.with_op_name(name, "WriteFile") do desc = tf.NodeDescription("WriteFile") @@ -36963,12 +43333,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_file(filename_::tf.TensorHandle, contents_::tf.TensorHandle; name=nothing) + function write_file_eager(filename_, contents_; name=nothing) desc = tf.EagerOp("WriteFile") tf.add_input(desc, filename_) tf.add_input(desc, contents_) (tf.execute(desc))[1] end + function write_file(filename_, contents_; name=nothing) + if tf.eager_mode + write_file_eager(filename_, contents_; name=name) + else + write_file_graph(filename_, contents_; name=name) + end + end end @@ -36978,7 +43355,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") @@ -36992,11 +43369,18 @@ begin end out end - function boosted_trees_get_ensemble_states(tree_ensemble_handle_::tf.TensorHandle; name=nothing) + function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesGetEnsembleStates") tf.add_input(desc, tree_ensemble_handle_) tf.execute(desc) end + function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) + if tf.eager_mode + boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) + else + boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) + end + end end @@ -37006,7 +43390,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceGather") do desc = tf.NodeDescription("ResourceGather") @@ -37025,7 +43409,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_gather(resource_::tf.TensorHandle, 
indices_::tf.TensorHandle; name=nothing, validate_indices=nothing, dtype=nothing) + function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) desc = tf.EagerOp("ResourceGather") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -37038,6 +43422,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + if tf.eager_mode + resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + else + resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + end + end end @@ -37047,7 +43438,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") @@ -37068,7 +43459,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_proximal_gradient_descent(var_::tf.TensorHandle, alpha_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalGradientDescent") tf.add_input(desc, var_) tf.add_input(desc, alpha_) @@ -37084,6 +43475,13 @@ begin desc["T"] = tf.data_type(delta_) (tf.execute(desc))[1] end + function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end + end end @@ -37093,7 +43491,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function truncate_mod(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateMod") do desc = tf.NodeDescription("TruncateMod") @@ -37105,7 +43503,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function truncate_mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function truncate_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateMod") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -37113,6 +43511,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function truncate_mod(x_, y_; name=nothing) + if tf.eager_mode + truncate_mod_eager(x_, y_; name=name) + else + truncate_mod_graph(x_, y_; name=name) + end + end end @@ -37122,7 +43527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function log_matrix_determinant(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, 
"LogMatrixDeterminant") do desc = tf.NodeDescription("LogMatrixDeterminant") @@ -37137,12 +43542,19 @@ begin end out end - function log_matrix_determinant(input_::tf.TensorHandle; name=nothing) + function log_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("LogMatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) tf.execute(desc) end + function log_matrix_determinant(input_; name=nothing) + if tf.eager_mode + log_matrix_determinant_eager(input_; name=name) + else + log_matrix_determinant_graph(input_; name=name) + end + end end @@ -37152,7 +43564,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function irfft2d(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT2D") do desc = tf.NodeDescription("IRFFT2D") @@ -37163,12 +43575,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function irfft2d(input_::tf.TensorHandle, fft_length_::tf.TensorHandle; name=nothing) + function irfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT2D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) (tf.execute(desc))[1] end + function irfft2d(input_, fft_length_; name=nothing) + if tf.eager_mode + irfft2d_eager(input_, fft_length_; name=name) + else + irfft2d_graph(input_, fft_length_; name=name) + end + end end @@ -37178,7 +43597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesTrainingPredict") do desc = tf.NodeDescription("BoostedTreesTrainingPredict") @@ -37204,7 +43623,7 @@ begin end out end - function boosted_trees_training_predict(tree_ensemble_handle_::tf.TensorHandle, cached_tree_ids_::tf.TensorHandle, cached_node_ids_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesTrainingPredict") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, cached_tree_ids_) @@ -37218,6 +43637,13 @@ begin end tf.execute(desc) end + function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.eager_mode + boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end + end end 
@@ -37227,7 +43653,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function nearest_neighbors(points_, centers_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) local desc tf.with_op_name(name, "NearestNeighbors") do desc = tf.NodeDescription("NearestNeighbors") @@ -37245,13 +43671,20 @@ begin end out end - function nearest_neighbors(points_::tf.TensorHandle, centers_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing) + function nearest_neighbors_eager(points_, centers_, k_; name=nothing) desc = tf.EagerOp("NearestNeighbors") tf.add_input(desc, points_) tf.add_input(desc, centers_) tf.add_input(desc, k_) tf.execute(desc) end + function nearest_neighbors(points_, centers_, k_; name=nothing) + if tf.eager_mode + nearest_neighbors_eager(points_, centers_, k_; name=name) + else + nearest_neighbors_graph(points_, centers_, k_; name=name) + end + end end @@ -37261,7 +43694,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function floor(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_graph(x_; name=nothing) local desc tf.with_op_name(name, "Floor") do desc = tf.NodeDescription("Floor") @@ -37271,12 +43704,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function floor(x_::tf.TensorHandle; name=nothing) + function floor_eager(x_; name=nothing) desc = tf.EagerOp("Floor") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function floor(x_; name=nothing) + if tf.eager_mode + floor_eager(x_; name=name) + else + floor_graph(x_; name=name) + end + end end @@ -37286,7 +43726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -37311,7 +43751,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, gradient_accumulators_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -37330,6 +43770,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, 
shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -37339,7 +43786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) local desc tf.with_op_name(name, "WriteImageSummary") do desc = tf.NodeDescription("WriteImageSummary") @@ -37360,7 +43807,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function write_image_summary(writer_::tf.TensorHandle, step_::tf.TensorHandle, tag_::tf.TensorHandle, tensor_::tf.TensorHandle, bad_color_::tf.TensorHandle; name=nothing, max_images=nothing) + function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) desc = tf.EagerOp("WriteImageSummary") tf.add_input(desc, writer_) tf.add_input(desc, step_) @@ -37373,6 +43820,13 @@ begin desc["T"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + if tf.eager_mode + write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + else + write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + end + end end @@ -37382,7 +43836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tile_grad(input_, multiples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "TileGrad") do desc = tf.NodeDescription("TileGrad") @@ -37394,13 +43848,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tile_grad(input_::tf.TensorHandle, multiples_::tf.TensorHandle; name=nothing) + function tile_grad_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("TileGrad") tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function tile_grad(input_, multiples_; name=nothing) + if tf.eager_mode + tile_grad_eager(input_, multiples_; name=name) + else + tile_grad_graph(input_, multiples_; name=name) + end + end end @@ -37410,7 +43871,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV3") do desc = tf.NodeDescription("TensorArrayGradV3") @@ -37429,7 +43890,7 @@ begin end out end - function tensor_array_grad_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + function tensor_array_grad_v3_eager(handle_, flow_in_; 
name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV3") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -37438,6 +43899,13 @@ begin end tf.execute(desc) end + function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) + if tf.eager_mode + tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) + end + end end @@ -37447,7 +43915,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") @@ -37464,7 +43932,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function enqueue_tpu_embedding_integer_batch(batch_::tf.TensorHandle, mode_override_::tf.TensorHandle; name=nothing, N=nothing, device_ordinal=nothing) + function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") tf.add_input(desc, batch_) tf.add_input(desc, mode_override_) @@ -37476,16 +43944,23 @@ begin end (tf.execute(desc))[1] end + function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + if tf.eager_mode + enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + else + enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + end + end end """ - fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNorm") do desc = tf.NodeDescription("FusedBatchNorm") @@ -37517,7 +43992,7 @@ begin end out end - function fused_batch_norm(x_::tf.TensorHandle, scale_::tf.TensorHandle, offset_::tf.TensorHandle, mean_::tf.TensorHandle, variance_::tf.TensorHandle; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNorm") tf.add_input(desc, x_) tf.add_input(desc, scale_) @@ -37540,6 +44015,13 @@ begin desc["T"] = tf.data_type(variance_) tf.execute(desc) end + function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.eager_mode + fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, 
is_training=is_training) + else + fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end + end end @@ -37549,7 +44031,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_and(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalAnd") do desc = tf.NodeDescription("LogicalAnd") @@ -37560,12 +44042,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function logical_and(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function logical_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalAnd") tf.add_input(desc, x_) tf.add_input(desc, y_) (tf.execute(desc))[1] end + function logical_and(x_, y_; name=nothing) + if tf.eager_mode + logical_and_eager(x_, y_; name=name) + else + logical_and_graph(x_, y_; name=name) + end + end end @@ -37575,7 +44064,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterUpdate") do desc = tf.NodeDescription("TensorScatterUpdate") @@ -37591,7 +44080,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_scatter_update(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterUpdate") tf.add_input(desc, tensor_) tf.add_input(desc, indices_) @@ -37601,6 +44090,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) + if tf.eager_mode + tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) + end + end end @@ -37610,7 +44106,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReaderV2") do desc = tf.NodeDescription("TextLineReaderV2") @@ -37626,7 +44122,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + function text_line_reader_v2_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TextLineReaderV2") if skip_header_lines !== nothing desc["skip_header_lines"] = Base.Int(skip_header_lines) @@ -37639,6 +44135,13 @@ begin end (tf.execute(desc))[1] end + function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + else + text_line_reader_v2_graph(; name=name, 
skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + end + end end @@ -37648,7 +44151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorSliceDataset") do desc = tf.NodeDescription("TensorSliceDataset") @@ -37663,7 +44166,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_slice_dataset(components_::tf.TensorHandle; name=nothing, Toutput_types=nothing, output_shapes=nothing) + function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorSliceDataset") tf.add_input(desc, components_) if Toutput_types !== nothing @@ -37674,6 +44177,13 @@ begin end (tf.execute(desc))[1] end + function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.eager_mode + tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end + end end @@ -37683,7 +44193,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV3") do desc = tf.NodeDescription("TensorArrayScatterV3") @@ -37699,7 +44209,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_scatter_v3(handle_::tf.TensorHandle, indices_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatterV3") tf.add_input(desc, handle_) tf.add_input(desc, indices_) @@ -37708,6 +44218,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) + end + end end @@ -37717,7 +44234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighborGrad") do desc = tf.NodeDescription("ResizeNearestNeighborGrad") @@ -37732,7 +44249,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_nearest_neighbor_grad(grads_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighborGrad") 
tf.add_input(desc, grads_) tf.add_input(desc, size_) @@ -37742,6 +44259,13 @@ begin desc["T"] = tf.data_type(grads_) (tf.execute(desc))[1] end + function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) + else + resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) + end + end end @@ -37751,7 +44275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyPowerSign") do desc = tf.NodeDescription("ApplyPowerSign") @@ -37776,7 +44300,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function apply_power_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, logbase_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyPowerSign") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -37797,6 +44321,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end + end end @@ -37806,7 +44337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRebatchDataset") do desc = tf.NodeDescription("ExperimentalRebatchDataset") @@ -37823,7 +44354,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_rebatch_dataset(input_dataset_::tf.TensorHandle, num_workers_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRebatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, num_workers_) @@ -37835,6 +44366,13 @@ begin end (tf.execute(desc))[1] end + function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -37844,7 +44382,7 @@ end """ begin - 
#= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPad") do desc = tf.NodeDescription("MirrorPad") @@ -37860,7 +44398,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mirror_pad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing, mode=nothing) + function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPad") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -37871,6 +44409,13 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) (tf.execute(desc))[1] end + function mirror_pad(input_, paddings_; name=nothing, mode=nothing) + if tf.eager_mode + mirror_pad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_graph(input_, paddings_; name=name, mode=mode) + end + end end @@ -37880,7 +44425,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function logical_not(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_not_graph(x_; name=nothing) local desc tf.with_op_name(name, "LogicalNot") do desc = tf.NodeDescription("LogicalNot") @@ -37889,11 +44434,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function logical_not(x_::tf.TensorHandle; name=nothing) + function logical_not_eager(x_; name=nothing) desc = tf.EagerOp("LogicalNot") tf.add_input(desc, x_) (tf.execute(desc))[1] end + function logical_not(x_; name=nothing) + if tf.eager_mode + logical_not_eager(x_; name=name) + else + logical_not_graph(x_; name=name) + end + end end @@ -37903,7 +44455,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT") do desc = tf.NodeDescription("BatchIFFT") @@ -37912,11 +44464,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_ifft(input_::tf.TensorHandle; name=nothing) + function batch_ifft_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_ifft(input_; name=nothing) + if tf.eager_mode + batch_ifft_eager(input_; name=name) + else + batch_ifft_graph(input_; name=name) + end + end end @@ -37926,7 +44485,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV2") do desc = tf.NodeDescription("TensorArrayConcatV2") @@ -37948,7 +44507,7 @@ begin end out end - function tensor_array_concat_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV2") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ 
-37960,6 +44519,13 @@ begin end tf.execute(desc) end + function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.eager_mode + tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end + end end @@ -37969,7 +44535,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Sum") do desc = tf.NodeDescription("Sum") @@ -37986,7 +44552,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sum(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Sum") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -37997,6 +44563,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -38006,7 +44579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesPredict") do desc = tf.NodeDescription("BoostedTreesPredict") @@ -38023,7 +44596,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_predict(tree_ensemble_handle_::tf.TensorHandle, bucketized_features_::tf.TensorHandle; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesPredict") tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) @@ -38035,6 +44608,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.eager_mode + boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end + end end @@ -38044,7 +44624,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, 
max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize") @@ -38089,7 +44669,7 @@ begin end out end - function quantized_conv2d_with_bias_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndReluAndRequantize") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -38117,6 +44697,13 @@ begin desc["Tbias"] = tf.data_type(bias_) tf.execute(desc) end + function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -38126,7 +44713,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyAdagrad") @@ -38152,7 +44739,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -38170,6 +44757,13 @@ begin 
desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.eager_mode + resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end + end end @@ -38179,7 +44773,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyReluGrad") do desc = tf.NodeDescription("LeakyReluGrad") @@ -38194,7 +44788,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function leaky_relu_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing, alpha=nothing) + function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyReluGrad") tf.add_input(desc, gradients_) tf.add_input(desc, features_) @@ -38205,6 +44799,13 @@ begin desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) + if tf.eager_mode + leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) + else + leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) + end + end end @@ -38214,7 +44815,7 @@ end A graph node which represents a return value of a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _device_retval(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceRetval") do desc = tf.NodeDescription("_DeviceRetval") @@ -38227,7 +44828,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _device_retval(input_::tf.TensorHandle; name=nothing, index=nothing) + function _device_retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_DeviceRetval") tf.add_input(desc, input_) if index !== nothing @@ -38236,6 +44837,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _device_retval(input_; name=nothing, index=nothing) + if tf.eager_mode + _device_retval_eager(input_; name=name, index=index) + else + _device_retval_graph(input_; name=name, index=index) + end + end end @@ -38245,7 +44853,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pad(input_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pad_graph(input_, paddings_; name=nothing) local desc tf.with_op_name(name, "Pad") do desc = tf.NodeDescription("Pad") @@ -38258,7 +44866,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function pad(input_::tf.TensorHandle, paddings_::tf.TensorHandle; name=nothing) + function pad_eager(input_, paddings_; name=nothing) desc = tf.EagerOp("Pad") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -38266,6 +44874,13 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) (tf.execute(desc))[1] end + function pad(input_, paddings_; 
name=nothing) + if tf.eager_mode + pad_eager(input_, paddings_; name=name) + else + pad_graph(input_, paddings_; name=name) + end + end end @@ -38275,7 +44890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddManySparseToTensorsMap") do desc = tf.NodeDescription("AddManySparseToTensorsMap") @@ -38295,7 +44910,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function add_many_sparse_to_tensors_map(sparse_indices_::tf.TensorHandle, sparse_values_::tf.TensorHandle, sparse_shape_::tf.TensorHandle; name=nothing, container=nothing, shared_name=nothing) + function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddManySparseToTensorsMap") tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) @@ -38309,6 +44924,13 @@ begin desc["T"] = tf.data_type(sparse_values_) (tf.execute(desc))[1] end + function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end + end end @@ -38318,7 +44940,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReorder") do desc = tf.NodeDescription("SparseReorder") @@ -38337,7 +44959,7 @@ begin end out end - function sparse_reorder(input_indices_::tf.TensorHandle, input_values_::tf.TensorHandle, input_shape_::tf.TensorHandle; name=nothing) + function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) desc = tf.EagerOp("SparseReorder") tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) @@ -38345,6 +44967,13 @@ begin desc["T"] = tf.data_type(input_values_) tf.execute(desc) end + function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) + if tf.eager_mode + sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) + else + sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) + end + end end @@ -38354,7 +44983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_xor(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseXor") do desc = tf.NodeDescription("BitwiseXor") @@ -38366,7 +44995,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bitwise_xor(x_::tf.TensorHandle, 
y_::tf.TensorHandle; name=nothing) + function bitwise_xor_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseXor") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -38374,6 +45003,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function bitwise_xor(x_, y_; name=nothing) + if tf.eager_mode + bitwise_xor_eager(x_, y_; name=name) + else + bitwise_xor_graph(x_, y_; name=name) + end + end end @@ -38383,7 +45019,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixSetDiag") do desc = tf.NodeDescription("BatchMatrixSetDiag") @@ -38395,7 +45031,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_matrix_set_diag(input_::tf.TensorHandle, diagonal_::tf.TensorHandle; name=nothing) + function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixSetDiag") tf.add_input(desc, input_) tf.add_input(desc, diagonal_) @@ -38403,6 +45039,13 @@ begin desc["T"] = tf.data_type(diagonal_) (tf.execute(desc))[1] end + function batch_matrix_set_diag(input_, diagonal_; name=nothing) + if tf.eager_mode + batch_matrix_set_diag_eager(input_, diagonal_; name=name) + else + batch_matrix_set_diag_graph(input_, diagonal_; name=name) + end + end end @@ -38412,7 +45055,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsertV2") do desc = tf.NodeDescription("LookupTableInsertV2") @@ -38427,7 +45070,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_insert_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsertV2") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -38436,6 +45079,13 @@ begin desc["Tout"] = tf.data_type(values_) (tf.execute(desc))[1] end + function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) + if tf.eager_mode + lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name) + end + end end @@ -38445,7 +45095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") @@ -38464,7 +45114,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_dense_to_sparse_batch_dataset(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, 
row_shape_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) @@ -38477,6 +45127,13 @@ begin end (tf.execute(desc))[1] end + function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -38486,7 +45143,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyRMSProp") @@ -38517,7 +45174,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_rms_prop(var_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyRMSProp") tf.add_input(desc, var_) tf.add_input(desc, ms_) @@ -38539,6 +45196,13 @@ begin desc["Tindices"] = tf.data_type(indices_) (tf.execute(desc))[1] end + function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end + end end @@ -38548,7 +45212,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomCrop") do desc = tf.NodeDescription("RandomCrop") @@ -38566,7 +45230,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_crop(image_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing) + function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomCrop") tf.add_input(desc, image_) tf.add_input(desc, size_) @@ -38579,6 +45243,13 @@ begin 
desc["T"] = tf.data_type(image_) (tf.execute(desc))[1] end + function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) + else + random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) + end + end end @@ -38588,7 +45259,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImportV2") do desc = tf.NodeDescription("LookupTableImportV2") @@ -38603,7 +45274,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lookup_table_import_v2(table_handle_::tf.TensorHandle, keys_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImportV2") tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) @@ -38612,6 +45283,13 @@ begin desc["Tout"] = tf.data_type(values_) (tf.execute(desc))[1] end + function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) + if tf.eager_mode + lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) + end + end end @@ -38621,7 +45299,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdUpdate") do desc = tf.NodeDescription("ResourceScatterNdUpdate") @@ -38640,7 +45318,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_nd_update(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdUpdate") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -38652,6 +45330,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -38661,7 +45346,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) local desc tf.with_op_name(name, "StaticRegexFullMatch") do desc = tf.NodeDescription("StaticRegexFullMatch") @@ -38673,7 +45358,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function static_regex_full_match(input_::tf.TensorHandle; name=nothing, pattern=nothing) + function static_regex_full_match_eager(input_; name=nothing, 
pattern=nothing) desc = tf.EagerOp("StaticRegexFullMatch") tf.add_input(desc, input_) if pattern !== nothing @@ -38681,6 +45366,13 @@ begin end (tf.execute(desc))[1] end + function static_regex_full_match(input_; name=nothing, pattern=nothing) + if tf.eager_mode + static_regex_full_match_eager(input_; name=name, pattern=pattern) + else + static_regex_full_match_graph(input_; name=name, pattern=pattern) + end + end end @@ -38690,7 +45382,7 @@ end Configures the credentials used by the GCS client of the local TF runtime. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function gcs_configure_credentials(json_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureCredentials") do desc = tf.NodeDescription("GcsConfigureCredentials") @@ -38699,11 +45391,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function gcs_configure_credentials(json_::tf.TensorHandle; name=nothing) + function gcs_configure_credentials_eager(json_; name=nothing) desc = tf.EagerOp("GcsConfigureCredentials") tf.add_input(desc, json_) (tf.execute(desc))[1] end + function gcs_configure_credentials(json_; name=nothing) + if tf.eager_mode + gcs_configure_credentials_eager(json_; name=name) + else + gcs_configure_credentials_graph(json_; name=name) + end + end end @@ -38713,7 +45412,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV3") do desc = tf.NodeDescription("TensorArraySizeV3") @@ -38724,12 +45423,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_size_v3(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV3") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) (tf.execute(desc))[1] end + function tensor_array_size_v3(handle_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_size_v3_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v3_graph(handle_, flow_in_; name=name) + end + end end @@ -38739,7 +45445,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") @@ -38758,7 +45464,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_segment_sqrt_n_with_num_segments(data_::tf.TensorHandle, indices_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") tf.add_input(desc, data_) tf.add_input(desc, indices_) @@ -38769,6 +45475,13 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) 
(tf.execute(desc))[1] end + function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.eager_mode + sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end + end end @@ -38778,7 +45491,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") @@ -38825,7 +45538,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_group_by_reducer_dataset(input_dataset_::tf.TensorHandle, key_func_other_arguments_::tf.TensorHandle, init_func_other_arguments_::tf.TensorHandle, reduce_func_other_arguments_::tf.TensorHandle, finalize_func_other_arguments_::tf.TensorHandle; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByReducerDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) @@ -38864,16 +45577,23 @@ begin end (tf.execute(desc))[1] end + function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, 
init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + else + experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + end + end end """ - conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropFilter") do desc = tf.NodeDescription("Conv2DBackpropFilter") @@ -38905,7 +45625,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv2d_backprop_filter(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropFilter") tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) @@ -38932,16 +45652,23 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + else + conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + end + end end """ - 
max_pool_grad(orig_input, orig_output, grad; data_format=NHWC) + max_pool_grad(orig_input, orig_output, grad; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGrad") do desc = tf.NodeDescription("MaxPoolGrad") @@ -38967,7 +45694,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGrad") tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) @@ -38989,6 +45716,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -38998,7 +45732,7 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) local desc tf.with_op_name(name, "_InitializeHostForDistributedTPU") do desc = tf.NodeDescription("_InitializeHostForDistributedTPU") @@ -39007,11 +45741,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _initialize_host_for_distributed_tpu(input_::tf.TensorHandle; name=nothing) + function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) desc = tf.EagerOp("_InitializeHostForDistributedTPU") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function _initialize_host_for_distributed_tpu(input_; name=nothing) + if tf.eager_mode + _initialize_host_for_distributed_tpu_eager(input_; name=name) + else + _initialize_host_for_distributed_tpu_graph(input_; name=name) + end + end end @@ -39021,7 +45762,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StagePeek") do desc = tf.NodeDescription("StagePeek") @@ -39045,7 +45786,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stage_peek(index_::tf.TensorHandle; name=nothing, 
capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StagePeek") tf.add_input(desc, index_) if capacity !== nothing @@ -39065,6 +45806,13 @@ begin end (tf.execute(desc))[1] end + function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -39074,7 +45822,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) local desc tf.with_op_name(name, "PadV2") do desc = tf.NodeDescription("PadV2") @@ -39089,7 +45837,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function pad_v2(input_::tf.TensorHandle, paddings_::tf.TensorHandle, constant_values_::tf.TensorHandle; name=nothing) + function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) desc = tf.EagerOp("PadV2") tf.add_input(desc, input_) tf.add_input(desc, paddings_) @@ -39099,6 +45847,13 @@ begin desc["T"] = tf.data_type(constant_values_) (tf.execute(desc))[1] end + function pad_v2(input_, paddings_, constant_values_; name=nothing) + if tf.eager_mode + pad_v2_eager(input_, paddings_, constant_values_; name=name) + else + pad_v2_graph(input_, paddings_, constant_values_; name=name) + end + end end @@ -39108,7 +45863,7 @@ end Creates an empty Tensor with shape `shape` and type `dtype`. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) local desc tf.with_op_name(name, "_ParallelConcatStart") do desc = tf.NodeDescription("_ParallelConcatStart") @@ -39121,7 +45876,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) desc = tf.EagerOp("_ParallelConcatStart") if shape !== nothing desc["shape"] = Base.identity(shape) @@ -39131,16 +45886,23 @@ begin end (tf.execute(desc))[1] end + function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + if tf.eager_mode + _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) + else + _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + end + end end """ - print_v2(input; output_stream=stderr) + print_v2(input; output_stream=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) local desc tf.with_op_name(name, "PrintV2") do desc = tf.NodeDescription("PrintV2") @@ -39152,7 +45914,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function print_v2(input_::tf.TensorHandle; name=nothing, output_stream=nothing) + function print_v2_eager(input_; name=nothing, output_stream=nothing) desc = tf.EagerOp("PrintV2") tf.add_input(desc, input_) if output_stream !== nothing @@ -39160,6 +45922,13 @@ begin end (tf.execute(desc))[1] end + function print_v2(input_; name=nothing, output_stream=nothing) + if tf.eager_mode + print_v2_eager(input_; name=name, output_stream=output_stream) + else + print_v2_graph(input_; name=name, output_stream=output_stream) + end + end end @@ -39169,7 +45938,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptionalGetValue") do desc = tf.NodeDescription("OptionalGetValue") @@ -39184,7 +45953,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function optional_get_value(optional_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptionalGetValue") tf.add_input(desc, optional_) if output_types !== nothing @@ -39195,6 +45964,13 @@ begin end (tf.execute(desc))[1] end + function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -39204,7 +45980,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") @@ -39229,7 +46005,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_ftrl_parameters(parameters_::tf.TensorHandle, accumulators_::tf.TensorHandle, linears_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) @@ -39248,6 +46024,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -39257,7 +46040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) local desc tf.with_op_name(name, "SparseSlice") do desc = tf.NodeDescription("SparseSlice") @@ -39280,7 +46063,7 @@ begin end out end - function sparse_slice(indices_::tf.TensorHandle, values_::tf.TensorHandle, shape_::tf.TensorHandle, start_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing) + function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) desc = tf.EagerOp("SparseSlice") tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -39290,6 +46073,13 @@ begin desc["T"] = tf.data_type(values_) tf.execute(desc) end + function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) + if tf.eager_mode + sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) + else + sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) + end + end end @@ -39299,7 +46089,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") @@ 
-39320,7 +46110,7 @@ begin end out end - function boosted_trees_make_quantile_summaries(float_values_::tf.TensorHandle, example_weights_::tf.TensorHandle, epsilon_::tf.TensorHandle; name=nothing, num_features=nothing) + function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") tf.add_input(desc, float_values_) tf.add_input(desc, example_weights_) @@ -39330,6 +46120,13 @@ begin end tf.execute(desc) end + function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + else + boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + end + end end @@ -39339,7 +46136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixSolve") do desc = tf.NodeDescription("MatrixSolve") @@ -39354,7 +46151,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_solve(matrix_::tf.TensorHandle, rhs_::tf.TensorHandle; name=nothing, adjoint=nothing) + function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixSolve") tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) @@ -39365,6 +46162,13 @@ begin desc["T"] = tf.data_type(rhs_) (tf.execute(desc))[1] end + function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.eager_mode + matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end + end end @@ -39374,7 +46178,7 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "_ConfigureDistributedTPU") do desc = tf.NodeDescription("_ConfigureDistributedTPU") @@ -39386,7 +46190,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _configure_distributed_tpu(inputs_::tf.TensorHandle; name=nothing, N=nothing) + function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("_ConfigureDistributedTPU") tf.add_input(desc, inputs_) if N !== nothing @@ -39394,6 +46198,13 @@ begin end (tf.execute(desc))[1] end + function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) + if tf.eager_mode + _configure_distributed_tpu_eager(inputs_; name=name, N=N) + else + _configure_distributed_tpu_graph(inputs_; name=name, N=N) + end + end end @@ -39403,7 +46214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) local desc tf.with_op_name(name, 
"AdjustContrastv2") do desc = tf.NodeDescription("AdjustContrastv2") @@ -39415,13 +46226,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function adjust_contrastv2(images_::tf.TensorHandle, contrast_factor_::tf.TensorHandle; name=nothing) + function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing) desc = tf.EagerOp("AdjustContrastv2") tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function adjust_contrastv2(images_, contrast_factor_; name=nothing) + if tf.eager_mode + adjust_contrastv2_eager(images_, contrast_factor_; name=name) + else + adjust_contrastv2_graph(images_, contrast_factor_; name=name) + end + end end @@ -39431,7 +46249,7 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMaximum") do desc = tf.NodeDescription("_MklMaximum") @@ -39452,7 +46270,7 @@ begin end out end - function _mkl_maximum(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMaximum") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -39462,16 +46280,23 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.eager_mode + _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end + end end """ - cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsSize") do desc = tf.NodeDescription("CudnnRNNParamsSize") @@ -39505,7 +46330,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cudnn_rnn_params_size(num_layers_::tf.TensorHandle, num_units_::tf.TensorHandle, input_size_::tf.TensorHandle; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsSize") tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) @@ -39533,6 +46358,13 @@ begin end (tf.execute(desc))[1] end + function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, 
rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.eager_mode + cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end + end end @@ -39542,7 +46374,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") @@ -39556,7 +46388,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_::tf.TensorHandle, summaries_::tf.TensorHandle; name=nothing, num_features=nothing) + function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, summaries_) @@ -39565,6 +46397,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + if tf.eager_mode + boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + end + end end @@ -39574,7 +46413,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_ifft3d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT3D") do desc = tf.NodeDescription("BatchIFFT3D") @@ -39583,11 +46422,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_ifft3d(input_::tf.TensorHandle; name=nothing) + function batch_ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT3D") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_ifft3d(input_; name=nothing) + if tf.eager_mode + batch_ifft3d_eager(input_; name=name) + else + batch_ifft3d_graph(input_; name=name) + end + end end @@ -39597,7 +46443,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sigmoid(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sigmoid_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sigmoid") do desc = tf.NodeDescription("Sigmoid") @@ -39607,12 +46453,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sigmoid(x_::tf.TensorHandle; name=nothing) + 
function sigmoid_eager(x_; name=nothing) desc = tf.EagerOp("Sigmoid") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function sigmoid(x_; name=nothing) + if tf.eager_mode + sigmoid_eager(x_; name=name) + else + sigmoid_graph(x_; name=name) + end + end end @@ -39622,7 +46475,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMean") do desc = tf.NodeDescription("SegmentMean") @@ -39636,7 +46489,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function segment_mean(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function segment_mean_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMean") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -39644,6 +46497,13 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) (tf.execute(desc))[1] end + function segment_mean(data_, segment_ids_; name=nothing) + if tf.eager_mode + segment_mean_eager(data_, segment_ids_; name=name) + else + segment_mean_graph(data_, segment_ids_; name=name) + end + end end @@ -39653,7 +46513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") @@ -39662,11 +46522,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_::tf.TensorHandle; name=nothing) + function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") tf.add_input(desc, tree_ensemble_handle_) (tf.execute(desc))[1] end + function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) + if tf.eager_mode + is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) + else + is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) + end + end end @@ -39676,7 +46543,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV2") do desc = tf.NodeDescription("TensorArraySizeV2") @@ -39687,12 +46554,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_size_v2(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV2") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) (tf.execute(desc))[1] end + function tensor_array_size_v2(handle_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_size_v2_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v2_graph(handle_, flow_in_; name=name) + end + end end @@ -39702,7 +46576,7 @@ end Returns x - y 
element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSub") do desc = tf.NodeDescription("_MklSub") @@ -39723,7 +46597,7 @@ begin end out end - function _mkl_sub(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSub") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -39733,6 +46607,13 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.eager_mode + _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end + end end @@ -39742,7 +46623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) local desc tf.with_op_name(name, "SendTPUEmbeddingGradients") do desc = tf.NodeDescription("SendTPUEmbeddingGradients") @@ -39762,7 +46643,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function send_tpu_embedding_gradients(inputs_::tf.TensorHandle, learning_rates_::tf.TensorHandle; name=nothing, N=nothing, NN=nothing, config=nothing) + function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) desc = tf.EagerOp("SendTPUEmbeddingGradients") tf.add_input(desc, inputs_) tf.add_input(desc, learning_rates_) @@ -39777,16 +46658,23 @@ begin end (tf.execute(desc))[1] end + function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + if tf.eager_mode + send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + else + send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + end + end end """ - max_pool3d(input; data_format=NDHWC) + max_pool3d(input; data_format=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3D") do desc = tf.NodeDescription("MaxPool3D") @@ -39808,7 +46696,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool3d(input_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3D") tf.add_input(desc, input_) if ksize !== nothing @@ -39826,6 +46714,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, 
data_format=nothing) + if tf.eager_mode + max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end @@ -39835,7 +46730,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Prod") do desc = tf.NodeDescription("Prod") @@ -39852,7 +46747,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function prod(input_::tf.TensorHandle, reduction_indices_::tf.TensorHandle; name=nothing, keep_dims=nothing) + function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Prod") tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) @@ -39863,6 +46758,13 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) (tf.execute(desc))[1] end + function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.eager_mode + prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end + end end @@ -39872,7 +46774,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") @@ -39881,11 +46783,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_identity_indexed_dataset(size_::tf.TensorHandle; name=nothing) + function experimental_identity_indexed_dataset_eager(size_; name=nothing) desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") tf.add_input(desc, size_) (tf.execute(desc))[1] end + function experimental_identity_indexed_dataset(size_; name=nothing) + if tf.eager_mode + experimental_identity_indexed_dataset_eager(size_; name=name) + else + experimental_identity_indexed_dataset_graph(size_; name=name) + end + end end @@ -39895,7 +46804,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBack") do desc = tf.NodeDescription("TensorListPushBack") @@ -39910,7 +46819,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_push_back(input_handle_::tf.TensorHandle, tensor_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBack") tf.add_input(desc, input_handle_) tf.add_input(desc, tensor_) @@ -39920,6 +46829,13 @@ begin desc["element_dtype"] = tf.data_type(tensor_) (tf.execute(desc))[1] end + function tensor_list_push_back(input_handle_, tensor_; name=nothing, 
element_dtype=nothing) + if tf.eager_mode + tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) + else + tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) + end + end end @@ -39929,7 +46845,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) local desc tf.with_op_name(name, "BatchFunction") do desc = tf.NodeDescription("BatchFunction") @@ -39976,7 +46892,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_function(in_tensors_::tf.TensorHandle, captured_tensors_::tf.TensorHandle; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) desc = tf.EagerOp("BatchFunction") tf.add_input(desc, in_tensors_) tf.add_input(desc, captured_tensors_) @@ -40018,6 +46934,13 @@ begin end (tf.execute(desc))[1] end + function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + if tf.eager_mode + batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + else + batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + end + end end @@ -40027,7 +46950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRows") do desc = tf.NodeDescription("SparseFillEmptyRows") @@ -40048,7 +46971,7 @@ begin end out end - function sparse_fill_empty_rows(indices_::tf.TensorHandle, values_::tf.TensorHandle, dense_shape_::tf.TensorHandle, default_value_::tf.TensorHandle; name=nothing) + function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRows") tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -40058,6 +46981,13 @@ begin desc["T"] = tf.data_type(default_value_) tf.execute(desc) end + function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) + if tf.eager_mode + sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) + else + sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) + end + end end @@ -40067,7 +46997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "SelfAdjointEigV2") do desc = tf.NodeDescription("SelfAdjointEigV2") @@ -40085,7 +47015,7 @@ begin end out end - function self_adjoint_eig_v2(input_::tf.TensorHandle; name=nothing, compute_v=nothing) + function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("SelfAdjointEigV2") tf.add_input(desc, input_) if compute_v !== nothing @@ -40094,6 +47024,13 @@ begin desc["T"] = tf.data_type(input_) tf.execute(desc) end + function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.eager_mode + self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end + end end @@ -40103,7 +47040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") @@ -40127,7 +47064,7 @@ begin end out end - function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_ftrl_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -40143,6 +47080,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, 
table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -40152,7 +47096,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") @@ -40183,7 +47127,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_adagrad_da(var_::tf.TensorHandle, gradient_accumulator_::tf.TensorHandle, gradient_squared_accumulator_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, global_step_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagradDA") tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) @@ -40204,6 +47148,13 @@ begin desc["T"] = tf.data_type(l2_) (tf.execute(desc))[1] end + function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end + end end @@ -40213,7 +47164,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) local desc tf.with_op_name(name, "TemporaryVariable") do desc = tf.NodeDescription("TemporaryVariable") @@ -40229,7 +47180,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + function temporary_variable_eager(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) desc = tf.EagerOp("TemporaryVariable") if shape !== nothing desc["shape"] = Base.identity(shape) @@ -40242,6 +47193,13 @@ begin end (tf.execute(desc))[1] end + function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + if tf.eager_mode + temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) + else + temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) + end + end end @@ -40251,7 +47209,7 @@ end 
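
Every op touched by this patch is rewritten into the same three-function shape: a `<name>_graph` method that builds a `tf.NodeDescription` inside `tf.with_op_name` and returns a graph `Tensor`, a `<name>_eager` method that builds a `tf.EagerOp`, infers dtype attributes from its input handles, and executes immediately, and a thin public dispatcher that consults the global `tf.eager_mode` flag. A minimal sketch of that pattern, condensed from the `Sigmoid` entry above (graph-mode type promotion and attribute plumbing are elided here):

    function sigmoid_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "Sigmoid") do
            desc = tf.NodeDescription("Sigmoid")
            tf.add_input(desc, x_)        # dtype/promotion plumbing elided
        end
        tf.Tensor(tf.Operation(desc))     # deferred: returns a graph node
    end

    function sigmoid_eager(x_; name=nothing)
        desc = tf.EagerOp("Sigmoid")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)      # the "T" attr comes from the input handle
        (tf.execute(desc))[1]             # immediate: single-output ops index [1]
    end

    function sigmoid(x_; name=nothing)
        if tf.eager_mode
            sigmoid_eager(x_; name=name)
        else
            sigmoid_graph(x_; name=name)
        end
    end

Splitting the two paths into separately named methods, rather than branching inside one body, keeps the old graph code intact and lets the eager variants drop the `::tf.TensorHandle` annotations that previously selected the eager method by dispatch; the cost is that the generator now emits three functions per op. Multi-output ops (`sparse_slice`, `map_unstage_no_key`, and friends) return the whole vector from `tf.execute(desc)` instead of indexing `[1]`.
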
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAddSign") do desc = tf.NodeDescription("ResourceApplyAddSign") @@ -40276,7 +47234,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_add_sign(var_::tf.TensorHandle, m_::tf.TensorHandle, lr_::tf.TensorHandle, alpha_::tf.TensorHandle, sign_decay_::tf.TensorHandle, beta_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAddSign") tf.add_input(desc, var_) tf.add_input(desc, m_) @@ -40295,6 +47253,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end + end end @@ -40304,7 +47269,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function roll(input_, shift_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) local desc tf.with_op_name(name, "Roll") do desc = tf.NodeDescription("Roll") @@ -40320,7 +47285,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function roll(input_::tf.TensorHandle, shift_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + function roll_eager(input_, shift_, axis_; name=nothing) desc = tf.EagerOp("Roll") tf.add_input(desc, input_) tf.add_input(desc, shift_) @@ -40330,6 +47295,13 @@ begin desc["Taxis"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function roll(input_, shift_, axis_; name=nothing) + if tf.eager_mode + roll_eager(input_, shift_, axis_; name=name) + else + roll_graph(input_, shift_, axis_; name=name) + end + end end @@ -40339,7 +47311,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function xdivy(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function xdivy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xdivy") do desc = tf.NodeDescription("Xdivy") @@ -40351,7 +47323,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function xdivy(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function xdivy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xdivy") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -40359,16 +47331,23 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function xdivy(x_, y_; name=nothing) + if tf.eager_mode + xdivy_eager(x_, y_; name=name) + else + xdivy_graph(x_, y_; name=name) + end + end end """ - max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=NDHWC) + max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=) """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGradGrad") do desc = tf.NodeDescription("MaxPool3DGradGrad") @@ -40394,7 +47373,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool3d_grad_grad(orig_input_::tf.TensorHandle, orig_output_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGradGrad") tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) @@ -40416,16 +47395,23 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.eager_mode + max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end + end end """ - crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=?) + crop_and_resize(image, boxes, box_ind, crop_size; method=, extrapolation_value=?) 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) local desc tf.with_op_name(name, "CropAndResize") do desc = tf.NodeDescription("CropAndResize") @@ -40447,7 +47433,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function crop_and_resize(image_::tf.TensorHandle, boxes_::tf.TensorHandle, box_ind_::tf.TensorHandle, crop_size_::tf.TensorHandle; name=nothing, method=nothing, extrapolation_value=nothing) + function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) desc = tf.EagerOp("CropAndResize") tf.add_input(desc, image_) tf.add_input(desc, boxes_) @@ -40462,6 +47448,13 @@ begin desc["T"] = tf.data_type(image_) (tf.execute(desc))[1] end + function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + if tf.eager_mode + crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + else + crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + end + end end @@ -40471,7 +47464,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedBiasAdd") do desc = tf.NodeDescription("QuantizedBiasAdd") @@ -40500,7 +47493,7 @@ begin end out end - function quantized_bias_add(input_::tf.TensorHandle, bias_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_bias_::tf.TensorHandle, max_bias_::tf.TensorHandle; name=nothing, out_type=nothing) + function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedBiasAdd") tf.add_input(desc, input_) tf.add_input(desc, bias_) @@ -40515,6 +47508,13 @@ begin desc["T2"] = tf.data_type(bias_) tf.execute(desc) end + function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + if tf.eager_mode + quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + else + quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + end + end end @@ -40524,7 +47524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function kmc2chain_initialization(distances_, seed_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) local desc tf.with_op_name(name, "KMC2ChainInitialization") do desc = tf.NodeDescription("KMC2ChainInitialization") @@ -40535,12 +47535,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
kmc2chain_initialization(distances_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing) + function kmc2chain_initialization_eager(distances_, seed_; name=nothing) desc = tf.EagerOp("KMC2ChainInitialization") tf.add_input(desc, distances_) tf.add_input(desc, seed_) (tf.execute(desc))[1] end + function kmc2chain_initialization(distances_, seed_; name=nothing) + if tf.eager_mode + kmc2chain_initialization_eager(distances_, seed_; name=name) + else + kmc2chain_initialization_graph(distances_, seed_; name=name) + end + end end @@ -40550,7 +47557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstageNoKey") do desc = tf.NodeDescription("MapUnstageNoKey") @@ -40579,7 +47586,7 @@ begin end out end - function map_unstage_no_key(indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstageNoKey") tf.add_input(desc, indices_) if capacity !== nothing @@ -40599,6 +47606,13 @@ begin end tf.execute(desc) end + function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -40608,7 +47622,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdSub") do desc = tf.NodeDescription("ScatterNdSub") @@ -40627,7 +47641,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_nd_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdSub") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -40640,6 +47654,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -40649,7 +47670,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bilinear(images_, size_; name=nothing, 
align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinear") do desc = tf.NodeDescription("ResizeBilinear") @@ -40664,7 +47685,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_bilinear(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinear") tf.add_input(desc, images_) tf.add_input(desc, size_) @@ -40674,6 +47695,13 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) + end + end end @@ -40683,7 +47711,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapPeek") do desc = tf.NodeDescription("OrderedMapPeek") @@ -40709,7 +47737,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ordered_map_peek(key_::tf.TensorHandle, indices_::tf.TensorHandle; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapPeek") tf.add_input(desc, key_) tf.add_input(desc, indices_) @@ -40730,6 +47758,13 @@ begin end (tf.execute(desc))[1] end + function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -40739,7 +47774,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArray") do desc = tf.NodeDescription("TensorArray") @@ -40763,7 +47798,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array(size_::tf.TensorHandle; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + function 
tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArray") tf.add_input(desc, size_) if dtype !== nothing @@ -40783,6 +47818,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + if tf.eager_mode + tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + else + tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + end + end end @@ -40792,7 +47834,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceSub") do desc = tf.NodeDescription("InplaceSub") @@ -40806,7 +47848,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function inplace_sub(x_::tf.TensorHandle, i_::tf.TensorHandle, v_::tf.TensorHandle; name=nothing) + function inplace_sub_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceSub") tf.add_input(desc, x_) tf.add_input(desc, i_) @@ -40815,6 +47857,13 @@ begin desc["T"] = tf.data_type(v_) (tf.execute(desc))[1] end + function inplace_sub(x_, i_, v_; name=nothing) + if tf.eager_mode + inplace_sub_eager(x_, i_, v_; name=name) + else + inplace_sub_graph(x_, i_, v_; name=name) + end + end end @@ -40824,7 +47873,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function pow(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pow_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Pow") do desc = tf.NodeDescription("Pow") @@ -40836,7 +47885,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function pow(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function pow_eager(x_, y_; name=nothing) desc = tf.EagerOp("Pow") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -40844,6 +47893,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function pow(x_, y_; name=nothing) + if tf.eager_mode + pow_eager(x_, y_; name=name) + else + pow_graph(x_, y_; name=name) + end + end end @@ -40853,7 +47909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormal") do desc = tf.NodeDescription("StatefulStandardNormal") @@ -40871,7 +47927,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateful_standard_normal(resource_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, dtype=nothing, shape_dtype=nothing) + function stateful_standard_normal_eager(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) desc = tf.EagerOp("StatefulStandardNormal") tf.add_input(desc, resource_) tf.add_input(desc, shape_) 
@@ -40884,6 +47940,13 @@ begin desc["shape_dtype"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + if tf.eager_mode + stateful_standard_normal_eager(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) + else + stateful_standard_normal_graph(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) + end + end end @@ -40893,7 +47956,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function ref_next_iteration(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefNextIteration") do desc = tf.NodeDescription("RefNextIteration") @@ -40903,12 +47966,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function ref_next_iteration(data_::tf.TensorHandle; name=nothing) + function ref_next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("RefNextIteration") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function ref_next_iteration(data_; name=nothing) + if tf.eager_mode + ref_next_iteration_eager(data_; name=name) + else + ref_next_iteration_graph(data_; name=name) + end + end end @@ -40918,7 +47988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scalar_summary(tags_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) local desc tf.with_op_name(name, "ScalarSummary") do desc = tf.NodeDescription("ScalarSummary") @@ -40930,13 +48000,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scalar_summary(tags_::tf.TensorHandle, values_::tf.TensorHandle; name=nothing) + function scalar_summary_eager(tags_, values_; name=nothing) desc = tf.EagerOp("ScalarSummary") tf.add_input(desc, tags_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) (tf.execute(desc))[1] end + function scalar_summary(tags_, values_; name=nothing) + if tf.eager_mode + scalar_summary_eager(tags_, values_; name=name) + else + scalar_summary_graph(tags_, values_; name=name) + end + end end @@ -40946,7 +48023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) local desc tf.with_op_name(name, "StringSplitV2") do desc = tf.NodeDescription("StringSplitV2") @@ -40965,7 +48042,7 @@ begin end out end - function string_split_v2(input_::tf.TensorHandle, sep_::tf.TensorHandle; name=nothing, maxsplit=nothing) + function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) desc = tf.EagerOp("StringSplitV2") tf.add_input(desc, input_) tf.add_input(desc, sep_) @@ -40974,6 +48051,13 @@ begin end tf.execute(desc) end + function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) + if tf.eager_mode + string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) + else + string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit) + end + end end @@ -40983,7 +48067,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bessel_i0e(x_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bessel_i0e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI0e") do desc = tf.NodeDescription("BesselI0e") @@ -40993,12 +48077,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bessel_i0e(x_::tf.TensorHandle; name=nothing) + function bessel_i0e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI0e") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function bessel_i0e(x_; name=nothing) + if tf.eager_mode + bessel_i0e_eager(x_; name=name) + else + bessel_i0e_graph(x_; name=name) + end + end end @@ -41008,7 +48099,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unique(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "Unique") do desc = tf.NodeDescription("Unique") @@ -41026,7 +48117,7 @@ begin end out end - function unique(x_::tf.TensorHandle; name=nothing, out_idx=nothing) + function unique_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("Unique") tf.add_input(desc, x_) if out_idx !== nothing @@ -41035,6 +48126,13 @@ begin desc["T"] = tf.data_type(x_) tf.execute(desc) end + function unique(x_; name=nothing, out_idx=nothing) + if tf.eager_mode + unique_eager(x_; name=name, out_idx=out_idx) + else + unique_graph(x_; name=name, out_idx=out_idx) + end + end end @@ -41044,7 +48142,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") @@ -41069,7 +48167,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_rms_prop_parameters(parameters_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") tf.add_input(desc, parameters_) tf.add_input(desc, ms_) @@ -41088,6 +48186,13 @@ begin end (tf.execute(desc))[1] end + function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -41097,7 +48202,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReaderV2") do desc = tf.NodeDescription("WholeFileReaderV2") @@ -41110,7 +48215,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("WholeFileReaderV2") if container !== nothing desc["container"] = Base.String(container) @@ -41120,6 +48225,13 @@ begin end (tf.execute(desc))[1] end + function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end + end end @@ -41129,7 +48241,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "EagerPyFunc") do desc = tf.NodeDescription("EagerPyFunc") @@ -41147,7 +48259,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function eager_py_func(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("EagerPyFunc") tf.add_input(desc, input_) if token !== nothing @@ -41161,6 +48273,13 @@ begin end (tf.execute(desc))[1] end + function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.eager_mode + eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end + end end @@ -41170,7 +48289,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function next_iteration(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "NextIteration") do desc = tf.NodeDescription("NextIteration") @@ -41180,12 +48299,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function next_iteration(data_::tf.TensorHandle; name=nothing) + function next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("NextIteration") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) (tf.execute(desc))[1] end + function next_iteration(data_; name=nothing) + if tf.eager_mode + next_iteration_eager(data_; name=name) + else + next_iteration_graph(data_; name=name) + end + end end @@ -41195,7 +48321,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Case") do desc = tf.NodeDescription("Case") @@ -41218,7 +48344,7 @@ begin end 
tf.Tensor(tf.Operation(desc)) end - function case(branch_index_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + function case_eager(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) desc = tf.EagerOp("Case") tf.add_input(desc, branch_index_) tf.add_input(desc, input_) @@ -41236,6 +48362,13 @@ begin end (tf.execute(desc))[1] end + function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + if tf.eager_mode + case_eager(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, output_shapes=output_shapes) + else + case_graph(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, output_shapes=output_shapes) + end + end end @@ -41245,7 +48378,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterSub") do desc = tf.NodeDescription("TensorScatterSub") @@ -41261,7 +48394,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_scatter_sub(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing) + function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterSub") tf.add_input(desc, tensor_) tf.add_input(desc, indices_) @@ -41271,6 +48404,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) + if tf.eager_mode + tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) + end + end end @@ -41280,7 +48420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMax") do desc = tf.NodeDescription("ScatterMax") @@ -41299,7 +48439,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_max(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMax") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -41312,6 +48452,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -41321,7 +48468,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sqrt(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sqrt") do desc = tf.NodeDescription("Sqrt") 
@@ -41331,12 +48478,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sqrt(x_::tf.TensorHandle; name=nothing) + function sqrt_eager(x_; name=nothing) desc = tf.EagerOp("Sqrt") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function sqrt(x_; name=nothing) + if tf.eager_mode + sqrt_eager(x_; name=name) + else + sqrt_graph(x_; name=name) + end + end end @@ -41346,7 +48500,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorTakeGradient") do desc = tf.NodeDescription("AccumulatorTakeGradient") @@ -41360,7 +48514,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function accumulator_take_gradient(handle_::tf.TensorHandle, num_required_::tf.TensorHandle; name=nothing, dtype=nothing) + function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorTakeGradient") tf.add_input(desc, handle_) tf.add_input(desc, num_required_) @@ -41369,6 +48523,13 @@ begin end (tf.execute(desc))[1] end + function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.eager_mode + accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end + end end @@ -41378,7 +48539,7 @@ end Returns x + y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklAdd") do desc = tf.NodeDescription("_MklAdd") @@ -41399,7 +48560,7 @@ begin end out end - function _mkl_add(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklAdd") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -41409,6 +48570,13 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.eager_mode + _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end + end end @@ -41418,7 +48586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reciprocal(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reciprocal_graph(x_; name=nothing) local desc tf.with_op_name(name, "Reciprocal") do desc = tf.NodeDescription("Reciprocal") @@ -41428,12 +48596,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reciprocal(x_::tf.TensorHandle; name=nothing) + function reciprocal_eager(x_; name=nothing) desc = tf.EagerOp("Reciprocal") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function reciprocal(x_; name=nothing) + if tf.eager_mode + reciprocal_eager(x_; name=name) + else + reciprocal_graph(x_; name=name) + end + end end @@ -41443,7 +48618,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# 
tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "OutfeedEnqueueTuple") do desc = tf.NodeDescription("OutfeedEnqueueTuple") @@ -41455,7 +48630,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function outfeed_enqueue_tuple(inputs_::tf.TensorHandle; name=nothing, dtypes=nothing) + function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) desc = tf.EagerOp("OutfeedEnqueueTuple") tf.add_input(desc, inputs_) if dtypes !== nothing @@ -41463,6 +48638,13 @@ begin end (tf.execute(desc))[1] end + function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + if tf.eager_mode + outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) + else + outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + end + end end @@ -41472,7 +48654,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_strip(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_strip_graph(input_; name=nothing) local desc tf.with_op_name(name, "StringStrip") do desc = tf.NodeDescription("StringStrip") @@ -41481,11 +48663,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_strip(input_::tf.TensorHandle; name=nothing) + function string_strip_eager(input_; name=nothing) desc = tf.EagerOp("StringStrip") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function string_strip(input_; name=nothing) + if tf.eager_mode + string_strip_eager(input_; name=name) + else + string_strip_graph(input_; name=name) + end + end end @@ -41495,7 +48684,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") @@ -41514,7 +48703,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fake_quant_with_min_max_vars_per_channel(inputs_::tf.TensorHandle, min_::tf.TensorHandle, max_::tf.TensorHandle; name=nothing, num_bits=nothing, narrow_range=nothing) + function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -41527,6 +48716,13 @@ begin end (tf.execute(desc))[1] end + function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.eager_mode + fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end + end end @@ -41536,7 +48732,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_ready_size(handle_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierReadySize") do desc = tf.NodeDescription("BarrierReadySize") @@ -41545,11 +48741,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function barrier_ready_size(handle_::tf.TensorHandle; name=nothing) + function barrier_ready_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierReadySize") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function barrier_ready_size(handle_; name=nothing) + if tf.eager_mode + barrier_ready_size_eager(handle_; name=name) + else + barrier_ready_size_graph(handle_; name=name) + end + end end @@ -41559,7 +48762,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucket") do desc = tf.NodeDescription("StringToHashBucket") @@ -41571,7 +48774,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function string_to_hash_bucket(string_tensor_::tf.TensorHandle; name=nothing, num_buckets=nothing) + function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucket") tf.add_input(desc, string_tensor_) if num_buckets !== nothing @@ -41579,6 +48782,13 @@ begin end (tf.execute(desc))[1] end + function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) + if tf.eager_mode + string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) + end + end end @@ -41588,7 +48798,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcat") do desc = tf.NodeDescription("TensorArrayConcat") @@ -41610,7 +48820,7 @@ begin end out end - function tensor_array_concat(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape_except0=nothing) + function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcat") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -41622,6 +48832,13 @@ begin end tf.execute(desc) end + function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.eager_mode + tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end + end end @@ -41631,7 +48848,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sharded_filename_graph(basename_, shard_, 
num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilename") do desc = tf.NodeDescription("ShardedFilename") @@ -41644,13 +48861,20 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sharded_filename(basename_::tf.TensorHandle, shard_::tf.TensorHandle, num_shards_::tf.TensorHandle; name=nothing) + function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilename") tf.add_input(desc, basename_) tf.add_input(desc, shard_) tf.add_input(desc, num_shards_) (tf.execute(desc))[1] end + function sharded_filename(basename_, shard_, num_shards_; name=nothing) + if tf.eager_mode + sharded_filename_eager(basename_, shard_, num_shards_; name=name) + else + sharded_filename_graph(basename_, shard_, num_shards_; name=name) + end + end end @@ -41660,7 +48884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFunc") do desc = tf.NodeDescription("PyFunc") @@ -41678,7 +48902,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function py_func(input_::tf.TensorHandle; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFunc") tf.add_input(desc, input_) if token !== nothing @@ -41692,6 +48916,13 @@ begin end (tf.execute(desc))[1] end + function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.eager_mode + py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end + end end @@ -41701,7 +48932,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentProd") do desc = tf.NodeDescription("UnsortedSegmentProd") @@ -41718,7 +48949,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unsorted_segment_prod(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentProd") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -41728,6 +48959,13 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) (tf.execute(desc))[1] end + function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) + if tf.eager_mode + unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) + end + end end @@ -41737,7 +48975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "CountUpTo") do desc = tf.NodeDescription("CountUpTo") @@ -41750,7 
+48988,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function count_up_to(ref_::tf.TensorHandle; name=nothing, limit=nothing) + function count_up_to_eager(ref_; name=nothing, limit=nothing) desc = tf.EagerOp("CountUpTo") tf.add_input(desc, ref_) if limit !== nothing @@ -41759,6 +48997,13 @@ begin desc["T"] = tf.data_type(ref_) (tf.execute(desc))[1] end + function count_up_to(ref_; name=nothing, limit=nothing) + if tf.eager_mode + count_up_to_eager(ref_; name=name, limit=limit) + else + count_up_to_graph(ref_; name=name, limit=limit) + end + end end @@ -41768,7 +49013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) local desc tf.with_op_name(name, "RandomGamma") do desc = tf.NodeDescription("RandomGamma") @@ -41790,7 +49035,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_gamma(shape_::tf.TensorHandle, alpha_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, S=nothing) + function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) desc = tf.EagerOp("RandomGamma") tf.add_input(desc, shape_) tf.add_input(desc, alpha_) @@ -41807,6 +49052,13 @@ begin desc["T"] = tf.data_type(alpha_) (tf.execute(desc))[1] end + function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + if tf.eager_mode + random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + else + random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + end + end end @@ -41816,7 +49068,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGrad") do desc = tf.NodeDescription("TensorArrayGrad") @@ -41830,7 +49082,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_grad(handle_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, source=nothing) + function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGrad") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) @@ -41839,6 +49091,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) + if tf.eager_mode + tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) + end + end end @@ -41848,7 +49107,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2D") do desc = tf.NodeDescription("Dilation2D") @@ -41869,7 +49128,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function dilation2d(input_::tf.TensorHandle, filter_::tf.TensorHandle; 
name=nothing, strides=nothing, rates=nothing, padding=nothing) + function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2D") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -41886,6 +49145,13 @@ begin desc["T"] = tf.data_type(filter_) (tf.execute(desc))[1] end + function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.eager_mode + dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + end + end end @@ -41895,7 +49161,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unbatch") do desc = tf.NodeDescription("Unbatch") @@ -41918,7 +49184,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unbatch(batched_tensor_::tf.TensorHandle, batch_index_::tf.TensorHandle, id_::tf.TensorHandle; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unbatch") tf.add_input(desc, batched_tensor_) tf.add_input(desc, batch_index_) @@ -41935,6 +49201,13 @@ begin desc["T"] = tf.data_type(batched_tensor_) (tf.execute(desc))[1] end + function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + else + unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + end + end end @@ -41944,7 +49217,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function get_session_handle(value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_handle_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandle") do desc = tf.NodeDescription("GetSessionHandle") @@ -41954,12 +49227,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function get_session_handle(value_::tf.TensorHandle; name=nothing) + function get_session_handle_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandle") tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function get_session_handle(value_; name=nothing) + if tf.eager_mode + get_session_handle_eager(value_; name=name) + else + get_session_handle_graph(value_; name=name) + end + end end @@ -41969,7 +49249,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") @@ -41993,7 +49273,7 @@ begin end out end - function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_adam_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -42009,6 +49289,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -42018,7 +49305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensorsV2") do desc = tf.NodeDescription("MutableHashTableOfTensorsV2") @@ -42043,7 +49330,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + function mutable_hash_table_of_tensors_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) desc = tf.EagerOp("MutableHashTableOfTensorsV2") if container !== nothing desc["container"] = Base.String(container) @@ -42065,6 +49352,13 @@ begin end (tf.execute(desc))[1] end + function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.eager_mode + mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end + end end @@ -42074,7 +49368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrl") do desc = tf.NodeDescription("SparseApplyFtrl") @@ -42105,7 +49399,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_apply_ftrl(var_::tf.TensorHandle, accum_::tf.TensorHandle, linear_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, lr_::tf.TensorHandle, l1_::tf.TensorHandle, l2_::tf.TensorHandle, lr_power_::tf.TensorHandle; name=nothing, use_locking=nothing) + function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrl") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -42130,6 +49424,13 @@ begin desc["T"] = tf.data_type(lr_power_) (tf.execute(desc))[1] end + function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.eager_mode + sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end + end end @@ -42139,7 +49440,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDatasetV2") do desc = tf.NodeDescription("BatchDatasetV2") @@ -42158,7 +49459,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_dataset_v2(input_dataset_::tf.TensorHandle, batch_size_::tf.TensorHandle, drop_remainder_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDatasetV2") tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) @@ -42171,6 +49472,13 @@ begin end (tf.execute(desc))[1] end + function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -42180,7 +49488,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMinimum") do desc = tf.NodeDescription("SparseSparseMinimum") @@ -42205,7 +49513,7 @@ begin end out end - function 
sparse_sparse_minimum(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle; name=nothing) + function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMinimum") tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) @@ -42217,6 +49525,13 @@ begin desc["T"] = tf.data_type(b_values_) tf.execute(desc) end + function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.eager_mode + sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end + end end @@ -42226,7 +49541,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) local desc tf.with_op_name(name, "ReverseV2") do desc = tf.NodeDescription("ReverseV2") @@ -42240,7 +49555,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function reverse_v2(tensor_::tf.TensorHandle, axis_::tf.TensorHandle; name=nothing) + function reverse_v2_eager(tensor_, axis_; name=nothing) desc = tf.EagerOp("ReverseV2") tf.add_input(desc, tensor_) tf.add_input(desc, axis_) @@ -42248,6 +49563,13 @@ begin desc["Tidx"] = tf.data_type(axis_) (tf.execute(desc))[1] end + function reverse_v2(tensor_, axis_; name=nothing) + if tf.eager_mode + reverse_v2_eager(tensor_, axis_; name=name) + else + reverse_v2_graph(tensor_, axis_; name=name) + end + end end @@ -42257,7 +49579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSlice") do desc = tf.NodeDescription("StridedSlice") @@ -42310,7 +49632,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function strided_slice(input_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSlice") tf.add_input(desc, input_) tf.add_input(desc, begin_) @@ -42355,6 +49677,13 @@ begin desc["Index"] = tf.data_type(strides_) (tf.execute(desc))[1] end + function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.eager_mode + strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, 
end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end + end end @@ -42364,7 +49693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matching_files(pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matching_files_graph(pattern_; name=nothing) local desc tf.with_op_name(name, "MatchingFiles") do desc = tf.NodeDescription("MatchingFiles") @@ -42373,11 +49702,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matching_files(pattern_::tf.TensorHandle; name=nothing) + function matching_files_eager(pattern_; name=nothing) desc = tf.EagerOp("MatchingFiles") tf.add_input(desc, pattern_) (tf.execute(desc))[1] end + function matching_files(pattern_; name=nothing) + if tf.eager_mode + matching_files_eager(pattern_; name=name) + else + matching_files_graph(pattern_; name=name) + end + end end @@ -42387,7 +49723,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) local desc tf.with_op_name(name, "EncodeBase64") do desc = tf.NodeDescription("EncodeBase64") @@ -42399,7 +49735,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function encode_base64(input_::tf.TensorHandle; name=nothing, pad=nothing) + function encode_base64_eager(input_; name=nothing, pad=nothing) desc = tf.EagerOp("EncodeBase64") tf.add_input(desc, input_) if pad !== nothing @@ -42407,6 +49743,13 @@ begin end (tf.execute(desc))[1] end + function encode_base64(input_; name=nothing, pad=nothing) + if tf.eager_mode + encode_base64_eager(input_; name=name, pad=pad) + else + encode_base64_graph(input_; name=name, pad=pad) + end + end end @@ -42416,7 +49759,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextAsOptional") do desc = tf.NodeDescription("IteratorGetNextAsOptional") @@ -42431,7 +49774,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_get_next_as_optional(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextAsOptional") tf.add_input(desc, iterator_) if output_types !== nothing @@ -42442,6 +49785,13 @@ begin end (tf.execute(desc))[1] end + function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end 
end @@ -42451,7 +49801,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueue") do desc = tf.NodeDescription("PaddingFIFOQueue") @@ -42473,7 +49823,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + function padding_fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PaddingFIFOQueue") if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -42492,6 +49842,13 @@ begin end (tf.execute(desc))[1] end + function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end end @@ -42501,7 +49858,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "IteratorToStringHandle") do desc = tf.NodeDescription("IteratorToStringHandle") @@ -42510,11 +49867,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_to_string_handle(resource_handle_::tf.TensorHandle; name=nothing) + function iterator_to_string_handle_eager(resource_handle_; name=nothing) desc = tf.EagerOp("IteratorToStringHandle") tf.add_input(desc, resource_handle_) (tf.execute(desc))[1] end + function iterator_to_string_handle(resource_handle_; name=nothing) + if tf.eager_mode + iterator_to_string_handle_eager(resource_handle_; name=name) + else + iterator_to_string_handle_graph(resource_handle_; name=name) + end + end end @@ -42524,7 +49888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") @@ -42548,7 +49912,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function max_pool_grad_grad_with_argmax(input_::tf.TensorHandle, grad_::tf.TensorHandle, argmax_::tf.TensorHandle; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; 
name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradGradWithArgmax") tf.add_input(desc, input_) tf.add_input(desc, grad_) @@ -42567,6 +49931,13 @@ begin desc["Targmax"] = tf.data_type(argmax_) (tf.execute(desc))[1] end + function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.eager_mode + max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end + end end @@ -42576,7 +49947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGather") do desc = tf.NodeDescription("TensorListGather") @@ -42592,7 +49963,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_gather(input_handle_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGather") tf.add_input(desc, input_handle_) tf.add_input(desc, indices_) @@ -42602,6 +49973,13 @@ begin end (tf.execute(desc))[1] end + function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + if tf.eager_mode + tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=name, element_dtype=element_dtype) + else + tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=name, element_dtype=element_dtype) + end + end end @@ -42611,7 +49989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "Multinomial") do desc = tf.NodeDescription("Multinomial") @@ -42632,7 +50010,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function multinomial(logits_::tf.TensorHandle, num_samples_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) desc = tf.EagerOp("Multinomial") tf.add_input(desc, logits_) tf.add_input(desc, num_samples_) @@ -42648,6 +50026,13 @@ begin desc["T"] = tf.data_type(logits_) (tf.execute(desc))[1] end + function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + if tf.eager_mode + multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + else + multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + end + end end @@ -42657,7 +50042,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op 
function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayRead") do desc = tf.NodeDescription("TensorArrayRead") @@ -42673,7 +50058,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_read(handle_::tf.TensorHandle, index_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing, dtype=nothing) + function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayRead") tf.add_input(desc, handle_) tf.add_input(desc, index_) @@ -42683,6 +50068,13 @@ begin end (tf.execute(desc))[1] end + function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.eager_mode + tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end + end end @@ -42692,7 +50084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") @@ -42709,7 +50101,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_indexed_dataset_get(materialized_::tf.TensorHandle, index_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetGet") tf.add_input(desc, materialized_) tf.add_input(desc, index_) @@ -42721,6 +50113,13 @@ begin end (tf.execute(desc))[1] end + function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -42730,7 +50129,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "TPUPartitionedCall") do desc = tf.NodeDescription("TPUPartitionedCall") @@ -42750,7 +50149,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tpu_partitioned_call(args_::tf.TensorHandle, device_ordinal_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + function tpu_partitioned_call_eager(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("TPUPartitionedCall") tf.add_input(desc, args_) tf.add_input(desc, device_ordinal_) @@ 
-42765,6 +50164,13 @@ begin end (tf.execute(desc))[1] end + function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + if tf.eager_mode + tpu_partitioned_call_eager(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) + else + tpu_partitioned_call_graph(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) + end + end end @@ -42774,7 +50180,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") @@ -42816,7 +50222,7 @@ begin end out end - function quantized_conv2d_and_relu_and_requantize(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle, min_freezed_output_::tf.TensorHandle, max_freezed_output_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndReluAndRequantize") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -42842,6 +50248,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -42851,7 +50264,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandleV2") do desc = tf.NodeDescription("IteratorFromStringHandleV2") @@ -42866,7 +50279,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_from_string_handle_v2(string_handle_::tf.TensorHandle; 
name=nothing, output_types=nothing, output_shapes=nothing) + function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandleV2") tf.add_input(desc, string_handle_) if output_types !== nothing @@ -42877,6 +50290,13 @@ begin end (tf.execute(desc))[1] end + function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -42886,7 +50306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function bitwise_or(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseOr") do desc = tf.NodeDescription("BitwiseOr") @@ -42898,7 +50318,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function bitwise_or(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function bitwise_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseOr") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -42906,6 +50326,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function bitwise_or(x_, y_; name=nothing) + if tf.eager_mode + bitwise_or_eager(x_, y_; name=name) + else + bitwise_or_graph(x_, y_; name=name) + end + end end @@ -42915,7 +50342,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMax") do desc = tf.NodeDescription("UnsortedSegmentMax") @@ -42932,7 +50359,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unsorted_segment_max(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle, num_segments_::tf.TensorHandle; name=nothing) + function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMax") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -42942,6 +50369,13 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) (tf.execute(desc))[1] end + function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) + if tf.eager_mode + unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) + end + end end @@ -42951,7 +50385,7 @@ end Returns (x - y)(x - y) element-wise. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSquaredDifference") do desc = tf.NodeDescription("_MklSquaredDifference") @@ -42972,7 +50406,7 @@ begin end out end - function _mkl_squared_difference(x_::tf.TensorHandle, y_::tf.TensorHandle, mkl_x_::tf.TensorHandle, mkl_y_::tf.TensorHandle; name=nothing) + function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSquaredDifference") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -42982,6 +50416,13 @@ begin desc["T"] = tf.data_type(y_) tf.execute(desc) end + function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.eager_mode + _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end + end end @@ -42991,7 +50432,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilter") do desc = tf.NodeDescription("Conv3DBackpropFilter") @@ -43014,7 +50455,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv3d_backprop_filter(input_::tf.TensorHandle, filter_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilter") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -43033,6 +50474,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + else + conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -43042,7 +50490,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "If") do desc = tf.NodeDescription("If") @@ -43069,7 +50517,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function if_(cond_::tf.TensorHandle, input_::tf.TensorHandle; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, 
then_branch=nothing, else_branch=nothing, output_shapes=nothing) desc = tf.EagerOp("If") tf.add_input(desc, cond_) tf.add_input(desc, input_) @@ -43091,6 +50539,13 @@ begin desc["Tcond"] = tf.data_type(cond_) (tf.execute(desc))[1] end + function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + if tf.eager_mode + if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + else + if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + end + end end @@ -43100,7 +50555,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FlatMapDataset") do desc = tf.NodeDescription("FlatMapDataset") @@ -43123,7 +50578,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function flat_map_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FlatMapDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -43141,6 +50596,13 @@ begin end (tf.execute(desc))[1] end + function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -43150,7 +50612,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatter") do desc = tf.NodeDescription("TensorListScatter") @@ -43171,7 +50633,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_scatter(tensor_::tf.TensorHandle, indices_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, shape_type=nothing) + function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatter") tf.add_input(desc, tensor_) tf.add_input(desc, indices_) @@ -43186,6 +50648,13 @@ begin desc["shape_type"] = tf.data_type(element_shape_) (tf.execute(desc))[1] end + function tensor_list_scatter(tensor_, indices_, element_shape_; 
name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.eager_mode + tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end + end end @@ -43195,7 +50664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function softsign_grad(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftsignGrad") do desc = tf.NodeDescription("SoftsignGrad") @@ -43207,7 +50676,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function softsign_grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + function softsign_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftsignGrad") tf.add_input(desc, gradients_) tf.add_input(desc, features_) @@ -43215,6 +50684,13 @@ begin desc["T"] = tf.data_type(features_) (tf.execute(desc))[1] end + function softsign_grad(gradients_, features_; name=nothing) + if tf.eager_mode + softsign_grad_eager(gradients_, features_; name=name) + else + softsign_grad_graph(gradients_, features_; name=name) + end + end end @@ -43224,7 +50700,7 @@ end Copy Host Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "CopyHost") do desc = tf.NodeDescription("CopyHost") @@ -43240,7 +50716,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function copy_host(input_::tf.TensorHandle; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("CopyHost") tf.add_input(desc, input_) if tensor_name !== nothing @@ -43252,6 +50728,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + if tf.eager_mode + copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + else + copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + end + end end @@ -43261,7 +50744,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lin_space(start_, stop_, num_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) local desc tf.with_op_name(name, "LinSpace") do desc = tf.NodeDescription("LinSpace") @@ -43277,7 +50760,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function lin_space(start_::tf.TensorHandle, stop_::tf.TensorHandle, num_::tf.TensorHandle; name=nothing) + function lin_space_eager(start_, stop_, num_; name=nothing) desc = tf.EagerOp("LinSpace") tf.add_input(desc, start_) tf.add_input(desc, stop_) @@ -43287,6 +50770,13 @@ begin desc["Tidx"] = tf.data_type(num_) (tf.execute(desc))[1] end + function lin_space(start_, stop_, num_; name=nothing) + if tf.eager_mode + lin_space_eager(start_, stop_, num_; name=name) + else 
+ lin_space_graph(start_, stop_, num_; name=name) + end + end end @@ -43296,7 +50786,7 @@ end Updates input `value` at `loc` with `update`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) local desc tf.with_op_name(name, "_ParallelConcatUpdate") do desc = tf.NodeDescription("_ParallelConcatUpdate") @@ -43311,7 +50801,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _parallel_concat_update(value_::tf.TensorHandle, update_::tf.TensorHandle; name=nothing, loc=nothing) + function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) desc = tf.EagerOp("_ParallelConcatUpdate") tf.add_input(desc, value_) tf.add_input(desc, update_) @@ -43322,6 +50812,13 @@ begin desc["T"] = tf.data_type(update_) (tf.execute(desc))[1] end + function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) + if tf.eager_mode + _parallel_concat_update_eager(value_, update_; name=name, loc=loc) + else + _parallel_concat_update_graph(value_, update_; name=name, loc=loc) + end + end end @@ -43331,7 +50828,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "Stack") do desc = tf.NodeDescription("Stack") @@ -43344,7 +50841,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + function stack_eager(; name=nothing, elem_type=nothing, stack_name=nothing) desc = tf.EagerOp("Stack") if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -43354,6 +50851,13 @@ begin end (tf.execute(desc))[1] end + function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + if tf.eager_mode + stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) + else + stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) + end + end end @@ -43363,7 +50867,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPushV2") do desc = tf.NodeDescription("StackPushV2") @@ -43378,7 +50882,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stack_push_v2(handle_::tf.TensorHandle, elem_::tf.TensorHandle; name=nothing, swap_memory=nothing) + function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPushV2") tf.add_input(desc, handle_) tf.add_input(desc, elem_) @@ -43388,6 +50892,13 @@ begin desc["T"] = tf.data_type(elem_) (tf.execute(desc))[1] end + function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.eager_mode + stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end + end end @@ -43397,7 +50908,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignVariableOp") do desc = tf.NodeDescription("AssignVariableOp") @@ -43412,7 +50923,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function assign_variable_op(resource_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, dtype=nothing) + function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignVariableOp") tf.add_input(desc, resource_) tf.add_input(desc, value_) @@ -43422,6 +50933,13 @@ begin desc["dtype"] = tf.data_type(value_) (tf.execute(desc))[1] end + function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.eager_mode + assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end + end end @@ -43431,7 +50949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SparseSplit") do desc = tf.NodeDescription("SparseSplit") @@ -43456,7 +50974,7 @@ begin end out end - function sparse_split(split_dim_::tf.TensorHandle, indices_::tf.TensorHandle, values_::tf.TensorHandle, shape_::tf.TensorHandle; name=nothing, num_split=nothing) + function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) desc = tf.EagerOp("SparseSplit") tf.add_input(desc, split_dim_) tf.add_input(desc, indices_) @@ -43468,6 +50986,13 @@ begin desc["T"] = tf.data_type(values_) tf.execute(desc) end + function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + if tf.eager_mode + sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + else + sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + end + end end @@ -43477,7 +51002,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayUnpack") do desc = tf.NodeDescription("TensorArrayUnpack") @@ -43491,7 +51016,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_unpack(handle_::tf.TensorHandle, value_::tf.TensorHandle, flow_in_::tf.TensorHandle; name=nothing) + function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayUnpack") tf.add_input(desc, handle_) tf.add_input(desc, value_) @@ -43499,6 +51024,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) + if tf.eager_mode + tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) + else + tensor_array_unpack_graph(handle_, value_, flow_in_; name=name) + end + end end @@ 
-43508,7 +51040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) local desc tf.with_op_name(name, "TensorListStack") do desc = tf.NodeDescription("TensorListStack") @@ -43525,7 +51057,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_stack(input_handle_::tf.TensorHandle, element_shape_::tf.TensorHandle; name=nothing, element_dtype=nothing, num_elements=nothing) + function tensor_list_stack_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) desc = tf.EagerOp("TensorListStack") tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) @@ -43537,6 +51069,13 @@ begin end (tf.execute(desc))[1] end + function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + if tf.eager_mode + tensor_list_stack_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) + else + tensor_list_stack_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) + end + end end @@ -43546,7 +51085,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function barrier_incomplete_size(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierIncompleteSize") do desc = tf.NodeDescription("BarrierIncompleteSize") @@ -43555,11 +51094,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function barrier_incomplete_size(handle_::tf.TensorHandle; name=nothing) + function barrier_incomplete_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierIncompleteSize") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function barrier_incomplete_size(handle_; name=nothing) + if tf.eager_mode + barrier_incomplete_size_eager(handle_; name=name) + else + barrier_incomplete_size_graph(handle_; name=name) + end + end end @@ -43569,7 +51115,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "Restore") do desc = tf.NodeDescription("Restore") @@ -43586,7 +51132,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function restore(file_pattern_::tf.TensorHandle, tensor_name_::tf.TensorHandle; name=nothing, dt=nothing, preferred_shard=nothing) + function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("Restore") tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) @@ -43598,6 +51144,13 @@ begin end (tf.execute(desc))[1] end + function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + if tf.eager_mode + restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + else + 
restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + end + end end @@ -43607,7 +51160,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV3") do desc = tf.NodeDescription("TensorArrayV3") @@ -43639,7 +51192,7 @@ begin end out end - function tensor_array_v3(size_::tf.TensorHandle; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV3") tf.add_input(desc, size_) if dtype !== nothing @@ -43662,6 +51215,13 @@ begin end tf.execute(desc) end + function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + if tf.eager_mode + tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + else + tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + end + end end @@ -43671,7 +51231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalAssertNextDataset") do desc = tf.NodeDescription("ExperimentalAssertNextDataset") @@ -43688,7 +51248,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_assert_next_dataset(input_dataset_::tf.TensorHandle, transformations_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalAssertNextDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, transformations_) @@ -43700,6 +51260,13 @@ begin end (tf.execute(desc))[1] end + function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + 
experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -43709,7 +51276,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) local desc tf.with_op_name(name, "InTopK") do desc = tf.NodeDescription("InTopK") @@ -43724,7 +51291,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function in_top_k(predictions_::tf.TensorHandle, targets_::tf.TensorHandle; name=nothing, k=nothing) + function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) desc = tf.EagerOp("InTopK") tf.add_input(desc, predictions_) tf.add_input(desc, targets_) @@ -43734,6 +51301,13 @@ begin desc["T"] = tf.data_type(targets_) (tf.execute(desc))[1] end + function in_top_k(predictions_, targets_; name=nothing, k=nothing) + if tf.eager_mode + in_top_k_eager(predictions_, targets_; name=name, k=k) + else + in_top_k_graph(predictions_, targets_; name=name, k=k) + end + end end @@ -43743,7 +51317,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterSub") do desc = tf.NodeDescription("ScatterSub") @@ -43762,7 +51336,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_sub(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterSub") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -43775,6 +51349,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -43784,7 +51365,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function acosh(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function acosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acosh") do desc = tf.NodeDescription("Acosh") @@ -43794,22 +51375,29 @@ begin end tf.Tensor(tf.Operation(desc)) end - function acosh(x_::tf.TensorHandle; name=nothing) + function acosh_eager(x_; name=nothing) desc = tf.EagerOp("Acosh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function acosh(x_; name=nothing) + if tf.eager_mode + acosh_eager(x_; name=name) + else + acosh_graph(x_; name=name) + end + end end """ - depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function 
depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") @@ -43835,7 +51423,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function depthwise_conv2d_native_backprop_filter(input_::tf.TensorHandle, filter_sizes_::tf.TensorHandle, out_backprop_::tf.TensorHandle; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) @@ -43856,6 +51444,13 @@ begin desc["T"] = tf.data_type(out_backprop_) (tf.execute(desc))[1] end + function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.eager_mode + depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end + end end @@ -43865,7 +51460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "Cast") do desc = tf.NodeDescription("Cast") @@ -43884,7 +51479,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cast(x_::tf.TensorHandle; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) desc = tf.EagerOp("Cast") tf.add_input(desc, x_) if SrcT !== nothing @@ -43899,16 +51494,23 @@ begin desc["SrcT"] = tf.data_type(x_) (tf.execute(desc))[1] end + function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.eager_mode + cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end + end end """ - quantize_v2(input, min_range, max_range; mode=MIN_COMBINED, round_mode=HALF_AWAY_FROM_ZERO) + quantize_v2(input, min_range, max_range; mode=, round_mode=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeV2") do desc = tf.NodeDescription("QuantizeV2") @@ 
-43932,7 +51534,7 @@ begin end out end - function quantize_v2(input_::tf.TensorHandle, min_range_::tf.TensorHandle, max_range_::tf.TensorHandle; name=nothing, mode=nothing, round_mode=nothing) + function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeV2") tf.add_input(desc, input_) tf.add_input(desc, min_range_) @@ -43945,6 +51547,13 @@ begin end tf.execute(desc) end + function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + if tf.eager_mode + quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + else + quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + end + end end @@ -43954,7 +51563,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "GeneratorDataset") do desc = tf.NodeDescription("GeneratorDataset") @@ -43991,7 +51600,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function generator_dataset(init_func_other_args_::tf.TensorHandle, next_func_other_args_::tf.TensorHandle, finalize_func_other_args_::tf.TensorHandle; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("GeneratorDataset") tf.add_input(desc, init_func_other_args_) tf.add_input(desc, next_func_other_args_) @@ -44022,6 +51631,13 @@ begin end (tf.execute(desc))[1] end + function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + else + generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, 
output_types=output_types, output_shapes=output_shapes) + end + end end @@ -44031,7 +51647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSerialize") do desc = tf.NodeDescription("TensorForestTreeSerialize") @@ -44040,11 +51656,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_serialize(tree_handle_::tf.TensorHandle; name=nothing) + function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSerialize") tf.add_input(desc, tree_handle_) (tf.execute(desc))[1] end + function tensor_forest_tree_serialize(tree_handle_; name=nothing) + if tf.eager_mode + tensor_forest_tree_serialize_eager(tree_handle_; name=name) + else + tensor_forest_tree_serialize_graph(tree_handle_; name=name) + end + end end @@ -44054,7 +51677,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function next_after(x1_, x2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) local desc tf.with_op_name(name, "NextAfter") do desc = tf.NodeDescription("NextAfter") @@ -44066,7 +51689,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function next_after(x1_::tf.TensorHandle, x2_::tf.TensorHandle; name=nothing) + function next_after_eager(x1_, x2_; name=nothing) desc = tf.EagerOp("NextAfter") tf.add_input(desc, x1_) tf.add_input(desc, x2_) @@ -44074,6 +51697,13 @@ begin desc["T"] = tf.data_type(x2_) (tf.execute(desc))[1] end + function next_after(x1_, x2_; name=nothing) + if tf.eager_mode + next_after_eager(x1_, x2_; name=name) + else + next_after_graph(x1_, x2_; name=name) + end + end end @@ -44083,7 +51713,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_array_close_v2(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV2") do desc = tf.NodeDescription("TensorArrayCloseV2") @@ -44092,11 +51722,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_array_close_v2(handle_::tf.TensorHandle; name=nothing) + function tensor_array_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV2") tf.add_input(desc, handle_) (tf.execute(desc))[1] end + function tensor_array_close_v2(handle_; name=nothing) + if tf.eager_mode + tensor_array_close_v2_eager(handle_; name=name) + else + tensor_array_close_v2_graph(handle_; name=name) + end + end end @@ -44106,7 +51743,7 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "BigQueryReader") do desc = tf.NodeDescription("BigQueryReader") @@ -44137,7 +51774,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + function big_query_reader_eager(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) desc = tf.EagerOp("BigQueryReader") if container !== nothing desc["container"] = Base.String(container) @@ -44165,6 +51802,13 @@ begin end (tf.execute(desc))[1] end + function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + if tf.eager_mode + big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + else + big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + end + end end @@ -44174,7 +51818,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReadV2") do desc = tf.NodeDescription("ReaderReadV2") @@ -44190,12 +51834,19 @@ begin end out end - function reader_read_v2(reader_handle_::tf.TensorHandle, queue_handle_::tf.TensorHandle; name=nothing) + function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderReadV2") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.execute(desc) end + function reader_read_v2(reader_handle_, queue_handle_; name=nothing) + if tf.eager_mode + reader_read_v2_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_v2_graph(reader_handle_, queue_handle_; name=name) + end + end end @@ -44205,7 +51856,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function mod(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mod") do desc = tf.NodeDescription("Mod") @@ -44217,7 +51868,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function mod(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) 
+ function mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mod") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -44225,6 +51876,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function mod(x_, y_; name=nothing) + if tf.eager_mode + mod_eager(x_, y_; name=name) + else + mod_graph(x_, y_; name=name) + end + end end @@ -44234,7 +51892,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function add_v2(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_v2_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "AddV2") do desc = tf.NodeDescription("AddV2") @@ -44246,7 +51904,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function add_v2(x_::tf.TensorHandle, y_::tf.TensorHandle; name=nothing) + function add_v2_eager(x_, y_; name=nothing) desc = tf.EagerOp("AddV2") tf.add_input(desc, x_) tf.add_input(desc, y_) @@ -44254,6 +51912,13 @@ begin desc["T"] = tf.data_type(y_) (tf.execute(desc))[1] end + function add_v2(x_, y_; name=nothing) + if tf.eager_mode + add_v2_eager(x_, y_; name=name) + else + add_v2_graph(x_, y_; name=name) + end + end end @@ -44263,7 +51928,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomNormal") do desc = tf.NodeDescription("StatelessRandomNormal") @@ -44279,7 +51944,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function stateless_random_normal(shape_::tf.TensorHandle, seed_::tf.TensorHandle; name=nothing, dtype=nothing) + function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomNormal") tf.add_input(desc, shape_) tf.add_input(desc, seed_) @@ -44290,6 +51955,13 @@ begin desc["Tseed"] = tf.data_type(seed_) (tf.execute(desc))[1] end + function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.eager_mode + stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) + end + end end @@ -44299,7 +51971,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceAssign") do desc = tf.NodeDescription("StridedSliceAssign") @@ -44354,7 +52026,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function strided_slice_assign(ref_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, 
begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceAssign") tf.add_input(desc, ref_) tf.add_input(desc, begin_) @@ -44401,6 +52073,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.eager_mode + strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end + end end @@ -44410,7 +52089,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMin") do desc = tf.NodeDescription("ScatterMin") @@ -44429,7 +52108,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scatter_min(ref_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, use_locking=nothing) + function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMin") tf.add_input(desc, ref_) tf.add_input(desc, indices_) @@ -44442,6 +52121,13 @@ begin desc["T"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.eager_mode + scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end + end end @@ -44451,7 +52137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "ResourceStridedSliceAssign") do desc = tf.NodeDescription("ResourceStridedSliceAssign") @@ -44506,7 +52192,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_strided_slice_assign(ref_::tf.TensorHandle, begin_::tf.TensorHandle, end_::tf.TensorHandle, strides_::tf.TensorHandle, value_::tf.TensorHandle; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) 
desc = tf.EagerOp("ResourceStridedSliceAssign") tf.add_input(desc, ref_) tf.add_input(desc, begin_) @@ -44552,6 +52238,13 @@ begin desc["T"] = tf.data_type(value_) (tf.execute(desc))[1] end + function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.eager_mode + resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end + end end @@ -44561,7 +52254,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) local desc tf.with_op_name(name, "RandomGammaGrad") do desc = tf.NodeDescription("RandomGammaGrad") @@ -44573,7 +52266,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_gamma_grad(alpha_::tf.TensorHandle, sample_::tf.TensorHandle; name=nothing) + function random_gamma_grad_eager(alpha_, sample_; name=nothing) desc = tf.EagerOp("RandomGammaGrad") tf.add_input(desc, alpha_) tf.add_input(desc, sample_) @@ -44581,6 +52274,13 @@ begin desc["T"] = tf.data_type(sample_) (tf.execute(desc))[1] end + function random_gamma_grad(alpha_, sample_; name=nothing) + if tf.eager_mode + random_gamma_grad_eager(alpha_, sample_; name=name) + else + random_gamma_grad_graph(alpha_, sample_; name=name) + end + end end @@ -44590,7 +52290,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") @@ -44618,7 +52318,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_sparse_apply_keras_momentum(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle, indices_::tf.TensorHandle, momentum_::tf.TensorHandle; name=nothing, use_locking=nothing, use_nesterov=nothing) + function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -44638,6 +52338,13 @@ begin desc["T"] = tf.data_type(momentum_) (tf.execute(desc))[1] end + function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.eager_mode + resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, 
use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end + end end @@ -44647,7 +52354,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") @@ -44663,7 +52370,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_::tf.TensorHandle, epsilon_::tf.TensorHandle, num_streams_::tf.TensorHandle; name=nothing, max_elements=nothing) + function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, epsilon_) @@ -44673,6 +52380,13 @@ begin end (tf.execute(desc))[1] end + function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + if tf.eager_mode + boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + else + boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + end + end end @@ -44682,7 +52396,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu6") do desc = tf.NodeDescription("QuantizedRelu6") @@ -44704,7 +52418,7 @@ begin end out end - function quantized_relu6(features_::tf.TensorHandle, min_features_::tf.TensorHandle, max_features_::tf.TensorHandle; name=nothing, out_type=nothing) + function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu6") tf.add_input(desc, features_) tf.add_input(desc, min_features_) @@ -44715,6 +52429,13 @@ begin desc["Tinput"] = tf.data_type(features_) tf.execute(desc) end + function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.eager_mode + quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end + end end @@ -44724,7 +52445,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMaximum") do desc = tf.NodeDescription("SparseSparseMaximum") @@ -44749,7 +52470,7 @@ begin end out end - function sparse_sparse_maximum(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_indices_::tf.TensorHandle, b_values_::tf.TensorHandle, b_shape_::tf.TensorHandle; name=nothing) + function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMaximum") tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) @@ -44761,6 +52482,13 @@ begin desc["T"] = tf.data_type(b_values_) tf.execute(desc) end + function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.eager_mode + sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end + end end @@ -44770,7 +52498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalization") do desc = tf.NodeDescription("BatchNormWithGlobalNormalization") @@ -44794,7 +52522,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_norm_with_global_normalization(t_::tf.TensorHandle, m_::tf.TensorHandle, v_::tf.TensorHandle, beta_::tf.TensorHandle, gamma_::tf.TensorHandle; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalization") tf.add_input(desc, t_) tf.add_input(desc, m_) @@ -44814,6 +52542,13 @@ begin desc["T"] = tf.data_type(gamma_) (tf.execute(desc))[1] end + function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.eager_mode + batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end + end end @@ -44823,7 +52558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) local desc tf.with_op_name(name, "InTopKV2") do desc = tf.NodeDescription("InTopKV2") @@ -44837,7 +52572,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function 
in_top_kv2(predictions_::tf.TensorHandle, targets_::tf.TensorHandle, k_::tf.TensorHandle; name=nothing) + function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) desc = tf.EagerOp("InTopKV2") tf.add_input(desc, predictions_) tf.add_input(desc, targets_) @@ -44846,6 +52581,13 @@ begin desc["T"] = tf.data_type(k_) (tf.execute(desc))[1] end + function in_top_kv2(predictions_, targets_, k_; name=nothing) + if tf.eager_mode + in_top_kv2_eager(predictions_, targets_, k_; name=name) + else + in_top_kv2_graph(predictions_, targets_, k_; name=name) + end + end end @@ -44855,7 +52597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cholesky(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "Cholesky") do desc = tf.NodeDescription("Cholesky") @@ -44865,12 +52607,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function cholesky(input_::tf.TensorHandle; name=nothing) + function cholesky_eager(input_; name=nothing) desc = tf.EagerOp("Cholesky") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function cholesky(input_; name=nothing) + if tf.eager_mode + cholesky_eager(input_; name=name) + else + cholesky_graph(input_; name=name) + end + end end @@ -44880,7 +52629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") @@ -44909,7 +52658,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_centered_rms_prop(var_::tf.TensorHandle, mg_::tf.TensorHandle, ms_::tf.TensorHandle, mom_::tf.TensorHandle, lr_::tf.TensorHandle, rho_::tf.TensorHandle, momentum_::tf.TensorHandle, epsilon_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing) + function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyCenteredRMSProp") tf.add_input(desc, var_) tf.add_input(desc, mg_) @@ -44930,6 +52679,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.eager_mode + resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end end @@ -44939,7 +52695,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, 
update_slots=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagrad") do desc = tf.NodeDescription("ResourceApplyAdagrad") @@ -44961,7 +52717,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_apply_adagrad(var_::tf.TensorHandle, accum_::tf.TensorHandle, lr_::tf.TensorHandle, grad_::tf.TensorHandle; name=nothing, use_locking=nothing, update_slots=nothing) + function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceApplyAdagrad") tf.add_input(desc, var_) tf.add_input(desc, accum_) @@ -44977,6 +52733,13 @@ begin desc["T"] = tf.data_type(grad_) (tf.execute(desc))[1] end + function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.eager_mode + resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end + end end @@ -44986,7 +52749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") @@ -45019,7 +52782,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function experimental_parallel_interleave_dataset(input_dataset_::tf.TensorHandle, other_arguments_::tf.TensorHandle, cycle_length_::tf.TensorHandle, block_length_::tf.TensorHandle, sloppy_::tf.TensorHandle, buffer_output_elements_::tf.TensorHandle, prefetch_input_elements_::tf.TensorHandle; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) @@ -45042,6 +52805,13 @@ begin end (tf.execute(desc))[1] end + function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, 
sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -45051,7 +52821,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubicGrad") do desc = tf.NodeDescription("ResizeBicubicGrad") @@ -45066,7 +52836,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_bicubic_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubicGrad") tf.add_input(desc, grads_) tf.add_input(desc, original_image_) @@ -45076,6 +52846,13 @@ begin desc["T"] = tf.data_type(original_image_) (tf.execute(desc))[1] end + function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end + end end @@ -45085,7 +52862,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEig") do desc = tf.NodeDescription("BatchSelfAdjointEig") @@ -45095,12 +52872,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_self_adjoint_eig(input_::tf.TensorHandle; name=nothing) + function batch_self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("BatchSelfAdjointEig") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function batch_self_adjoint_eig(input_; name=nothing) + if tf.eager_mode + batch_self_adjoint_eig_eager(input_; name=name) + else + batch_self_adjoint_eig_graph(input_; name=name) + end + end end @@ -45110,7 +52894,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmax") do desc = tf.NodeDescription("SparseSoftmax") @@ -45124,7 +52908,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_softmax(sp_indices_::tf.TensorHandle, sp_values_::tf.TensorHandle, sp_shape_::tf.TensorHandle; name=nothing) + function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) desc = tf.EagerOp("SparseSoftmax") tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) @@ -45132,6 +52916,13 @@ begin desc["T"] = tf.data_type(sp_values_) (tf.execute(desc))[1] end + function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) + if tf.eager_mode + sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) + else + sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; 
name=name) + end + end end @@ -45141,7 +52932,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function asinh(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function asinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asinh") do desc = tf.NodeDescription("Asinh") @@ -45151,12 +52942,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function asinh(x_::tf.TensorHandle; name=nothing) + function asinh_eager(x_; name=nothing) desc = tf.EagerOp("Asinh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function asinh(x_; name=nothing) + if tf.eager_mode + asinh_eager(x_; name=name) + else + asinh_graph(x_; name=name) + end + end end @@ -45166,7 +52964,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRelu") do desc = tf.NodeDescription("QuantizedConv2DAndRelu") @@ -45204,7 +53002,7 @@ begin end out end - function quantized_conv2d_and_relu(input_::tf.TensorHandle, filter_::tf.TensorHandle, min_input_::tf.TensorHandle, max_input_::tf.TensorHandle, min_filter_::tf.TensorHandle, max_filter_::tf.TensorHandle; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + function quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndRelu") tf.add_input(desc, input_) tf.add_input(desc, filter_) @@ -45228,6 +53026,13 @@ begin desc["Tfilter"] = tf.data_type(filter_) tf.execute(desc) end + function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.eager_mode + quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end + end end @@ -45237,7 +53042,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixInverse") do desc = tf.NodeDescription("MatrixInverse") @@ -45250,7 +53055,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function matrix_inverse(input_::tf.TensorHandle; name=nothing, adjoint=nothing) + function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixInverse") tf.add_input(desc, input_) if adjoint !== nothing @@ -45259,6 +53064,13 @@ begin desc["T"] = 
tf.data_type(input_) (tf.execute(desc))[1] end + function matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.eager_mode + matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end + end end @@ -45268,7 +53080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcatLists") do desc = tf.NodeDescription("TensorListConcatLists") @@ -45282,7 +53094,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_concat_lists(input_a_::tf.TensorHandle, input_b_::tf.TensorHandle; name=nothing, element_dtype=nothing) + function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcatLists") tf.add_input(desc, input_a_) tf.add_input(desc, input_b_) @@ -45291,6 +53103,13 @@ begin end (tf.execute(desc))[1] end + function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) + if tf.eager_mode + tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) + else + tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) + end + end end @@ -45300,7 +53119,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Requantize") do desc = tf.NodeDescription("Requantize") @@ -45326,7 +53145,7 @@ begin end out end - function requantize(input_::tf.TensorHandle, input_min_::tf.TensorHandle, input_max_::tf.TensorHandle, requested_output_min_::tf.TensorHandle, requested_output_max_::tf.TensorHandle; name=nothing, out_type=nothing) + function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("Requantize") tf.add_input(desc, input_) tf.add_input(desc, input_min_) @@ -45339,6 +53158,13 @@ begin desc["Tinput"] = tf.data_type(input_) tf.execute(desc) end + function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + if tf.eager_mode + requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + else + requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + end + end end @@ -45348,7 +53174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function fft(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT") do desc = tf.NodeDescription("FFT") @@ -45358,12 +53184,19 @@ begin end tf.Tensor(tf.Operation(desc)) end - function fft(input_::tf.TensorHandle; 
name=nothing) + function fft_eager(input_; name=nothing) desc = tf.EagerOp("FFT") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) (tf.execute(desc))[1] end + function fft(input_; name=nothing) + if tf.eager_mode + fft_eager(input_; name=name) + else + fft_graph(input_; name=name) + end + end end @@ -45373,7 +53206,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "ConjugateTranspose") do desc = tf.NodeDescription("ConjugateTranspose") @@ -45386,7 +53219,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conjugate_transpose(x_::tf.TensorHandle, perm_::tf.TensorHandle; name=nothing) + function conjugate_transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("ConjugateTranspose") tf.add_input(desc, x_) tf.add_input(desc, perm_) @@ -45394,6 +53227,13 @@ begin desc["Tperm"] = tf.data_type(perm_) (tf.execute(desc))[1] end + function conjugate_transpose(x_, perm_; name=nothing) + if tf.eager_mode + conjugate_transpose_eager(x_, perm_; name=name) + else + conjugate_transpose_graph(x_, perm_; name=name) + end + end end @@ -45403,7 +53243,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unstage") do desc = tf.NodeDescription("Unstage") @@ -45425,7 +53265,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function unstage_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unstage") if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -45444,6 +53284,13 @@ begin end (tf.execute(desc))[1] end + function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.eager_mode + unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end end @@ -45453,7 +53300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function relu6grad(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "Relu6Grad") do desc = tf.NodeDescription("Relu6Grad") @@ -45465,7 +53312,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function relu6grad(gradients_::tf.TensorHandle, features_::tf.TensorHandle; name=nothing) + function relu6grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("Relu6Grad") tf.add_input(desc, gradients_) tf.add_input(desc, features_) @@ -45473,16 +53320,23 @@ begin desc["T"] = 
tf.data_type(features_) (tf.execute(desc))[1] end + function relu6grad(gradients_, features_; name=nothing) + if tf.eager_mode + relu6grad_eager(gradients_, features_; name=name) + else + relu6grad_graph(gradients_, features_; name=name) + end + end end """ - scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=lanczos3) + scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslateGrad") do desc = tf.NodeDescription("ScaleAndTranslateGrad") @@ -45501,7 +53355,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function scale_and_translate_grad(grads_::tf.TensorHandle, original_image_::tf.TensorHandle, scale_::tf.TensorHandle, translation_::tf.TensorHandle; name=nothing, kernel_type=nothing) + function scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslateGrad") tf.add_input(desc, grads_) tf.add_input(desc, original_image_) @@ -45514,6 +53368,13 @@ begin desc["T"] = tf.data_type(original_image_) (tf.execute(desc))[1] end + function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + if tf.eager_mode + scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=name, kernel_type=kernel_type) + else + scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=name, kernel_type=kernel_type) + end + end end @@ -45523,7 +53384,7 @@ end Converts an array of tensors to a list of tensors. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) local desc tf.with_op_name(name, "_ArrayToList") do desc = tf.NodeDescription("_ArrayToList") @@ -45539,7 +53400,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _array_to_list(input_::tf.TensorHandle; name=nothing, N=nothing, out_types=nothing) + function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) desc = tf.EagerOp("_ArrayToList") tf.add_input(desc, input_) if N !== nothing @@ -45551,16 +53412,23 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) + if tf.eager_mode + _array_to_list_eager(input_; name=name, N=N, out_types=out_types) + else + _array_to_list_graph(input_; name=name, N=N, out_types=out_types) + end + end end """ - cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV3") do desc = tf.NodeDescription("CudnnRNNV3") @@ -45604,7 +53472,7 @@ begin end out end - function cudnn_rnnv3(input_::tf.TensorHandle, input_h_::tf.TensorHandle, input_c_::tf.TensorHandle, params_::tf.TensorHandle, sequence_lengths_::tf.TensorHandle; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + function cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV3") tf.add_input(desc, input_) tf.add_input(desc, input_h_) @@ -45638,6 +53506,13 @@ begin desc["T"] = tf.data_type(params_) tf.execute(desc) end + function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.eager_mode + cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end + end end @@ -45647,7 +53522,7 
@@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function expand_dims(input_, dim_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) local desc tf.with_op_name(name, "ExpandDims") do desc = tf.NodeDescription("ExpandDims") @@ -45661,7 +53536,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function expand_dims(input_::tf.TensorHandle, dim_::tf.TensorHandle; name=nothing) + function expand_dims_eager(input_, dim_; name=nothing) desc = tf.EagerOp("ExpandDims") tf.add_input(desc, input_) tf.add_input(desc, dim_) @@ -45669,6 +53544,13 @@ begin desc["Tdim"] = tf.data_type(dim_) (tf.execute(desc))[1] end + function expand_dims(input_, dim_; name=nothing) + if tf.eager_mode + expand_dims_eager(input_, dim_; name=name) + else + expand_dims_graph(input_, dim_; name=name) + end + end end @@ -45678,7 +53560,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function inv_grad(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "InvGrad") do desc = tf.NodeDescription("InvGrad") @@ -45690,7 +53572,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function inv_grad(y_::tf.TensorHandle, dy_::tf.TensorHandle; name=nothing) + function inv_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("InvGrad") tf.add_input(desc, y_) tf.add_input(desc, dy_) @@ -45698,6 +53580,13 @@ begin desc["T"] = tf.data_type(dy_) (tf.execute(desc))[1] end + function inv_grad(y_, dy_; name=nothing) + if tf.eager_mode + inv_grad_eager(y_, dy_; name=name) + else + inv_grad_graph(y_, dy_; name=name) + end + end end @@ -45707,7 +53596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) local desc tf.with_op_name(name, "NonMaxSuppression") do desc = tf.NodeDescription("NonMaxSuppression") @@ -45723,7 +53612,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function non_max_suppression(boxes_::tf.TensorHandle, scores_::tf.TensorHandle, max_output_size_::tf.TensorHandle; name=nothing, iou_threshold=nothing) + function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) desc = tf.EagerOp("NonMaxSuppression") tf.add_input(desc, boxes_) tf.add_input(desc, scores_) @@ -45733,6 +53622,13 @@ begin end (tf.execute(desc))[1] end + function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + if tf.eager_mode + non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + else + non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + end + end end @@ -45742,7 +53638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function l2loss(t_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function l2loss_graph(t_; name=nothing) local desc tf.with_op_name(name, "L2Loss") do desc = tf.NodeDescription("L2Loss") @@ -45752,12 +53648,19 @@ begin end 
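# [Editorial aside - not part of the generated diff.] The return convention in these
# wrappers differs by output arity: `tf.execute(desc)` yields a vector with one
# handle per op output, so single-output wrappers such as `l2loss_eager` below
# unwrap it with `(tf.execute(desc))[1]`, while multi-output wrappers (e.g.
# `sparse_cross_eager` or `sample_distorted_bounding_box_eager` elsewhere in this
# hunk series) return the whole vector. A commented sketch of a caller coping with
# both shapes (variable names are illustrative):
#
#     loss = l2loss(t)                              # one output: a single value
#     out = sample_distorted_bounding_box(sz, bxs)  # three outputs: a vector
#     begin_, size_, bboxes_ = out                  # destructure per the op's arity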
tf.Tensor(tf.Operation(desc)) end - function l2loss(t_::tf.TensorHandle; name=nothing) + function l2loss_eager(t_; name=nothing) desc = tf.EagerOp("L2Loss") tf.add_input(desc, t_) desc["T"] = tf.data_type(t_) (tf.execute(desc))[1] end + function l2loss(t_; name=nothing) + if tf.eager_mode + l2loss_eager(t_; name=name) + else + l2loss_graph(t_; name=name) + end + end end @@ -45767,7 +53670,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeArea") do desc = tf.NodeDescription("ResizeArea") @@ -45782,7 +53685,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resize_area(images_::tf.TensorHandle, size_::tf.TensorHandle; name=nothing, align_corners=nothing) + function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeArea") tf.add_input(desc, images_) tf.add_input(desc, size_) @@ -45792,6 +53695,13 @@ begin desc["T"] = tf.data_type(images_) (tf.execute(desc))[1] end + function resize_area(images_, size_; name=nothing, align_corners=nothing) + if tf.eager_mode + resize_area_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_area_graph(images_, size_; name=name, align_corners=align_corners) + end + end end @@ -45801,7 +53711,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) local desc tf.with_op_name(name, "SparseCross") do desc = tf.NodeDescription("SparseCross") @@ -45845,7 +53755,7 @@ begin end out end - function sparse_cross(indices_::tf.TensorHandle, values_::tf.TensorHandle, shapes_::tf.TensorHandle, dense_inputs_::tf.TensorHandle; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) desc = tf.EagerOp("SparseCross") tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -45877,6 +53787,13 @@ begin end tf.execute(desc) end + function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + if tf.eager_mode + sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + else + 
sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + end + end end @@ -45886,7 +53803,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function batch_fft3d(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT3D") do desc = tf.NodeDescription("BatchFFT3D") @@ -45895,11 +53812,18 @@ begin end tf.Tensor(tf.Operation(desc)) end - function batch_fft3d(input_::tf.TensorHandle; name=nothing) + function batch_fft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT3D") tf.add_input(desc, input_) (tf.execute(desc))[1] end + function batch_fft3d(input_; name=nothing) + if tf.eager_mode + batch_fft3d_eager(input_; name=name) + else + batch_fft3d_graph(input_; name=name) + end + end end @@ -45909,7 +53833,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomStandardNormal") do desc = tf.NodeDescription("RandomStandardNormal") @@ -45928,7 +53852,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function random_standard_normal(shape_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomStandardNormal") tf.add_input(desc, shape_) if seed !== nothing @@ -45943,6 +53867,13 @@ begin desc["T"] = tf.data_type(shape_) (tf.execute(desc))[1] end + function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.eager_mode + random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end + end end @@ -45952,7 +53883,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMul") do desc = tf.NodeDescription("ResourceScatterMul") @@ -45971,7 +53902,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function resource_scatter_mul(resource_::tf.TensorHandle, indices_::tf.TensorHandle, updates_::tf.TensorHandle; name=nothing, dtype=nothing) + function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMul") tf.add_input(desc, resource_) tf.add_input(desc, indices_) @@ -45983,6 +53914,13 @@ begin desc["dtype"] = tf.data_type(updates_) (tf.execute(desc))[1] end + function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.eager_mode + resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + 
resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end + end end @@ -45992,7 +53930,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizer") do desc = tf.NodeDescription("SdcaOptimizer") @@ -46051,7 +53989,7 @@ begin end out end - function sdca_optimizer(sparse_example_indices_::tf.TensorHandle, sparse_feature_indices_::tf.TensorHandle, sparse_feature_values_::tf.TensorHandle, dense_features_::tf.TensorHandle, example_weights_::tf.TensorHandle, example_labels_::tf.TensorHandle, sparse_indices_::tf.TensorHandle, sparse_weights_::tf.TensorHandle, dense_weights_::tf.TensorHandle, example_state_data_::tf.TensorHandle; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizer") tf.add_input(desc, sparse_example_indices_) tf.add_input(desc, sparse_feature_indices_) @@ -46092,6 +54030,13 @@ begin end tf.execute(desc) end + function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.eager_mode + sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, 
num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end + end end @@ -46101,7 +54046,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function zeta(x_, q_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zeta_graph(x_, q_; name=nothing) local desc tf.with_op_name(name, "Zeta") do desc = tf.NodeDescription("Zeta") @@ -46113,7 +54058,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function zeta(x_::tf.TensorHandle, q_::tf.TensorHandle; name=nothing) + function zeta_eager(x_, q_; name=nothing) desc = tf.EagerOp("Zeta") tf.add_input(desc, x_) tf.add_input(desc, q_) @@ -46121,6 +54066,13 @@ begin desc["T"] = tf.data_type(q_) (tf.execute(desc))[1] end + function zeta(x_, q_; name=nothing) + if tf.eager_mode + zeta_eager(x_, q_; name=name) + else + zeta_graph(x_, q_; name=name) + end + end end @@ -46130,7 +54082,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBox") do desc = tf.NodeDescription("SampleDistortedBoundingBox") @@ -46168,7 +54120,7 @@ begin end out end - function sample_distorted_bounding_box(image_size_::tf.TensorHandle, bounding_boxes_::tf.TensorHandle; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBox") tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) @@ -46196,6 +54148,13 @@ begin desc["T"] = tf.data_type(image_size_) tf.execute(desc) end + function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.eager_mode + sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, 
max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end + end end @@ -46205,7 +54164,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function igamma_grad_a(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "IgammaGradA") do desc = tf.NodeDescription("IgammaGradA") @@ -46217,7 +54176,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function igamma_grad_a(a_::tf.TensorHandle, x_::tf.TensorHandle; name=nothing) + function igamma_grad_a_eager(a_, x_; name=nothing) desc = tf.EagerOp("IgammaGradA") tf.add_input(desc, a_) tf.add_input(desc, x_) @@ -46225,6 +54184,13 @@ begin desc["T"] = tf.data_type(x_) (tf.execute(desc))[1] end + function igamma_grad_a(a_, x_; name=nothing) + if tf.eager_mode + igamma_grad_a_eager(a_, x_; name=name) + else + igamma_grad_a_graph(a_, x_; name=name) + end + end end @@ -46234,7 +54200,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function segment_max(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMax") do desc = tf.NodeDescription("SegmentMax") @@ -46248,7 +54214,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function segment_max(data_::tf.TensorHandle, segment_ids_::tf.TensorHandle; name=nothing) + function segment_max_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMax") tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) @@ -46256,6 +54222,13 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) (tf.execute(desc))[1] end + function segment_max(data_, segment_ids_; name=nothing) + if tf.eager_mode + segment_max_eager(data_, segment_ids_; name=name) + else + segment_max_graph(data_, segment_ids_; name=name) + end + end end @@ -46265,7 +54238,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function range(start_, limit_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) local desc tf.with_op_name(name, "Range") do desc = tf.NodeDescription("Range") @@ -46279,7 +54252,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function range(start_::tf.TensorHandle, limit_::tf.TensorHandle, delta_::tf.TensorHandle; name=nothing) + function range_eager(start_, limit_, delta_; name=nothing) desc = tf.EagerOp("Range") tf.add_input(desc, start_) tf.add_input(desc, limit_) @@ -46289,6 +54262,13 @@ begin desc["Tidx"] = tf.data_type(delta_) (tf.execute(desc))[1] end + function range(start_, limit_, delta_; name=nothing) + if tf.eager_mode + range_eager(start_, limit_, delta_; name=name) + else + range_graph(start_, limit_, delta_; name=name) + end + end end @@ -46298,7 +54278,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, 
table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") @@ -46322,7 +54302,7 @@ begin end out end - function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -46338,6 +54318,13 @@ begin end tf.execute(desc) end + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.eager_mode + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end end @@ -46347,7 +54334,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function flush_summary_writer(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "FlushSummaryWriter") do desc = tf.NodeDescription("FlushSummaryWriter") @@ -46356,21 +54343,28 @@ begin end tf.Tensor(tf.Operation(desc)) end - function flush_summary_writer(writer_::tf.TensorHandle; name=nothing) + function flush_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("FlushSummaryWriter") tf.add_input(desc, writer_) (tf.execute(desc))[1] end + function flush_summary_writer(writer_; name=nothing) + if tf.eager_mode + flush_summary_writer_eager(writer_; name=name) + else + flush_summary_writer_graph(writer_; name=name) + end + end end """ - dequantize(input, min_range, max_range; mode=MIN_COMBINED) + dequantize(input, min_range, max_range; mode=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "Dequantize") do desc = tf.NodeDescription("Dequantize") @@ -46387,7 +54381,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function dequantize(input_::tf.TensorHandle, min_range_::tf.TensorHandle, max_range_::tf.TensorHandle; name=nothing, mode=nothing) + function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) desc = tf.EagerOp("Dequantize") tf.add_input(desc, input_) tf.add_input(desc, min_range_) @@ -46398,6 +54392,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function dequantize(input_, min_range_, max_range_; name=nothing, 
mode=nothing) + if tf.eager_mode + dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) + else + dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) + end + end end @@ -46407,7 +54408,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRowsGrad") do desc = tf.NodeDescription("SparseFillEmptyRowsGrad") @@ -46424,13 +54425,20 @@ begin end out end - function sparse_fill_empty_rows_grad(reverse_index_map_::tf.TensorHandle, grad_values_::tf.TensorHandle; name=nothing) + function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRowsGrad") tf.add_input(desc, reverse_index_map_) tf.add_input(desc, grad_values_) desc["T"] = tf.data_type(grad_values_) tf.execute(desc) end + function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) + if tf.eager_mode + sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) + else + sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) + end + end end @@ -46440,7 +54448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNext") do desc = tf.NodeDescription("IteratorGetNext") @@ -46455,7 +54463,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function iterator_get_next(iterator_::tf.TensorHandle; name=nothing, output_types=nothing, output_shapes=nothing) + function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNext") tf.add_input(desc, iterator_) if output_types !== nothing @@ -46466,6 +54474,13 @@ begin end (tf.execute(desc))[1] end + function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.eager_mode + iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end end @@ -46475,7 +54490,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) local desc tf.with_op_name(name, "SparseTensorDenseAdd") do desc = tf.NodeDescription("SparseTensorDenseAdd") @@ -46494,7 +54509,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function sparse_tensor_dense_add(a_indices_::tf.TensorHandle, a_values_::tf.TensorHandle, a_shape_::tf.TensorHandle, b_::tf.TensorHandle; name=nothing) + function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) desc = tf.EagerOp("SparseTensorDenseAdd") tf.add_input(desc, a_indices_) 
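      # (annotation) inputs go in in the op's declared order -- indices, values,
      # shape, then the dense operand -- before the "T" dtype attr is filled in
      # from the dense operand below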
tf.add_input(desc, a_values_) @@ -46506,6 +54521,13 @@ begin desc["T"] = tf.data_type(b_) (tf.execute(desc))[1] end + function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) + if tf.eager_mode + sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) + else + sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) + end + end end @@ -46515,7 +54537,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) local desc tf.with_op_name(name, "PreventGradient") do desc = tf.NodeDescription("PreventGradient") @@ -46528,7 +54550,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function prevent_gradient(input_::tf.TensorHandle; name=nothing, message=nothing) + function prevent_gradient_eager(input_; name=nothing, message=nothing) desc = tf.EagerOp("PreventGradient") tf.add_input(desc, input_) if message !== nothing @@ -46537,6 +54559,13 @@ begin desc["T"] = tf.data_type(input_) (tf.execute(desc))[1] end + function prevent_gradient(input_; name=nothing, message=nothing) + if tf.eager_mode + prevent_gradient_eager(input_; name=name, message=message) + else + prevent_gradient_graph(input_; name=name, message=message) + end + end end @@ -46546,7 +54575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:227 =# tf.@op function lookup_table_export(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExport") do desc = tf.NodeDescription("LookupTableExport") @@ -46560,11 +54589,18 @@ begin end out end - function lookup_table_export(table_handle_::tf.TensorHandle; name=nothing) + function lookup_table_export_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExport") tf.add_input(desc, table_handle_) tf.execute(desc) end + function lookup_table_export(table_handle_; name=nothing) + if tf.eager_mode + lookup_table_export_eager(table_handle_; name=name) + else + lookup_table_export_graph(table_handle_; name=name) + end + end end From 43393b873d470143dea824e5b2463ceea735bac5 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 17:39:39 -0500 Subject: [PATCH 12/49] casting --- src/ops.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/ops.jl b/src/ops.jl index ba05d225..ef85fd2a 100644 --- a/src/ops.jl +++ b/src/ops.jl @@ -36,8 +36,8 @@ end macro define_binary(jl_func, tf_func) quote @op $jl_func(t1::AbstractTensor, t2::AbstractTensor; kwargs...) = $tf_func(tf_promote(t1, t2)...; kwargs...) - @op $jl_func(t1::AbstractTensor, t2; kwargs...) = $tf_func(t1, Tensor(t2); kwargs...) - @op $jl_func(t1, t2::AbstractTensor; kwargs...) = $tf_func(Tensor(t1), t2; kwargs...) + @op $jl_func(t1::AbstractTensor, t2; kwargs...) = $jl_func(t1, Tensor(t2); kwargs...) + @op $jl_func(t1, t2::AbstractTensor; kwargs...) = $jl_func(Tensor(t1), t2; kwargs...) end |> esc end @@ -67,10 +67,10 @@ end macro define_broadcast(jl_op, tf_func) quote Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::AbstractTensor) = $tf_func(tf_promote(t1, t2)...) 
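[annotation] Both hunks in this "casting" patch serve the same fix: a mixed Tensor/plain-value call must pass through tf_promote so dtypes are aligned before the TF kernel runs. For @define_binary that is done by re-dispatching through the Julia-level function instead of calling the op directly. A minimal sketch of what an invocation like `@define_binary add Ops.add` would now expand to (the `add` name is illustrative, not part of the patch):

    @op add(t1::AbstractTensor, t2::AbstractTensor; kwargs...) = Ops.add(tf_promote(t1, t2)...; kwargs...)
    @op add(t1::AbstractTensor, t2; kwargs...) = add(t1, Tensor(t2); kwargs...)   # re-dispatch, so...
    @op add(t1, t2::AbstractTensor; kwargs...) = add(Tensor(t1), t2; kwargs...)   # ...tf_promote still runs
    # e.g. add(constant(1.0), 2): Tensor(2) is Int64; tf_promote casts it to Float64
    # before the underlying "Add" kernel ever sees the operands.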
- Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2) = $tf_func(t1, Tensor(t2)) - Base.Broadcast.broadcasted(::typeof($jl_op), t1, t2::AbstractTensor) = $tf_func(Tensor(t1), t2) - Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::Base.Broadcast.Broadcasted) = $tf_func(t1, Tensor(collect(t2))) - Base.Broadcast.broadcasted(::typeof($jl_op), t1::Base.Broadcast.Broadcasted, t2::AbstractTensor) = $tf_func(Tensor(collect(t1)), t2) + Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2) = $tf_func(tf_promote(t1, Tensor(t2))...) # TODO don't replicate the tf_promote calls + Base.Broadcast.broadcasted(::typeof($jl_op), t1, t2::AbstractTensor) = $tf_func(tf_promote(Tensor(t1), t2)...) + Base.Broadcast.broadcasted(::typeof($jl_op), t1::AbstractTensor, t2::Base.Broadcast.Broadcasted) = $tf_func(tf_promote(t1, Tensor(collect(t2)))...) + Base.Broadcast.broadcasted(::typeof($jl_op), t1::Base.Broadcast.Broadcasted, t2::AbstractTensor) = $tf_func(tf_promote(Tensor(collect(t1)), t2)...) end |> esc end From fc3dfb96e014bddc927c77456afd8fa330b1be3e Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Wed, 20 Feb 2019 19:38:45 -0500 Subject: [PATCH 13/49] import more ops --- src/core.jl | 2 +- src/eager.jl | 121 +++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 103 insertions(+), 20 deletions(-) diff --git a/src/core.jl b/src/core.jl index 79a7b14f..2c917d04 100644 --- a/src/core.jl +++ b/src/core.jl @@ -809,7 +809,7 @@ function Base.sizeof(t::RawTensor) @tfcall(:TF_TensorByteSize, Csize_t, (Ptr{Cvoid},), t.ptr) |> Int end -function set_device(node_desc, device::String) +function set_device(node_desc, device) @tfcall(:TF_SetDevice, Cvoid, (Ptr{Cvoid}, Cstring), node_desc.ptr, device) diff --git a/src/eager.jl b/src/eager.jl index ec7ec25d..5228070a 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -1,24 +1,34 @@ mutable struct EagerContext ptr::Ptr{Cvoid} +end - function EagerContext() - # For some reason, this has to be called before :TFE_Execute or else tf - # crashes. Maybe something about TF_GetAllOpList is causing the tf - # library to enter a bad state. - get_all_op_list() +@enum PlacementPolicy begin + PLACEMENT_EXPLICIT=0 + PLACEMENT_WARN=1 + PLACEMENT_SILENT=2 + PLACEMENT_SILENT_FOR_INT32=3 +end - options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ()) - @tfcall(:TFE_ContextOptionsSetAsync, Cvoid, (Ptr{Cvoid}, Cuchar), options, 0) - status = Status() - context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) - check_status(status) - this = new(context) - finalizer(this) do self - @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) - end - @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options) - return this +function EagerContext(;async=false, placement_policy=nothing) + # For some reason, this has to be called before :TFE_Execute or else tf + # crashes. Maybe something about TF_GetAllOpList is causing the tf + # library to enter a bad state. 
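+    # (annotation, not in the original commit) `async` is forwarded to
+    # TFE_ContextOptionsSetAsync as a Cuchar (false => 0, true => 1), and
+    # `placement_policy`, when supplied, should be one of the PlacementPolicy
+    # enum values defined above, mirroring TFE_DevicePlacementPolicy in the C API.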
+ get_all_op_list() + + options = @tfcall(:TFE_NewContextOptions, Ptr{Cvoid}, ()) + @tfcall(:TFE_ContextOptionsSetAsync, Cvoid, (Ptr{Cvoid}, Cuchar), options, async) + if placement_policy !== nothing + @tfcall(:TFE_ContextOptionsSetDevicePlacementPolicy, Cvoid, (Ptr{Cvoid}, Int), options, placement_policy) end + status = Status() + context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) + check_status(status) + this = EagerContext(context) + finalizer(this) do self + @tfcall(:TFE_DeleteContext, Cvoid, (Ptr{Cvoid},), self.ptr) + end + @tfcall(:TFE_DeleteContextOptions, Cvoid, (Ptr{Cvoid},), options) + return this end eager_ctx = nothing #EagerContext() @@ -31,7 +41,7 @@ function DeviceList(ctx::EagerContext) ptr = @tfcall(:TFE_ContextListDevices, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), ctx, status) check_status(status) count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},), ptr) - this = new(ptr, count) + this = DeviceList(ptr, count) return this end @@ -70,6 +80,14 @@ function device_name(h::TensorHandle) return unsafe_string(c_name) end +function backing_device_name(h::TensorHandle) + status = Status() + c_name = @tfcall(:TFE_TensorHandleBackingDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + return unsafe_string(c_name) +end + + function data_type(h::TensorHandle) return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) |> tf_to_jl_type end @@ -192,12 +210,14 @@ function setindex!(op::EagerOp, value::Vector, attr_name) end function set_attr_list(op::EagerOp, attr_name, list::Vector{<:Integer}) - list = Int64[Int64(x) for x in list] + # list = Int64[Int64(x) for x in list] + list = Int64.(list) @tfcall(:TFE_OpSetAttrIntList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Int64}, Cint), op, attr_name, list, length(list)) end function set_attr_list(op::EagerOp, attr_name, list::Vector{<:AbstractFloat}) - list = Float32[Float32(x) for x in list] + # list = Float32[Float32(x) for x in list] + list = Float32.(list) @tfcall(:TFE_OpSetAttrFloatList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Float32}, Cint), op, attr_name, list, length(list)) end @@ -218,3 +238,66 @@ function set_attr_shape_list(op::EagerOp, attr_name, list::Vector) Cint[length(x) for x in dims], length(dims)) end + +function clear_caches(ctx::EagerContext) + @tfcall(:TFE_ContextClearCaches, Cvoid, (Ptr{Cvoid},), ctx) +end + + +function num_dims(h::TensorHandle) + status = Status() + res = @tfcall(:TFE_TensorHandleNumDims, Cint, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + Int(res) +end + +function num_elements(h::TensorHandle) + status = Status() + res = @tfcall(:TFE_TensorHandleNumElements, Int64, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + Int(res) +end + + +function dim(h::TensorHandle, dim_index) + status = Status() + res = @tfcall(:TFE_TensorHandleDim, Int64, (Ptr{Cvoid}, Cint, Ptr{Cvoid}), h, dim_index-1, status) + check_status(status) + Int(res) +end + +function copy_sharing_tensor(h::TensorHandle) + status = Status() + res = TensorHandle() + res.ptr = @tfcall(:TFE_TensorHandleCopySharingTensor, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) + check_status(status) + return res +end + +function copy_to_device(ctx::EagerContext, h::TensorHandle, device_name) + status = Status() + res = TensorHandle() + res.ptr = @tfcall(:TFE_TensorHandleCopyToDevice, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), h, ctx, device_name, status) + check_status(status) + return res +end + +copy_to_device(h, device_name) = 
copy_to_device(eager_ctx, h, device_name) + +function set_device(op::EagerOp, device_name) + status = Status() + @tfcall(:TFE_OpSetDevice, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), op, device_name, status) + check_status(status) +end + +function get_device(op::EagerOp) + status = Status() + str = @tfcall(:TFE_OpGetDevice, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), op, status) + return String(str) +end + +function set_xla_compilation(op::EagerOp, enable) + @tfcall(:TFE_OpSetXLACompilation, Ptr{Cvoid}, (Ptr{Cvoid}, Cuchar), op, enable) + return enable +end + From a152ca4471483098afc3f99ba5353df613e8f20e Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 22 Feb 2019 13:01:16 -0500 Subject: [PATCH 14/49] Started gradients --- src/TensorFlow.jl | 1 + src/generate_ops.jl | 8 +- src/ops/imported_ops.jl | 8038 ++++++++++++++++++++++++++++----------- src/tape.jl | 123 +- 4 files changed, 5829 insertions(+), 2341 deletions(-) diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl index bbbb478e..4bc3052d 100644 --- a/src/TensorFlow.jl +++ b/src/TensorFlow.jl @@ -212,5 +212,6 @@ include("summary.jl") include("deprecated.jl") include("show.jl") include("generate_ops.jl") +include("tape.jl") end diff --git a/src/generate_ops.jl b/src/generate_ops.jl index cc64681c..9065251d 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -217,11 +217,12 @@ function to_function(op::tensorflow.OpDef) end eager_output_block = if scalar_output quote - tf.execute(desc)[1] + return res[1] end else quote - tf.execute(desc) + #tf.execute(desc) + return res end end graph_name = Symbol("$(jl_name)_graph") @@ -247,6 +248,9 @@ function to_function(op::tensorflow.OpDef) $eager_input_block $attr_block $(t_block...) + res = tf.execute(desc) + node = tf.TapeNode($jl_name, [$(inputs[2:end]...)], $(inputs[1].args...)) + tf.add_node(res[1], node) $eager_output_block end diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 6662d7c3..b21d80a0 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-02-20T17:14:58.083 +# Autogenerated on 2019-02-22T12:12:16.859 module Ops import TensorFlow @@ -9,7 +9,7 @@ const tf = TensorFlow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) local desc tf.with_op_name(name, "ReduceJoin") do desc = tf.NodeDescription("ReduceJoin") @@ -36,7 +36,10 @@ begin if separator !== nothing desc["separator"] = Base.String(separator) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing) + tf.add_node(res[1], node) + return res[1] end function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) if tf.eager_mode @@ -54,7 +57,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, 
other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) local desc tf.with_op_name(name, "ReduceDataset") do desc = tf.NodeDescription("ReduceDataset") @@ -108,7 +111,10 @@ begin if use_inter_op_parallelism !== nothing desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + tf.add_node(res[1], node) + return res[1] end function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) if tf.eager_mode @@ -126,7 +132,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListFromTensor") do desc = tf.NodeDescription("TensorListFromTensor") @@ -157,7 +163,10 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -175,7 +184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ExtractJpegShape") do desc = tf.NodeDescription("ExtractJpegShape") @@ -193,7 +202,10 @@ begin if output_type !== nothing desc["output_type"] = Base.identity(output_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing) + tf.add_node(res[1], node) + return res[1] end function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) if tf.eager_mode @@ -211,7 +223,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Svd") do desc = tf.NodeDescription("Svd") @@ -242,7 +254,10 @@ begin desc["full_matrices"] = Base.Bool(full_matrices) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing) + tf.add_node(res[1], node) + return res end function svd(input_; 
name=nothing, compute_uv=nothing, full_matrices=nothing) if tf.eager_mode @@ -260,7 +275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextSync") do desc = tf.NodeDescription("IteratorGetNextSync") @@ -284,7 +299,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -302,7 +320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "RefEnter") do desc = tf.NodeDescription("RefEnter") @@ -334,7 +352,10 @@ begin desc["parallel_iterations"] = Base.Int(parallel_iterations) end desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + tf.add_node(res[1], node) + return res[1] end function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) if tf.eager_mode @@ -352,7 +373,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function erf_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erf") do desc = tf.NodeDescription("Erf") @@ -366,7 +387,10 @@ begin desc = tf.EagerOp("Erf") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(erf, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function erf(x_; name=nothing) if tf.eager_mode @@ -384,7 +408,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExportV2") do desc = tf.NodeDescription("LookupTableExportV2") @@ -401,7 +425,10 @@ begin function lookup_table_export_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExportV2") tf.add_input(desc, table_handle_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing) + tf.add_node(res[1], node) + return res end function lookup_table_export_v2(table_handle_; name=nothing) if tf.eager_mode @@ -419,7 +446,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function round_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) local desc tf.with_op_name(name, "Round") do desc = tf.NodeDescription("Round") @@ -433,7 +460,10 @@ begin desc = tf.EagerOp("Round") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(round, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function round(x_; name=nothing) if tf.eager_mode @@ -451,7 +481,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeue") do desc = tf.NodeDescription("OutfeedDequeue") @@ -478,7 +508,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) if tf.eager_mode @@ -496,7 +529,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") @@ -508,7 +541,10 @@ begin function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeIsInitializedOp") tf.add_input(desc, tree_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) if tf.eager_mode @@ -526,7 +562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Merge") do desc = tf.NodeDescription("Merge") @@ -551,7 +587,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(inputs_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function merge(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -569,7 +608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) local desc 
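        # (annotation) each imported op now comes in three flavors:
        #   <op>_graph(...)  - builds a NodeDescription inside with_op_name and returns a graph Tensor
        #   <op>_eager(...)  - builds a tf.EagerOp, executes it immediately, and records a
        #                      tf.TapeNode against the result so a backward pass can replay it
        #   <op>(...)        - picks between the two based on the tf.eager_mode flag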
tf.with_op_name(name, "HistogramFixedWidth") do desc = tf.NodeDescription("HistogramFixedWidth") @@ -596,7 +635,10 @@ begin end desc["T"] = tf.data_type(values_) desc["T"] = tf.data_type(value_range_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) if tf.eager_mode @@ -614,7 +656,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function asin_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asin") do desc = tf.NodeDescription("Asin") @@ -628,7 +670,10 @@ begin desc = tf.EagerOp("Asin") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(asin, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function asin(x_; name=nothing) if tf.eager_mode @@ -646,7 +691,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Any") do desc = tf.NodeDescription("Any") @@ -670,7 +715,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -688,7 +736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "RsqrtGrad") do desc = tf.NodeDescription("RsqrtGrad") @@ -706,7 +754,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rsqrt_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -724,7 +775,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatter") do desc = tf.NodeDescription("TensorArrayScatter") @@ -747,7 +798,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -765,7 
+819,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) local desc tf.with_op_name(name, "DynamicPartition") do desc = tf.NodeDescription("DynamicPartition") @@ -793,7 +847,10 @@ begin desc["num_partitions"] = Base.Int(num_partitions) end desc["T"] = tf.data_type(data_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing) + tf.add_node(res[1], node) + return res end function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) if tf.eager_mode @@ -811,7 +868,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") @@ -838,7 +895,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -856,7 +916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeState") do desc = tf.NodeDescription("ReaderSerializeState") @@ -868,7 +928,10 @@ begin function reader_serialize_state_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeState") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_serialize_state(reader_handle_; name=nothing) if tf.eager_mode @@ -886,7 +949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function right_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RightShift") do desc = tf.NodeDescription("RightShift") @@ -904,7 +967,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(right_shift, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function right_shift(x_, y_; name=nothing) if 
tf.eager_mode @@ -922,7 +988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3D") do desc = tf.NodeDescription("AvgPool3D") @@ -960,7 +1026,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -978,7 +1047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) local desc tf.with_op_name(name, "EncodePng") do desc = tf.NodeDescription("EncodePng") @@ -998,7 +1067,10 @@ begin desc["compression"] = Base.Int(compression) end desc["T"] = tf.data_type(image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing) + tf.add_node(res[1], node) + return res[1] end function encode_png(image_; name=nothing, compression=nothing) if tf.eager_mode @@ -1016,7 +1088,7 @@ end Debug Identity Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugIdentity") do desc = tf.NodeDescription("DebugIdentity") @@ -1054,7 +1126,10 @@ begin desc["gated_grpc"] = Base.Bool(gated_grpc) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + tf.add_node(res[1], node) + return res[1] end function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) if tf.eager_mode @@ -1072,7 +1147,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function imag_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) local desc tf.with_op_name(name, "Imag") do desc = tf.NodeDescription("Imag") @@ -1086,7 +1161,10 @@ begin desc = tf.EagerOp("Imag") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(imag, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function imag(input_; name=nothing) if tf.eager_mode @@ -1104,7 +1182,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") @@ -1159,7 +1237,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -1177,7 +1258,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageClear") do desc = tf.NodeDescription("StageClear") @@ -1216,7 +1297,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - 
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -1234,7 +1318,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sign_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sign") do desc = tf.NodeDescription("Sign") @@ -1248,7 +1332,10 @@ begin desc = tf.EagerOp("Sign") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sign, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sign(x_; name=nothing) if tf.eager_mode @@ -1266,7 +1353,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function population_count_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) local desc tf.with_op_name(name, "PopulationCount") do desc = tf.NodeDescription("PopulationCount") @@ -1280,7 +1367,10 @@ begin desc = tf.EagerOp("PopulationCount") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(population_count, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function population_count(x_; name=nothing) if tf.eager_mode @@ -1298,7 +1388,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function neg_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) local desc tf.with_op_name(name, "Neg") do desc = tf.NodeDescription("Neg") @@ -1312,7 +1402,10 @@ begin desc = tf.EagerOp("Neg") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(neg, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function neg(x_; name=nothing) if tf.eager_mode @@ -1330,7 +1423,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "AnonymousIterator") do desc = tf.NodeDescription("AnonymousIterator") @@ -1351,7 +1444,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -1369,7 +1465,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSum") do desc = tf.NodeDescription("SparseReduceSum") @@ -1398,7 +1494,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["T"] = tf.data_type(input_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -1416,7 +1515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterDataset") do desc = tf.NodeDescription("FilterDataset") @@ -1455,7 +1554,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -1473,7 +1575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "StringLength") do desc = tf.NodeDescription("StringLength") @@ -1491,7 +1593,10 @@ begin if unit !== nothing desc["unit"] = Base.String(unit) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing) + tf.add_node(res[1], node) + return res[1] end function string_length(input_; name=nothing, unit=nothing) if tf.eager_mode @@ -1509,7 +1614,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3D") do desc = tf.NodeDescription("Conv3D") @@ -1551,7 +1656,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] 
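+        # (annotation) note the TapeNode above records the positional inputs but
+        # leaves every keyword attribute as a literal `nothing` at this point in
+        # the series: the tape remembers what fed the op, not how it was configured.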
end function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -1569,7 +1677,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") @@ -1607,7 +1715,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -1625,7 +1736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_has_value_graph(optional_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) local desc tf.with_op_name(name, "OptionalHasValue") do desc = tf.NodeDescription("OptionalHasValue") @@ -1637,7 +1748,10 @@ begin function optional_has_value_eager(optional_; name=nothing) desc = tf.EagerOp("OptionalHasValue") tf.add_input(desc, optional_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(optional_has_value, [optional_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function optional_has_value(optional_; name=nothing) if tf.eager_mode @@ -1655,7 +1769,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyAdam") do desc = tf.NodeDescription("ApplyAdam") @@ -1717,7 +1831,10 @@ begin desc["T"] = tf.data_type(beta2_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -1735,7 +1852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, 
dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsToCanonical") do desc = tf.NodeDescription("CudnnRNNParamsToCanonical") @@ -1805,7 +1922,10 @@ begin desc["seed2"] = Base.Int(seed2) end desc["T"] = tf.data_type(params_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -1823,7 +1943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT3D") do desc = tf.NodeDescription("IRFFT3D") @@ -1838,7 +1958,10 @@ begin desc = tf.EagerOp("IRFFT3D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function irfft3d(input_, fft_length_; name=nothing) if tf.eager_mode @@ -1856,7 +1979,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function angle_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) local desc tf.with_op_name(name, "Angle") do desc = tf.NodeDescription("Angle") @@ -1870,7 +1993,10 @@ begin desc = tf.EagerOp("Angle") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(angle, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function angle(input_; name=nothing) if tf.eager_mode @@ -1888,7 +2014,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") @@ -1909,7 +2035,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -1927,7 +2056,7 @@ 
end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LearnedUnigramCandidateSampler") do desc = tf.NodeDescription("LearnedUnigramCandidateSampler") @@ -1980,7 +2109,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -1998,7 +2130,7 @@ end A graph node which represents an argument to a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Arg") do desc = tf.NodeDescription("_Arg") @@ -2013,7 +2145,10 @@ begin if index !== nothing desc["index"] = Base.Int(index) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_arg, [], name=nothing, index=nothing) + tf.add_node(res[1], node) + return res[1] end function _arg(; name=nothing, index=nothing) if tf.eager_mode @@ -2031,7 +2166,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_square_root_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixSquareRoot") do desc = tf.NodeDescription("MatrixSquareRoot") @@ -2045,7 +2180,10 @@ begin desc = tf.EagerOp("MatrixSquareRoot") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_square_root, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_square_root(input_; name=nothing) if tf.eager_mode @@ -2063,7 +2201,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseMul") do desc = tf.NodeDescription("SparseDenseCwiseMul") @@ -2087,7 +2225,10 @@ begin tf.add_input(desc, dense_) desc["T"] = tf.data_type(sp_values_) desc["T"] = tf.data_type(dense_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) if tf.eager_mode @@ -2105,7 +2246,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV3") do desc = tf.NodeDescription("TensorArrayConcatV3") @@ -2137,7 +2278,10 @@ begin if element_shape_except0 !== nothing desc["element_shape_except0"] = Base.identity(element_shape_except0) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing) + tf.add_node(res[1], node) + return res end function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.eager_mode @@ -2155,7 +2299,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_script_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_script_graph(input_; name=nothing) local desc tf.with_op_name(name, "UnicodeScript") do desc = tf.NodeDescription("UnicodeScript") @@ -2167,7 +2311,10 @@ begin function unicode_script_eager(input_; name=nothing) desc = tf.EagerOp("UnicodeScript") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unicode_script, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unicode_script(input_; name=nothing) if tf.eager_mode @@ -2185,7 +2332,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "BatchCholeskyGrad") do desc = tf.NodeDescription("BatchCholeskyGrad") @@ -2203,7 +2350,10 @@ begin tf.add_input(desc, grad_) desc["T"] = tf.data_type(l_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_cholesky_grad(l_, grad_; name=nothing) if tf.eager_mode @@ -2221,7 +2371,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Mean") do desc = tf.NodeDescription("Mean") @@ -2247,7 +2397,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -2265,7 +2418,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# 
tf.@op function batch_fft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT") do desc = tf.NodeDescription("BatchFFT") @@ -2277,7 +2430,10 @@ begin function batch_fft_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_fft, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_fft(input_; name=nothing) if tf.eager_mode @@ -2295,7 +2451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sin_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sin") do desc = tf.NodeDescription("Sin") @@ -2309,7 +2465,10 @@ begin desc = tf.EagerOp("Sin") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sin, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sin(x_; name=nothing) if tf.eager_mode @@ -2327,7 +2486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") @@ -2348,7 +2507,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -2366,7 +2528,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedMaxPool") do desc = tf.NodeDescription("QuantizedMaxPool") @@ -2409,7 +2571,10 @@ begin desc["padding"] = Base.String(padding) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res end function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -2427,7 +2592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, 
shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapStage") do desc = tf.NodeDescription("OrderedMapStage") @@ -2481,7 +2646,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -2499,7 +2667,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "PartitionedCall") do desc = tf.NodeDescription("PartitionedCall") @@ -2547,7 +2715,10 @@ begin if executor_type !== nothing desc["executor_type"] = Base.String(executor_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + tf.add_node(res[1], node) + return res[1] end function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) if tf.eager_mode @@ -2565,7 +2736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "SparseApplyAdagrad") do desc = tf.NodeDescription("SparseApplyAdagrad") @@ -2609,7 +2780,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.eager_mode @@ -2627,7 +2801,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) local desc tf.with_op_name(name, "DecodeProtoV2") do desc = tf.NodeDescription("DecodeProtoV2") @@ -2680,7 +2854,10 @@ begin if sanitize !== nothing desc["sanitize"] = Base.Bool(sanitize) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + tf.add_node(res[1], node) + return res end function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) if tf.eager_mode @@ -2698,7 +2875,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) local desc tf.with_op_name(name, "Betainc") do desc = tf.NodeDescription("Betainc") @@ -2720,7 +2897,10 @@ begin desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(b_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function betainc(a_, b_, x_; name=nothing) if tf.eager_mode @@ -2738,7 +2918,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function guarantee_const_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) local desc tf.with_op_name(name, "GuaranteeConst") do desc = tf.NodeDescription("GuaranteeConst") @@ -2752,7 +2932,10 @@ begin desc = tf.EagerOp("GuaranteeConst") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(guarantee_const, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function guarantee_const(input_; name=nothing) if tf.eager_mode @@ -2770,7 +2953,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) local desc tf.with_op_name(name, "DecodeBmp") do desc = tf.NodeDescription("DecodeBmp") @@ -2788,7 +2971,10 @@ begin if channels !== nothing desc["channels"] = Base.Int(channels) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_bmp(contents_; name=nothing, channels=nothing) if tf.eager_mode @@ -2806,7 +2992,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesBucketize") do desc = 
tf.NodeDescription("BoostedTreesBucketize") @@ -2832,7 +3018,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) if tf.eager_mode @@ -2850,7 +3039,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "ShutdownDistributedTPU") do desc @@ -2860,7 +3049,10 @@ begin end function shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("ShutdownDistributedTPU") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function shutdown_distributed_tpu(; name=nothing) if tf.eager_mode @@ -2878,7 +3070,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") @@ -2890,7 +3082,10 @@ begin function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") tf.add_input(desc, iterator_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_stats_aggregator_summary(iterator_; name=nothing) if tf.eager_mode @@ -2908,7 +3103,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function timestamp_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) local desc tf.with_op_name(name, "Timestamp") do desc @@ -2918,7 +3113,10 @@ begin end function timestamp_eager(; name=nothing) desc = tf.EagerOp("Timestamp") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(timestamp, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function timestamp(; name=nothing) if tf.eager_mode @@ -2936,7 +3134,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_exponential_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixExponential") do desc = tf.NodeDescription("MatrixExponential") @@ -2950,7 +3148,10 @@ begin desc = tf.EagerOp("MatrixExponential") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_exponential, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_exponential(input_; name=nothing) if tf.eager_mode @@ 
-2968,7 +3169,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Size") do desc = tf.NodeDescription("Size") @@ -2988,7 +3189,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function size(input_; name=nothing, out_type=nothing) if tf.eager_mode @@ -3006,7 +3210,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "AddN") do desc = tf.NodeDescription("AddN") @@ -3026,7 +3230,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(inputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function add_n(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -3044,7 +3251,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSum") do desc = tf.NodeDescription("SparseSegmentSum") @@ -3067,7 +3274,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) if tf.eager_mode @@ -3085,7 +3295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDataset") do desc = tf.NodeDescription("BatchDataset") @@ -3112,7 +3322,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -3130,7 +3343,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, 
batch_size=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) local desc tf.with_op_name(name, "RecordInput") do desc = tf.NodeDescription("RecordInput") @@ -3181,7 +3394,10 @@ begin if compression_type !== nothing desc["compression_type"] = Base.String(compression_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + tf.add_node(res[1], node) + return res[1] end function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) if tf.eager_mode @@ -3199,7 +3415,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpToV2") do desc = tf.NodeDescription("QueueDequeueUpToV2") @@ -3226,7 +3442,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -3244,7 +3463,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -3282,7 +3501,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -3300,7 +3522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, 
ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -3345,7 +3567,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -3363,7 +3588,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) local desc tf.with_op_name(name, "SerializeTensor") do desc = tf.NodeDescription("SerializeTensor") @@ -3377,7 +3602,10 @@ begin desc = tf.EagerOp("SerializeTensor") tf.add_input(desc, tensor_) desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function serialize_tensor(tensor_; name=nothing) if tf.eager_mode @@ -3395,7 +3623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mul_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mul") do desc = tf.NodeDescription("Mul") @@ -3413,7 +3641,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mul, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function mul(x_, y_; name=nothing) if tf.eager_mode @@ -3431,7 +3662,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") @@ -3454,7 +3685,10 @@ begin tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) desc["T"] = tf.data_type(labels_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing) + tf.add_node(res[1], node) + return res end function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) if tf.eager_mode @@ -3472,7 +3706,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterDiv") do desc = tf.NodeDescription("ResourceScatterDiv") @@ -3501,7 +3735,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -3519,7 +3756,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDatasetV2") do desc = tf.NodeDescription("FixedLengthRecordDatasetV2") @@ -3546,7 +3783,10 @@ begin tf.add_input(desc, footer_bytes_) tf.add_input(desc, buffer_size_) tf.add_input(desc, compression_type_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) if tf.eager_mode @@ -3564,7 +3804,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "SkipDataset") do desc = tf.NodeDescription("SkipDataset") @@ -3591,7 +3831,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -3609,7 +3852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cosh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cosh") do desc = tf.NodeDescription("Cosh") @@ -3623,7 +3866,10 @@ begin desc = tf.EagerOp("Cosh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cosh, [x_], name=nothing) + 
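# The TapeNode built just above captures the Julia-level op (`cosh`) together
# with its input handles; `tf.add_node(res[1], node)` below keys that record on
# the op's first output, presumably so a later backward pass can look up how
# each eager tensor was produced and replay the op for gradients.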
tf.add_node(res[1], node) + return res[1] end function cosh(x_; name=nothing) if tf.eager_mode @@ -3641,7 +3887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormV2") do desc = tf.NodeDescription("FusedBatchNormV2") @@ -3701,7 +3947,10 @@ begin desc["U"] = tf.data_type(offset_) desc["U"] = tf.data_type(mean_) desc["U"] = tf.data_type(variance_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.eager_mode @@ -3719,7 +3968,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplit") do desc = tf.NodeDescription("TensorArraySplit") @@ -3742,7 +3991,10 @@ begin tf.add_input(desc, lengths_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) if tf.eager_mode @@ -3760,7 +4012,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) local desc tf.with_op_name(name, "CTCLoss") do desc = tf.NodeDescription("CTCLoss") @@ -3804,7 +4056,10 @@ begin if ignore_longer_outputs_than_inputs !== nothing desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + tf.add_node(res[1], node) + return res end function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) if tf.eager_mode @@ -3822,7 +4077,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "QuantizedReshape") do desc = tf.NodeDescription("QuantizedReshape") @@ -3852,7 +4107,10 @@ begin tf.add_input(desc, input_max_) desc["T"] = tf.data_type(tensor_) desc["Tshape"] = tf.data_type(shape_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing) + tf.add_node(res[1], node) + return res end function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) if tf.eager_mode @@ -3870,7 +4128,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorDiv") do desc = tf.NodeDescription("FloorDiv") @@ -3888,7 +4146,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(floor_div, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function floor_div(x_, y_; name=nothing) if tf.eager_mode @@ -3906,7 +4167,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV2") do desc = tf.NodeDescription("TensorArrayV2") @@ -3948,7 +4209,10 @@ begin if tensor_array_name !== nothing desc["tensor_array_name"] = Base.String(tensor_array_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) if tf.eager_mode @@ -3966,7 +4230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "BarrierClose") do desc = tf.NodeDescription("BarrierClose") @@ -3984,7 +4248,10 @@ begin if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing) + tf.add_node(res[1], node) + return res[1] end function barrier_close(handle_; name=nothing, 
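# Recording is unconditional in this generated code: stateful and
# side-effecting ops in this stretch (TensorArrayV2, BarrierClose) get a
# TapeNode just like the arithmetic ops; nothing filters non-differentiable
# ops out of the tape at this stage.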
cancel_pending_enqueues=nothing) if tf.eager_mode @@ -4002,7 +4269,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ReadVariableOp") do desc = tf.NodeDescription("ReadVariableOp") @@ -4020,7 +4287,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function read_variable_op(resource_; name=nothing, dtype=nothing) if tf.eager_mode @@ -4038,7 +4308,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedMul") do desc = tf.NodeDescription("QuantizedMul") @@ -4074,7 +4344,10 @@ begin tf.add_input(desc, max_y_) desc["T1"] = tf.data_type(x_) desc["T2"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing) + tf.add_node(res[1], node) + return res end function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) if tf.eager_mode @@ -4092,7 +4365,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function selu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Selu") do desc = tf.NodeDescription("Selu") @@ -4106,7 +4379,10 @@ begin desc = tf.EagerOp("Selu") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(selu, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function selu(features_; name=nothing) if tf.eager_mode @@ -4124,7 +4400,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV3") do desc = tf.NodeDescription("CudnnRNNBackpropV3") @@ -4225,7 +4501,10 @@ begin desc["T"] = tf.data_type(output_h_backprop_) desc["T"] = tf.data_type(output_c_backprop_) desc["T"] = tf.data_type(reserve_space_) - tf.execute(desc) + res = tf.execute(desc) + node = 
tf.TapeNode(cudnn_rnn_backprop_v3, [input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -4243,7 +4522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsert") do desc = tf.NodeDescription("LookupTableInsert") @@ -4265,7 +4544,10 @@ begin tf.add_input(desc, values_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_insert(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -4283,7 +4565,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function complex_abs_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "ComplexAbs") do desc = tf.NodeDescription("ComplexAbs") @@ -4297,7 +4579,10 @@ begin desc = tf.EagerOp("ComplexAbs") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(complex_abs, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function complex_abs(x_; name=nothing) if tf.eager_mode @@ -4315,7 +4600,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) local desc tf.with_op_name(name, "TridiagonalSolve") do desc = tf.NodeDescription("TridiagonalSolve") @@ -4333,7 +4618,10 @@ begin tf.add_input(desc, rhs_) desc["T"] = tf.data_type(diagonals_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tridiagonal_solve, [diagonals_, rhs_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tridiagonal_solve(diagonals_, rhs_; name=nothing) if tf.eager_mode @@ -4351,7 +4639,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImport") do desc = tf.NodeDescription("LookupTableImport") @@ -4373,7 +4661,10 @@ begin tf.add_input(desc, 
values_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_import(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -4391,7 +4682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function abs_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "Abs") do desc = tf.NodeDescription("Abs") @@ -4405,7 +4696,10 @@ begin desc = tf.EagerOp("Abs") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(abs, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function abs(x_; name=nothing) if tf.eager_mode @@ -4423,7 +4717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyAdam") do desc = tf.NodeDescription("ResourceApplyAdam") @@ -4482,7 +4776,10 @@ begin desc["T"] = tf.data_type(beta2_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -4500,7 +4797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) local desc tf.with_op_name(name, "WriteHistogramSummary") do desc = tf.NodeDescription("WriteHistogramSummary") @@ -4523,7 +4820,10 @@ begin tf.add_input(desc, tag_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) if tf.eager_mode @@ -4541,7 +4841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do desc = 
tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") @@ -4556,7 +4856,10 @@ begin desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") tf.add_input(desc, dataset_) tf.add_input(desc, materialized_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) if tf.eager_mode @@ -4574,7 +4877,7 @@ end Sends the named tensor from send_device to recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostSend") do desc = tf.NodeDescription("_HostSend") @@ -4618,7 +4921,10 @@ begin desc["client_terminated"] = Base.Bool(client_terminated) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + tf.add_node(res[1], node) + return res[1] end function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.eager_mode @@ -4636,7 +4942,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function greater_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Greater") do desc = tf.NodeDescription("Greater") @@ -4654,7 +4960,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(greater, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function greater(x_, y_; name=nothing) if tf.eager_mode @@ -4672,7 +4981,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "NcclBroadcast") do desc = tf.NodeDescription("NcclBroadcast") @@ -4692,7 +5001,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function nccl_broadcast(input_; name=nothing, shape=nothing) if tf.eager_mode @@ -4710,7 +5022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBackBatch") do desc = tf.NodeDescription("TensorListPushBackBatch") @@ -4733,7 +5045,10 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end desc["element_dtype"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -4751,7 +5066,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMin") do desc = tf.NodeDescription("ResourceScatterMin") @@ -4780,7 +5095,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -4798,7 +5116,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) local desc tf.with_op_name(name, "Slice") do desc = tf.NodeDescription("Slice") @@ -4828,7 +5146,10 @@ begin desc["T"] = tf.data_type(input_) desc["Index"] = tf.data_type(begin_) desc["Index"] = tf.data_type(size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing) + tf.add_node(res[1], node) + return res[1] end function slice(input_, begin_, size_; name=nothing, Index=nothing) if tf.eager_mode @@ -4846,7 +5167,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecode") do desc = tf.NodeDescription("UnicodeDecode") @@ -4887,7 +5208,10 @@ begin if replace_control_characters !== nothing desc["replace_control_characters"] = Base.Bool(replace_control_characters) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + tf.add_node(res[1], node) + return res end function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) if tf.eager_mode @@ 
-4905,7 +5229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TakeDataset") do desc = tf.NodeDescription("TakeDataset") @@ -4932,7 +5256,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -4950,7 +5277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") @@ -4989,7 +5316,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) if tf.eager_mode @@ -5007,7 +5337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "AllCandidateSampler") do desc = tf.NodeDescription("AllCandidateSampler") @@ -5054,7 +5384,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -5072,7 +5405,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, 
use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropInput") do desc = tf.NodeDescription("Conv2DBackpropInput") @@ -5129,7 +5462,10 @@ begin end desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -5147,7 +5483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "DatasetToSingleElement") do desc = tf.NodeDescription("DatasetToSingleElement") @@ -5171,7 +5507,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -5189,7 +5528,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "CacheDataset") do desc = tf.NodeDescription("CacheDataset") @@ -5216,7 +5555,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -5234,7 +5576,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, 
num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") @@ -5272,7 +5614,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res end function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ -5290,7 +5635,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedResizeAndPadConv2D") do desc = tf.NodeDescription("FusedResizeAndPadConv2D") @@ -5338,7 +5683,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -5356,7 +5704,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) local desc tf.with_op_name(name, "Batch") do desc = tf.NodeDescription("Batch") @@ -5433,7 +5781,10 @@ begin if T !== nothing desc["T"] = map(Base.identity, T) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + tf.add_node(res[1], node) + return res end function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) if tf.eager_mode @@ -5451,7 +5802,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastRecv") do desc = tf.NodeDescription("CollectiveBcastRecv") @@ -5484,7 +5835,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) if tf.eager_mode @@ -5502,7 +5856,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) local desc tf.with_op_name(name, "BatchToSpaceND") do desc = tf.NodeDescription("BatchToSpaceND") @@ -5526,7 +5880,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tblock_shape"] = tf.data_type(block_shape_) desc["Tcrops"] = tf.data_type(crops_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) if tf.eager_mode @@ -5544,7 +5901,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function loop_cond_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) local desc tf.with_op_name(name, "LoopCond") do desc = tf.NodeDescription("LoopCond") @@ -5556,7 +5913,10 @@ begin function loop_cond_eager(input_; name=nothing) desc = tf.EagerOp("LoopCond") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(loop_cond, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function loop_cond(input_; name=nothing) if tf.eager_mode @@ -5574,7 +5934,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "DepthToSpace") do desc = tf.NodeDescription("DepthToSpace") @@ -5600,7 +5960,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) if tf.eager_mode @@ -5618,7 +5981,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op 
function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) local desc tf.with_op_name(name, "DestroyTemporaryVariable") do desc = tf.NodeDescription("DestroyTemporaryVariable") @@ -5638,7 +6001,10 @@ begin desc["var_name"] = Base.String(var_name) end desc["T"] = tf.data_type(ref_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing) + tf.add_node(res[1], node) + return res[1] end function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) if tf.eager_mode @@ -5656,7 +6022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNN") do desc = tf.NodeDescription("CudnnRNN") @@ -5729,7 +6095,10 @@ begin desc["T"] = tf.data_type(input_h_) desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) if tf.eager_mode @@ -5747,7 +6116,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "RefIdentity") do desc = tf.NodeDescription("RefIdentity") @@ -5761,7 +6130,10 @@ begin desc = tf.EagerOp("RefIdentity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ref_identity, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ref_identity(input_; name=nothing) if tf.eager_mode @@ -5779,7 +6151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGrad") do desc = tf.NodeDescription("MaxPool3DGrad") @@ -5826,7 +6198,10 @@ begin desc["TInput"] = tf.data_type(orig_input_) desc["TInput"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + 
node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -5844,7 +6219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") @@ -5886,7 +6261,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -5904,7 +6282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueueV2") do desc = tf.NodeDescription("PaddingFIFOQueueV2") @@ -5943,7 +6321,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -5961,7 +6342,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInput") do desc = tf.NodeDescription("Conv3DBackpropInput") @@ -6001,7 +6382,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) 
desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -6019,7 +6403,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_exit_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefExit") do desc = tf.NodeDescription("RefExit") @@ -6033,7 +6417,10 @@ begin desc = tf.EagerOp("RefExit") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ref_exit, [data_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ref_exit(data_; name=nothing) if tf.eager_mode @@ -6051,7 +6438,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapClear") do desc = tf.NodeDescription("MapClear") @@ -6090,7 +6477,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -6108,7 +6498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) local desc tf.with_op_name(name, "EncodeWav") do desc = tf.NodeDescription("EncodeWav") @@ -6123,7 +6513,10 @@ begin desc = tf.EagerOp("EncodeWav") tf.add_input(desc, audio_) tf.add_input(desc, sample_rate_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function encode_wav(audio_, sample_rate_; name=nothing) if tf.eager_mode @@ -6141,7 +6534,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) local desc tf.with_op_name(name, "TensorSummaryV2") do desc = tf.NodeDescription("TensorSummaryV2") @@ -6161,7 +6554,10 @@ begin tf.add_input(desc, tensor_) tf.add_input(desc, serialized_summary_metadata_) desc["T"] = tf.data_type(tensor_) - 
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) if tf.eager_mode @@ -6179,7 +6575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpTo") do desc = tf.NodeDescription("QueueDequeueUpTo") @@ -6206,7 +6602,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -6224,7 +6623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "MatrixBandPart") do desc = tf.NodeDescription("MatrixBandPart") @@ -6247,7 +6646,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tindex"] = tf.data_type(num_lower_) desc["Tindex"] = tf.data_type(num_upper_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) if tf.eager_mode @@ -6265,7 +6667,7 @@ end Copy Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "Copy") do desc = tf.NodeDescription("Copy") @@ -6291,7 +6693,10 @@ begin desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + tf.add_node(res[1], node) + return res[1] end function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) if tf.eager_mode @@ -6309,7 +6714,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) local desc tf.with_op_name(name, "ShapeN") do desc = tf.NodeDescription("ShapeN") @@ -6340,7 +6745,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function shape_n(input_; name=nothing, N=nothing, out_type=nothing) if tf.eager_mode @@ -6358,7 +6766,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ExperimentalParseExampleDataset") do desc = tf.NodeDescription("ExperimentalParseExampleDataset") @@ -6424,7 +6832,10 @@ begin if sloppy !== nothing desc["sloppy"] = Base.Bool(sloppy) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) if tf.eager_mode @@ -6442,7 +6853,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, 
values_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Concat") do desc = tf.NodeDescription("Concat") @@ -6465,7 +6876,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function concat(concat_dim_, values_; name=nothing, N=nothing) if tf.eager_mode @@ -6483,7 +6897,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatDimMap") do desc = tf.NodeDescription("DataFormatDimMap") @@ -6509,7 +6923,10 @@ begin desc["dst_format"] = Base.String(dst_format) end desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing) + tf.add_node(res[1], node) + return res[1] end function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) if tf.eager_mode @@ -6527,7 +6944,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReader") do desc = tf.NodeDescription("IdentityReader") @@ -6548,7 +6965,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function identity_reader(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -6566,7 +6986,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softplus_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softplus") do desc = tf.NodeDescription("Softplus") @@ -6580,7 +7000,10 @@ begin desc = tf.EagerOp("Softplus") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(softplus, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function softplus(features_; name=nothing) if tf.eager_mode @@ -6598,7 +7021,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") @@ -6642,7 +7065,10 @@ begin desc["T"] = 
tf.data_type(l2_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -6660,7 +7086,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleSequenceExample") do desc = tf.NodeDescription("ParseSingleSequenceExample") @@ -6758,7 +7184,10 @@ begin if feature_list_dense_shapes !== nothing desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + tf.add_node(res[1], node) + return res end function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) if tf.eager_mode @@ -6776,7 +7205,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) local desc 
tf.with_op_name(name, "MatrixDiag") do desc = tf.NodeDescription("MatrixDiag") @@ -6790,7 +7219,10 @@ begin desc = tf.EagerOp("MatrixDiag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_diag(diagonal_; name=nothing) if tf.eager_mode @@ -6808,7 +7240,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fact_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing) local desc tf.with_op_name(name, "Fact") do desc @@ -6818,7 +7250,10 @@ begin end function fact_eager(; name=nothing) desc = tf.EagerOp("Fact") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fact, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fact(; name=nothing) if tf.eager_mode @@ -6836,7 +7271,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShardDataset") do desc = tf.NodeDescription("ShardDataset") @@ -6866,7 +7301,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(shard_dataset, [input_dataset_, num_shards_, index_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -6884,7 +7322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGrad") do desc = tf.NodeDescription("MaxPoolGradGrad") @@ -6930,7 +7368,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -6948,7 +7389,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, 
align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinearGrad") do desc = tf.NodeDescription("ResizeBilinearGrad") @@ -6971,7 +7412,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(original_image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -6989,7 +7433,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "BatchToSpace") do desc = tf.NodeDescription("BatchToSpace") @@ -7015,7 +7459,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(crops_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_to_space(input_, crops_; name=nothing, block_size=nothing) if tf.eager_mode @@ -7033,7 +7480,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) local desc tf.with_op_name(name, "OptionalFromValue") do desc = tf.NodeDescription("OptionalFromValue") @@ -7051,7 +7498,10 @@ begin if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing) + tf.add_node(res[1], node) + return res[1] end function optional_from_value(components_; name=nothing, Toutput_types=nothing) if tf.eager_mode @@ -7069,7 +7519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function xlogy_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xlogy") do desc = tf.NodeDescription("Xlogy") @@ -7087,7 +7537,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(xlogy, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function xlogy(x_, y_; name=nothing) if tf.eager_mode @@ -7105,7 +7558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cross_graph(a_, b_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing) local desc tf.with_op_name(name, "Cross") do desc = tf.NodeDescription("Cross") @@ -7123,7 +7576,10 @@ begin tf.add_input(desc, b_) desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(b_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cross, [a_, b_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function cross(a_, 
b_; name=nothing) if tf.eager_mode @@ -7141,7 +7597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseAnd") do desc = tf.NodeDescription("BitwiseAnd") @@ -7159,7 +7615,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bitwise_and(x_, y_; name=nothing) if tf.eager_mode @@ -7177,7 +7636,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) local desc tf.with_op_name(name, "BroadcastTo") do desc = tf.NodeDescription("BroadcastTo") @@ -7196,7 +7655,10 @@ begin tf.add_input(desc, shape_) desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function broadcast_to(input_, shape_; name=nothing) if tf.eager_mode @@ -7214,7 +7676,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "EluGrad") do desc = tf.NodeDescription("EluGrad") @@ -7232,7 +7694,10 @@ begin tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(outputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function elu_grad(gradients_, outputs_; name=nothing) if tf.eager_mode @@ -7250,7 +7715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackprop") do desc = tf.NodeDescription("CudnnRNNBackprop") @@ -7345,7 +7810,10 @@ begin desc["T"] = tf.data_type(output_h_backprop_) desc["T"] = tf.data_type(output_c_backprop_) desc["T"] = tf.data_type(reserve_space_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, 
input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -7363,7 +7831,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucketFast") do desc = tf.NodeDescription("StringToHashBucketFast") @@ -7381,7 +7849,10 @@ begin if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing) + tf.add_node(res[1], node) + return res[1] end function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) if tf.eager_mode @@ -7399,7 +7870,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTable") do desc = tf.NodeDescription("MutableHashTable") @@ -7438,7 +7909,10 @@ begin if value_dtype !== nothing desc["value_dtype"] = Base.identity(value_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.eager_mode @@ -7456,7 +7930,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu") do desc = tf.NodeDescription("Relu") @@ -7470,7 +7944,10 @@ begin desc = tf.EagerOp("Relu") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(relu, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function relu(features_; name=nothing) if tf.eager_mode @@ -7488,7 +7965,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) local desc tf.with_op_name(name, "NthElement") do desc = 
tf.NodeDescription("NthElement") @@ -7511,7 +7988,10 @@ begin desc["reverse"] = Base.Bool(reverse) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing) + tf.add_node(res[1], node) + return res[1] end function nth_element(input_, n_; name=nothing, reverse=nothing) if tf.eager_mode @@ -7529,7 +8009,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softsign_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softsign") do desc = tf.NodeDescription("Softsign") @@ -7543,7 +8023,10 @@ begin desc = tf.EagerOp("Softsign") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(softsign, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function softsign(features_; name=nothing) if tf.eager_mode @@ -7561,7 +8044,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTable") do desc = tf.NodeDescription("MutableDenseHashTable") @@ -7623,7 +8106,10 @@ begin desc["max_load_factor"] = Base.identity(max_load_factor) end desc["key_dtype"] = tf.data_type(empty_key_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) if tf.eager_mode @@ -7641,7 +8127,7 @@ end An op that shuts down a running distributed TPU system. 
The Op returns """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "_ShutdownDistributedTPU") do desc @@ -7651,7 +8137,10 @@ begin end function _shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("_ShutdownDistributedTPU") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function _shutdown_distributed_tpu(; name=nothing) if tf.eager_mode @@ -7669,7 +8158,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function polygamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Polygamma") do desc = tf.NodeDescription("Polygamma") @@ -7687,7 +8176,10 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(polygamma, [a_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function polygamma(a_, x_; name=nothing) if tf.eager_mode @@ -7705,7 +8197,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) local desc tf.with_op_name(name, "NcclReduce") do desc = tf.NodeDescription("NcclReduce") @@ -7731,7 +8223,10 @@ begin desc["num_devices"] = Base.Int(num_devices) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing) + tf.add_node(res[1], node) + return res[1] end function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) if tf.eager_mode @@ -7749,7 +8244,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMax") do desc = tf.NodeDescription("ArgMax") @@ -7775,7 +8270,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(dimension_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing) + tf.add_node(res[1], node) + return res[1] end function arg_max(input_, dimension_; name=nothing, output_type=nothing) if tf.eager_mode @@ -7793,7 +8291,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixSetDiag") do desc = tf.NodeDescription("MatrixSetDiag") @@ -7811,7 +8309,10 @@ begin tf.add_input(desc, diagonal_) 
desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(diagonal_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_set_diag(input_, diagonal_; name=nothing) if tf.eager_mode @@ -7829,7 +8330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) local desc tf.with_op_name(name, "SpaceToBatchND") do desc = tf.NodeDescription("SpaceToBatchND") @@ -7853,7 +8354,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tblock_shape"] = tf.data_type(block_shape_) desc["Tpaddings"] = tf.data_type(paddings_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) if tf.eager_mode @@ -7871,7 +8375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReshape") do desc = tf.NodeDescription("SparseReshape") @@ -7894,7 +8398,10 @@ begin tf.add_input(desc, input_indices_) tf.add_input(desc, input_shape_) tf.add_input(desc, new_shape_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) if tf.eager_mode @@ -7912,7 +8419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptimizeDataset") do desc = tf.NodeDescription("OptimizeDataset") @@ -7939,7 +8446,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -7957,7 +8467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatV2") do desc = tf.NodeDescription("ConcatV2") @@ -7983,7 +8493,10 @@ begin end desc["T"] = tf.data_type(values_) 
desc["Tidx"] = tf.data_type(axis_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(concat_v2, [values_, axis_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function concat_v2(values_, axis_; name=nothing, N=nothing) if tf.eager_mode @@ -8001,7 +8514,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdadelta") do desc = tf.NodeDescription("ResourceSparseApplyAdadelta") @@ -8048,7 +8561,10 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -8066,7 +8582,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tile_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "Tile") do desc = tf.NodeDescription("Tile") @@ -8085,7 +8601,10 @@ begin tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) desc["Tmultiples"] = tf.data_type(multiples_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tile, [input_, multiples_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tile(input_, multiples_; name=nothing) if tf.eager_mode @@ -8103,7 +8622,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MutexV2") do desc = tf.NodeDescription("MutexV2") @@ -8124,7 +8643,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutex_v2, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -8142,7 +8664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeManySparse") do desc = tf.NodeDescription("SerializeManySparse") @@ -8168,7 +8690,10 @@ 
begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(sparse_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(serialize_many_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) if tf.eager_mode @@ -8186,7 +8711,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) local desc tf.with_op_name(name, "TPUEmbeddingActivations") do desc = tf.NodeDescription("TPUEmbeddingActivations") @@ -8213,7 +8738,10 @@ begin if lookup_id !== nothing desc["lookup_id"] = Base.Int(lookup_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_embedding_activations, [embedding_variable_, sliced_activations_], name=nothing, table_id=nothing, lookup_id=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) if tf.eager_mode @@ -8231,7 +8759,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "BatchMatrixSolveLs") do desc = tf.NodeDescription("BatchMatrixSolveLs") @@ -8258,7 +8786,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) if tf.eager_mode @@ -8276,7 +8807,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function not_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function not_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "NotEqual") do desc = tf.NodeDescription("NotEqual") @@ -8294,7 +8825,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(not_equal, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function not_equal(x_, y_; name=nothing) if tf.eager_mode @@ -8312,7 +8846,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lgamma_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lgamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Lgamma") do desc = tf.NodeDescription("Lgamma") @@ -8326,7 +8860,10 @@ begin desc = tf.EagerOp("Lgamma") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - 
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lgamma, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lgamma(x_; name=nothing) if tf.eager_mode @@ -8344,7 +8881,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicateMetadata") do desc = tf.NodeDescription("TPUReplicateMetadata") @@ -8407,7 +8944,10 @@ begin if step_marker_location !== nothing desc["step_marker_location"] = Base.String(step_marker_location) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) if tf.eager_mode @@ -8425,7 +8965,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolHandle") do desc = tf.NodeDescription("ExperimentalThreadPoolHandle") @@ -8464,7 +9004,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_thread_pool_handle, [], name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -8482,7 +9025,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "SelfAdjointEig") do desc = tf.NodeDescription("SelfAdjointEig") @@ -8496,7 +9039,10 @@ begin desc = 
tf.EagerOp("SelfAdjointEig") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(self_adjoint_eig, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function self_adjoint_eig(input_; name=nothing) if tf.eager_mode @@ -8514,7 +9060,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") @@ -8537,7 +9083,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_get_bucket_boundaries, [quantile_stream_resource_handle_], name=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) if tf.eager_mode @@ -8555,7 +9104,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseDiv") do desc = tf.NodeDescription("SparseDenseCwiseDiv") @@ -8579,7 +9128,10 @@ begin tf.add_input(desc, dense_) desc["T"] = tf.data_type(sp_values_) desc["T"] = tf.data_type(dense_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_div, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) if tf.eager_mode @@ -8597,7 +9149,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function acos_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acos") do desc = tf.NodeDescription("Acos") @@ -8611,7 +9163,10 @@ begin desc = tf.EagerOp("Acos") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(acos, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function acos(x_; name=nothing) if tf.eager_mode @@ -8629,7 +9184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "All") do desc = tf.NodeDescription("All") @@ -8653,7 +9208,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["Tidx"] 
= tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(all, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -8671,7 +9229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) local desc tf.with_op_name(name, "CompareAndBitpack") do desc = tf.NodeDescription("CompareAndBitpack") @@ -8689,7 +9247,10 @@ begin tf.add_input(desc, threshold_) desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(threshold_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(compare_and_bitpack, [input_, threshold_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function compare_and_bitpack(input_, threshold_; name=nothing) if tf.eager_mode @@ -8707,7 +9268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "VarHandleOp") do desc = tf.NodeDescription("VarHandleOp") @@ -8740,7 +9301,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(var_handle_op, [], name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -8758,7 +9322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUniqueDataset") do desc = tf.NodeDescription("ExperimentalUniqueDataset") @@ -8782,7 +9346,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_unique_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -8800,7 +9367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndRelu") @@ -8866,7 +9433,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -8884,7 +9454,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "ListDiff") do desc = tf.NodeDescription("ListDiff") @@ -8913,7 +9483,10 @@ begin end desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing) + tf.add_node(res[1], node) + return res end function list_diff(x_, y_; name=nothing, out_idx=nothing) if tf.eager_mode @@ -8931,7 +9504,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryFileWriter") do desc = tf.NodeDescription("CreateSummaryFileWriter") @@ -8955,7 +9528,10 @@ begin tf.add_input(desc, max_queue_) tf.add_input(desc, flush_millis_) tf.add_input(desc, filename_suffix_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(create_summary_file_writer, [writer_, logdir_, max_queue_, flush_millis_, filename_suffix_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) if tf.eager_mode @@ -8973,7 +9549,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) local desc tf.with_op_name(name, "GenerateVocabRemapping") do desc = tf.NodeDescription("GenerateVocabRemapping") @@ -9011,7 +9587,10 @@ begin if old_vocab_size !== nothing desc["old_vocab_size"] = Base.Int(old_vocab_size) end - tf.execute(desc) + res = 
tf.execute(desc) + node = tf.TapeNode(generate_vocab_remapping, [new_vocab_file_, old_vocab_file_], name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + tf.add_node(res[1], node) + return res end function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) if tf.eager_mode @@ -9029,7 +9608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixInverse") do desc = tf.NodeDescription("BatchMatrixInverse") @@ -9049,7 +9628,10 @@ begin desc["adjoint"] = Base.Bool(adjoint) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_inverse, [input_], name=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) if tf.eager_mode @@ -9067,7 +9649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function control_trigger_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function control_trigger_graph(; name=nothing) local desc tf.with_op_name(name, "ControlTrigger") do desc @@ -9077,7 +9659,10 @@ begin end function control_trigger_eager(; name=nothing) desc = tf.EagerOp("ControlTrigger") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(control_trigger, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function control_trigger(; name=nothing) if tf.eager_mode @@ -9095,7 +9680,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) local desc tf.with_op_name(name, "TPUOrdinalSelector") do desc @@ -9105,7 +9690,10 @@ begin end function tpu_ordinal_selector_eager(; name=nothing) desc = tf.EagerOp("TPUOrdinalSelector") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_ordinal_selector, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_ordinal_selector(; name=nothing) if tf.eager_mode @@ -9123,7 +9711,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stop_gradient_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stop_gradient_graph(input_; name=nothing) local desc tf.with_op_name(name, "StopGradient") do desc = tf.NodeDescription("StopGradient") @@ -9137,7 +9725,10 @@ begin desc = tf.EagerOp("StopGradient") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stop_gradient, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function stop_gradient(input_; name=nothing) if tf.eager_mode @@ -9155,7 +9746,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "Split") do desc = tf.NodeDescription("Split") @@ -9184,7 +9775,10 @@ begin desc["num_split"] = Base.Int(num_split) end desc["T"] = tf.data_type(value_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(split, [split_dim_, value_], name=nothing, num_split=nothing) + tf.add_node(res[1], node) + return res end function split(split_dim_, value_; name=nothing, num_split=nothing) if tf.eager_mode @@ -9202,7 +9796,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) local desc tf.with_op_name(name, "Unpack") do desc = tf.NodeDescription("Unpack") @@ -9239,7 +9833,10 @@ begin desc["axis"] = Base.Int(axis) end desc["T"] = tf.data_type(value_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unpack, [value_], name=nothing, num=nothing, axis=nothing) + tf.add_node(res[1], node) + return res end function unpack(value_; name=nothing, num=nothing, axis=nothing) if tf.eager_mode @@ -9257,7 +9854,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMax") do desc = tf.NodeDescription("ResourceScatterMax") @@ -9286,7 +9883,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_max, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -9304,7 +9904,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWrite") do desc = tf.NodeDescription("TensorArrayWrite") @@ -9327,7 +9927,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write, [handle_, index_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -9345,7 +9948,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) local desc tf.with_op_name(name, "Fill") do desc = tf.NodeDescription("Fill") @@ -9370,7 +9973,10 @@ begin end 
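# Two things worth noting in the generated code around here. First, when an attr
# can be passed explicitly (index_type above), the line that follows re-derives it
# from the input handle, so in eager mode the inferred dtype unconditionally wins.
# Second, each op compiles to a *_graph/*_eager pair plus a dispatcher on
# tf.eager_mode whose body the context lines cut off; a sketch of its likely
# shape (an assumption, inferred from the truncated `if tf.eager_mode` lines):
#
#   function fill(dims_, value_; name=nothing, index_type=nothing)
#       if tf.eager_mode
#           fill_eager(dims_, value_; name=name, index_type=index_type)
#       else
#           fill_graph(dims_, value_; name=name, index_type=index_type)
#       end
#   end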
desc["index_type"] = tf.data_type(dims_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fill, [dims_, value_], name=nothing, index_type=nothing) + tf.add_node(res[1], node) + return res[1] end function fill(dims_, value_; name=nothing, index_type=nothing) if tf.eager_mode @@ -9388,7 +9994,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRequantize") @@ -9459,7 +10065,10 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) desc["Tbias"] = tf.data_type(bias_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -9477,7 +10086,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softmax_graph(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "Softmax") do desc = tf.NodeDescription("Softmax") @@ -9491,7 +10100,10 @@ begin desc = tf.EagerOp("Softmax") tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(softmax, [logits_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function softmax(logits_; name=nothing) if tf.eager_mode @@ -9509,7 +10121,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubic") do desc = tf.NodeDescription("ResizeBicubic") @@ -9532,7 +10144,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_bicubic, [images_, size_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -9550,7 +10165,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "InfeedDequeueTuple") do desc = tf.NodeDescription("InfeedDequeueTuple") @@ -9571,7 +10186,10 @@ begin if shapes !== nothing desc["shapes"] = map(Base.identity, shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(infeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) if tf.eager_mode @@ -9589,7 +10207,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIterator") do desc = tf.NodeDescription("MultiDeviceIterator") @@ -9628,7 +10246,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator, [], name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -9646,7 +10267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) local desc tf.with_op_name(name, "DecodeCSV") do desc = tf.NodeDescription("DecodeCSV") @@ -9691,7 +10312,10 @@ begin if select_cols !== nothing desc["select_cols"] = map(Base.identity, select_cols) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_csv, [records_, record_defaults_], name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) if tf.eager_mode @@ -9709,7 +10333,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, 
"LookupTableFind") do desc = tf.NodeDescription("LookupTableFind") @@ -9731,7 +10355,10 @@ begin tf.add_input(desc, default_value_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(default_value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_find, [table_handle_, keys_, default_value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) if tf.eager_mode @@ -9749,7 +10376,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleAndRepeatDataset") do desc = tf.NodeDescription("ShuffleAndRepeatDataset") @@ -9785,7 +10412,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(shuffle_and_repeat_dataset, [input_dataset_, buffer_size_, seed_, seed2_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -9803,7 +10433,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) local desc tf.with_op_name(name, "RequantizationRangePerChannel") do desc = tf.NodeDescription("RequantizationRangePerChannel") @@ -9834,7 +10464,10 @@ begin desc["clip_value_max"] = Base.identity(clip_value_max) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(requantization_range_per_channel, [input_, input_min_, input_max_], name=nothing, clip_value_max=nothing) + tf.add_node(res[1], node) + return res end function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) if tf.eager_mode @@ -9852,7 +10485,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUnbatchDataset") do desc = tf.NodeDescription("ExperimentalUnbatchDataset") @@ -9876,7 +10509,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_unbatch_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + 
return res[1] end function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -9894,7 +10530,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3DGrad") do desc = tf.NodeDescription("AvgPool3DGrad") @@ -9935,7 +10571,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(avg_pool3d_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -9953,7 +10592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderWithDefault") do desc = tf.NodeDescription("PlaceholderWithDefault") @@ -9979,7 +10618,10 @@ begin desc["shape"] = Base.identity(shape) end desc["dtype"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(placeholder_with_default, [input_], name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -9997,7 +10639,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTableV2") do desc = tf.NodeDescription("InitializeTableV2") @@ -10019,7 +10661,10 @@ begin tf.add_input(desc, values_) desc["Tkey"] = tf.data_type(keys_) desc["Tval"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(initialize_table_v2, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function initialize_table_v2(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -10037,7 +10682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SetSize") do desc = tf.NodeDescription("SetSize") @@ -10063,7 +10708,10 @@ begin desc["validate_indices"] = Base.Bool(validate_indices) end 
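# Return convention across these rewrites: single-output ops hand back res[1],
# while multi-output ops (SparseReshape, Split, Unpack, ListDiff, ReaderRead, ...)
# return the whole res vector; in both cases tf.add_node attaches the tape record
# to res[1] only, i.e. the tape keys the call off the first output. A sketch of
# the multi-output variant (illustrative, mirroring list_diff_eager above):
#
#   res = tf.execute(desc)                # Vector of output handles (out, out_idx)
#   node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing)
#   tf.add_node(res[1], node)             # only the first output is registered
#   return res                            # caller receives every output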
desc["T"] = tf.data_type(set_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(set_size, [set_indices_, set_values_, set_shape_], name=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res[1] end function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) if tf.eager_mode @@ -10081,7 +10729,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) local desc tf.with_op_name(name, "Assert") do desc = tf.NodeDescription("Assert") @@ -10108,7 +10756,10 @@ begin if summarize !== nothing desc["summarize"] = Base.Int(summarize) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assert, [condition_, data_], name=nothing, T=nothing, summarize=nothing) + tf.add_node(res[1], node) + return res[1] end function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) if tf.eager_mode @@ -10126,7 +10777,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV4") do desc = tf.NodeDescription("NonMaxSuppressionV4") @@ -10164,7 +10815,10 @@ begin end desc["T"] = tf.data_type(boxes_) desc["T"] = tf.data_type(scores_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v4, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, pad_to_max_output_size=nothing) + tf.add_node(res[1], node) + return res end function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) if tf.eager_mode @@ -10182,7 +10836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") @@ -10243,7 +10897,10 @@ begin desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) end desc["T"] = tf.data_type(image_size_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sample_distorted_bounding_box_v2, [image_size_, bounding_boxes_, min_object_covered_], name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, 
use_image_if_no_bounding_boxes=nothing) + tf.add_node(res[1], node) + return res end function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) if tf.eager_mode @@ -10261,7 +10918,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFile") do desc = tf.NodeDescription("InitializeTableFromTextFile") @@ -10300,7 +10957,10 @@ begin if delimiter !== nothing desc["delimiter"] = Base.String(delimiter) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(initialize_table_from_text_file, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + tf.add_node(res[1], node) + return res[1] end function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) if tf.eager_mode @@ -10318,7 +10978,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSize") do desc = tf.NodeDescription("LookupTableSize") @@ -10330,7 +10990,10 @@ begin function lookup_table_size_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSize") tf.add_input(desc, table_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_size(table_handle_; name=nothing) if tf.eager_mode @@ -10348,7 +11011,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdagradDA") do desc = tf.NodeDescription("SparseApplyAdagradDA") @@ -10401,7 +11064,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, 
l2_, global_step_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -10419,7 +11085,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastGradientArgs") do desc = tf.NodeDescription("BroadcastGradientArgs") @@ -10442,7 +11108,10 @@ begin tf.add_input(desc, s1_) desc["T"] = tf.data_type(s0_) desc["T"] = tf.data_type(s1_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(broadcast_gradient_args, [s0_, s1_], name=nothing) + tf.add_node(res[1], node) + return res end function broadcast_gradient_args(s0_, s1_; name=nothing) if tf.eager_mode @@ -10460,7 +11129,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) local desc tf.with_op_name(name, "SummaryWriter") do desc = tf.NodeDescription("SummaryWriter") @@ -10481,7 +11150,10 @@ begin if container !== nothing desc["container"] = Base.String(container) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(summary_writer, [], name=nothing, shared_name=nothing, container=nothing) + tf.add_node(res[1], node) + return res[1] end function summary_writer(; name=nothing, shared_name=nothing, container=nothing) if tf.eager_mode @@ -10499,7 +11171,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) local desc tf.with_op_name(name, "RecvTPUEmbeddingActivations") do desc = tf.NodeDescription("RecvTPUEmbeddingActivations") @@ -10525,7 +11197,10 @@ begin if config !== nothing desc["config"] = Base.String(config) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing) + tf.add_node(res[1], node) + return res end function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) if tf.eager_mode @@ -10543,7 +11218,7 @@ end output = input; While (Cond(output)) { output = Body(output) } """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "_While") do desc = tf.NodeDescription("_While") @@ -10573,7 +11248,10 @@ begin if body !== nothing desc["body"] = Base.identity(body) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing) + tf.add_node(res[1], node) + return res[1] end function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) if tf.eager_mode @@ -10591,7 +11269,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTable") do desc = tf.NodeDescription("InitializeTable") @@ -10613,7 +11291,10 @@ begin tf.add_input(desc, values_) desc["Tkey"] = tf.data_type(keys_) desc["Tval"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function initialize_table(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -10631,7 +11312,7 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNumericSummary") do desc = tf.NodeDescription("DebugNumericSummary") @@ -10687,7 +11368,10 @@ begin desc["gated_grpc"] = Base.Bool(gated_grpc) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + tf.add_node(res[1], node) + return res[1] end function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) if tf.eager_mode @@ -10705,7 +11389,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") @@ -10743,7 +11427,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -10761,7 +11448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
tanh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tanh") do desc = tf.NodeDescription("Tanh") @@ -10775,7 +11462,10 @@ begin desc = tf.EagerOp("Tanh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tanh, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tanh(x_; name=nothing) if tf.eager_mode @@ -10793,7 +11483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "SymbolicGradient") do desc = tf.NodeDescription("SymbolicGradient") @@ -10823,7 +11513,10 @@ begin if f !== nothing desc["f"] = Base.identity(f) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + tf.add_node(res[1], node) + return res[1] end function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if tf.eager_mode @@ -10841,7 +11534,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") @@ -10889,7 +11582,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) if tf.eager_mode @@ -10907,7 +11603,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyMomentum") do desc = tf.NodeDescription("ApplyMomentum") @@ -10949,7 +11645,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) desc["T"] = 
tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -10967,7 +11666,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderRead") do desc = tf.NodeDescription("ReaderRead") @@ -10987,7 +11686,10 @@ begin desc = tf.EagerOp("ReaderRead") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing) + tf.add_node(res[1], node) + return res end function reader_read(reader_handle_, queue_handle_; name=nothing) if tf.eager_mode @@ -11005,7 +11707,7 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) local desc tf.with_op_name(name, "_WaitForDistributedTPU") do desc = tf.NodeDescription("_WaitForDistributedTPU") @@ -11029,7 +11731,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) if tf.eager_mode @@ -11047,7 +11752,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) local desc tf.with_op_name(name, "MutexLock") do desc = tf.NodeDescription("MutexLock") @@ -11059,7 +11764,10 @@ begin function mutex_lock_eager(mutex_; name=nothing) desc = tf.EagerOp("MutexLock") tf.add_input(desc, mutex_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutex_lock, [mutex_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function mutex_lock(mutex_; name=nothing) if tf.eager_mode @@ -11077,7 +11785,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) local desc tf.with_op_name(name, "AccumulatorSetGlobalStep") do desc = tf.NodeDescription("AccumulatorSetGlobalStep") @@ -11092,7 +11800,10 @@ begin desc = tf.EagerOp("AccumulatorSetGlobalStep") tf.add_input(desc, handle_) tf.add_input(desc, new_global_step_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function accumulator_set_global_step(handle_, new_global_step_; name=nothing) if tf.eager_mode @@ -11110,7 +11821,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedAdd") do desc = tf.NodeDescription("QuantizedAdd") @@ -11146,7 +11857,10 @@ begin tf.add_input(desc, max_y_) desc["T1"] = tf.data_type(x_) desc["T2"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing) + tf.add_node(res[1], node) + return res end function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) if tf.eager_mode @@ -11164,7 +11878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) local desc tf.with_op_name(name, "Squeeze") do desc = tf.NodeDescription("Squeeze") @@ -11184,7 +11898,10 @@ begin desc["squeeze_dims"] = map(Base.identity, squeeze_dims) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function squeeze(input_; name=nothing, squeeze_dims=nothing) if tf.eager_mode @@ -11202,7 +11919,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) local desc tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") @@ -11214,7 +11931,10 @@ begin function experimental_matching_files_dataset_eager(patterns_; name=nothing) desc = tf.EagerOp("ExperimentalMatchingFilesDataset") tf.add_input(desc, patterns_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_matching_files_dataset(patterns_; name=nothing) if tf.eager_mode @@ -11232,7 +11952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") @@ -11250,7 +11970,10 @@ begin tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) tf.add_input(desc, compression_type_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) if tf.eager_mode @@ -11268,7 +11991,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") @@ -11304,7 +12027,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -11322,7 +12048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function no_op_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) local desc tf.with_op_name(name, "NoOp") do desc @@ -11332,7 +12058,10 @@ begin end function no_op_eager(; name=nothing) desc = tf.EagerOp("NoOp") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(no_op, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function no_op(; name=nothing) if tf.eager_mode @@ -11350,7 +12079,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ZipDataset") do desc = tf.NodeDescription("ZipDataset") @@ -11380,7 +12109,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) if tf.eager_mode @@ -11398,7 +12130,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReaderV2") do desc = 
tf.NodeDescription("IdentityReaderV2") @@ -11419,7 +12151,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -11437,7 +12172,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "LMDBReader") do desc = tf.NodeDescription("LMDBReader") @@ -11458,7 +12193,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -11476,7 +12214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "NcclAllReduce") do desc = tf.NodeDescription("NcclAllReduce") @@ -11508,7 +12246,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.eager_mode @@ -11526,7 +12267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TextLineDataset") do desc = tf.NodeDescription("TextLineDataset") @@ -11544,7 +12285,10 @@ begin tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) if tf.eager_mode @@ -11562,7 +12306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) 
local desc tf.with_op_name(name, "SdcaShrinkL1") do desc = tf.NodeDescription("SdcaShrinkL1") @@ -11592,7 +12336,10 @@ begin if l2 !== nothing desc["l2"] = Base.identity(l2) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing) + tf.add_node(res[1], node) + return res[1] end function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) if tf.eager_mode @@ -11610,7 +12357,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReaderV2") do desc = tf.NodeDescription("TFRecordReaderV2") @@ -11637,7 +12384,10 @@ begin if compression_type !== nothing desc["compression_type"] = Base.String(compression_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) if tf.eager_mode @@ -11655,7 +12405,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") @@ -11679,7 +12429,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -11697,7 +12450,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDatasetV2") do desc = tf.NodeDescription("PaddedBatchDatasetV2") @@ -11739,7 +12492,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, 
Toutput_types=nothing, output_shapes=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) if tf.eager_mode @@ -11757,7 +12513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") @@ -11796,7 +12552,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -11814,7 +12573,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySize") do desc = tf.NodeDescription("TensorArraySize") @@ -11829,7 +12588,10 @@ begin desc = tf.EagerOp("TensorArraySize") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_size(handle_, flow_in_; name=nothing) if tf.eager_mode @@ -11847,7 +12609,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapSize") do desc = tf.NodeDescription("OrderedMapSize") @@ -11886,7 +12648,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -11904,7 +12669,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniform") do desc = tf.NodeDescription("StatelessRandomUniform") @@ -11929,7 +12694,10 @@ begin end desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) if tf.eager_mode @@ -11947,7 +12715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToSparseSetOperation") do desc = tf.NodeDescription("SparseToSparseSetOperation") @@ -11994,7 +12762,10 @@ begin end desc["T"] = tf.data_type(set1_values_) desc["T"] = tf.data_type(set2_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res end function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.eager_mode @@ -12012,7 +12783,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) local desc tf.with_op_name(name, "TensorSummary") do desc = tf.NodeDescription("TensorSummary") @@ -12044,7 +12815,10 @@ begin desc["display_name"] = Base.String(display_name) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) if tf.eager_mode @@ -12062,7 +12836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, 
serialized_remote_fused_graph_execute_info=nothing) local desc tf.with_op_name(name, "RemoteFusedGraphExecute") do desc = tf.NodeDescription("RemoteFusedGraphExecute") @@ -12092,7 +12866,10 @@ begin if serialized_remote_fused_graph_execute_info !== nothing desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + tf.add_node(res[1], node) + return res[1] end function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) if tf.eager_mode @@ -12110,7 +12887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) local desc tf.with_op_name(name, "SparseSliceGrad") do desc = tf.NodeDescription("SparseSliceGrad") @@ -12133,7 +12910,10 @@ begin tf.add_input(desc, input_start_) tf.add_input(desc, output_indices_) desc["T"] = tf.data_type(backprop_val_grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) if tf.eager_mode @@ -12151,7 +12931,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumsum") do desc = tf.NodeDescription("Cumsum") @@ -12183,7 +12963,10 @@ begin end desc["T"] = tf.data_type(x_) desc["Tidx"] = tf.data_type(axis_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing) + tf.add_node(res[1], node) + return res[1] end function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) if tf.eager_mode @@ -12201,7 +12984,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") @@ -12248,7 +13031,10 @@ begin desc["T"] = tf.data_type(v_) desc["T"] = tf.data_type(gamma_) desc["T"] = tf.data_type(backprop_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], 
name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + tf.add_node(res[1], node) + return res end function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if tf.eager_mode @@ -12266,7 +13052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPoolGrad") do desc = tf.NodeDescription("AvgPoolGrad") @@ -12307,7 +13093,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -12325,7 +13114,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "RestoreV2") do desc = tf.NodeDescription("RestoreV2") @@ -12349,7 +13138,10 @@ begin if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing) + tf.add_node(res[1], node) + return res[1] end function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) if tf.eager_mode @@ -12367,7 +13159,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu6_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu6") do desc = tf.NodeDescription("Relu6") @@ -12381,7 +13173,10 @@ begin desc = tf.EagerOp("Relu6") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(relu6, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function relu6(features_; name=nothing) if tf.eager_mode @@ -12399,7 +13194,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyRMSProp") do desc = tf.NodeDescription("SparseApplyRMSProp") @@ -12453,7 +13248,10 @@ begin 
desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -12471,7 +13269,7 @@ end Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Recv") do desc = tf.NodeDescription("_Recv") @@ -12516,7 +13314,10 @@ begin if client_terminated !== nothing desc["client_terminated"] = Base.Bool(client_terminated) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + tf.add_node(res[1], node) + return res[1] end function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.eager_mode @@ -12534,7 +13335,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool") do desc = tf.NodeDescription("MaxPool") @@ -12572,7 +13373,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -12590,7 +13394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function invert_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) local desc tf.with_op_name(name, "Invert") do desc = tf.NodeDescription("Invert") @@ -12604,7 +13408,10 @@ begin desc = tf.EagerOp("Invert") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(invert, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function invert(x_; name=nothing) if tf.eager_mode @@ -12622,7 +13429,7 @@ end *NOTE*: Do not invoke this operator directly in Python. 
Graph rewrite pass is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) local desc tf.with_op_name(name, "_UnaryOpsComposition") do desc = tf.NodeDescription("_UnaryOpsComposition") @@ -12642,7 +13449,10 @@ begin desc["op_names"] = map(Base.identity, op_names) end desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing) + tf.add_node(res[1], node) + return res[1] end function _unary_ops_composition(x_; name=nothing, op_names=nothing) if tf.eager_mode @@ -12660,7 +13470,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapDataset") do desc = tf.NodeDescription("ExperimentalMapDataset") @@ -12711,7 +13521,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -12729,7 +13542,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") @@ -12771,7 +13584,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -12789,7 +13605,7 @@ end 
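Every hunk in this patch applies the same mechanical rewrite to the generated eager wrappers: the bare `tf.execute(desc)` (or `(tf.execute(desc))[1]`) return is replaced by four lines that keep the result, record a tf.TapeNode remembering the op and its tensor inputs, attach that node to the first output via tf.add_node, and only then return. A minimal sketch of the rewritten tail, for a hypothetical single-output op "Foo" (a placeholder, not an op touched by this patch):

    function foo_eager(x_; name=nothing)
        desc = tf.EagerOp("Foo")
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
        res = tf.execute(desc)                       # vector of result handles
        node = tf.TapeNode(foo, [x_], name=nothing)  # record op + inputs for the tape
        tf.add_node(res[1], node)                    # tape node hangs off the first output
        return res[1]                                # single-output ops unwrap the vector
    end

Multi-output ops such as ReaderRead, QuantizedAdd, and SparseToSparseSetOperation keep returning the full `res` vector instead of `res[1]`, but the tape node is still attached to `res[1]` only.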
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "ParseTensor") do desc = tf.NodeDescription("ParseTensor") @@ -12807,7 +13623,10 @@ begin if out_type !== nothing desc["out_type"] = Base.identity(out_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function parse_tensor(serialized_; name=nothing, out_type=nothing) if tf.eager_mode @@ -12825,7 +13644,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") @@ -12858,7 +13677,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -12876,7 +13698,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") @@ -12906,7 +13728,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -12924,7 +13749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomUniformInt") do desc = tf.NodeDescription("RandomUniformInt") @@ -12959,7 +13784,10 @@ begin desc["T"] = tf.data_type(shape_) desc["Tout"] = tf.data_type(minval_) desc["Tout"] = tf.data_type(maxval_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res[1] end function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -12977,7 +13805,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") @@ -13001,7 +13829,10 @@ begin tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) desc["Tlabels"] = tf.data_type(labels_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) if tf.eager_mode @@ -13019,7 +13850,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV2") do desc = tf.NodeDescription("TensorArrayReadV2") @@ -13043,7 +13874,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.eager_mode @@ -13061,7 +13895,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpTo") do desc = tf.NodeDescription("ReaderReadUpTo") @@ -13084,7 +13918,10 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing) + tf.add_node(res[1], node) + return res end function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) if tf.eager_mode @@ -13102,7 +13939,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, 
message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) local desc tf.with_op_name(name, "EncodeProto") do desc = tf.NodeDescription("EncodeProto") @@ -13141,7 +13978,10 @@ begin if Tinput_types !== nothing desc["Tinput_types"] = map(Base.identity, Tinput_types) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + tf.add_node(res[1], node) + return res[1] end function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) if tf.eager_mode @@ -13159,7 +13999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceGrad") do desc = tf.NodeDescription("StridedSliceGrad") @@ -13259,7 +14099,10 @@ begin desc["Index"] = tf.data_type(end_) desc["Index"] = tf.data_type(strides_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + tf.add_node(res[1], node) + return res[1] end function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.eager_mode @@ -13277,7 +14120,7 @@ end Replacement node for NcclReduce. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceSend") do desc = tf.NodeDescription("_NcclReduceSend") @@ -13309,7 +14152,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.eager_mode @@ -13327,7 +14173,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDataset") do desc = tf.NodeDescription("PaddedBatchDataset") @@ -13366,7 +14212,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) if tf.eager_mode @@ -13384,7 +14233,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatVecPermute") do desc = tf.NodeDescription("DataFormatVecPermute") @@ -13410,7 +14259,10 @@ begin desc["dst_format"] = Base.String(dst_format) end desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing) + tf.add_node(res[1], node) + return res[1] end function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) if tf.eager_mode @@ -13428,7 +14280,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) local desc tf.with_op_name(name, "StringFormat") do desc = 
tf.NodeDescription("StringFormat") @@ -13464,7 +14316,10 @@ begin if summarize !== nothing desc["summarize"] = Base.Int(summarize) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + tf.add_node(res[1], node) + return res[1] end function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) if tf.eager_mode @@ -13482,7 +14337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) local desc tf.with_op_name(name, "AsString") do desc = tf.NodeDescription("AsString") @@ -13526,7 +14381,10 @@ begin desc["fill"] = Base.String(fill) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + tf.add_node(res[1], node) + return res[1] end function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) if tf.eager_mode @@ -13544,7 +14402,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueMany") do desc = tf.NodeDescription("QueueEnqueueMany") @@ -13571,7 +14429,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.eager_mode @@ -13589,7 +14450,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "FakeParam") do desc = tf.NodeDescription("FakeParam") @@ -13610,7 +14471,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_param(; name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -13628,7 +14492,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ApplyAdagrad") do desc = tf.NodeDescription("ApplyAdagrad") @@ -13666,7 +14530,10 @@ begin desc["T"] = tf.data_type(accum_) desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.eager_mode @@ -13684,7 +14551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIteratorGetDevice") do desc = tf.NodeDescription("ExperimentalIteratorGetDevice") @@ -13696,7 +14563,10 @@ begin function experimental_iterator_get_device_eager(resource_; name=nothing) desc = tf.EagerOp("ExperimentalIteratorGetDevice") tf.add_input(desc, resource_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_iterator_get_device(resource_; name=nothing) if tf.eager_mode @@ -13714,7 +14584,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) local desc tf.with_op_name(name, "AdjustContrast") do desc = tf.NodeDescription("AdjustContrast") @@ -13737,7 +14607,10 @@ begin tf.add_input(desc, min_value_) tf.add_input(desc, max_value_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) if tf.eager_mode @@ -13755,7 +14628,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractImagePatches") do desc = tf.NodeDescription("ExtractImagePatches") @@ -13793,7 +14666,10 @@ begin desc["padding"] = Base.String(padding) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.eager_mode @@ -13811,7 +14687,7 @@ end 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslate") do desc = tf.NodeDescription("ScaleAndTranslate") @@ -13840,7 +14716,10 @@ begin desc["kernel_type"] = Base.String(kernel_type) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scale_and_translate, [images_, size_, scale_, translation_], name=nothing, kernel_type=nothing) + tf.add_node(res[1], node) + return res[1] end function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) if tf.eager_mode @@ -13858,7 +14737,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_none_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing) local desc tf.with_op_name(name, "OptionalNone") do desc @@ -13868,7 +14747,10 @@ begin end function optional_none_eager(; name=nothing) desc = tf.EagerOp("OptionalNone") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(optional_none, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function optional_none(; name=nothing) if tf.eager_mode @@ -13886,7 +14768,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "VariableV2") do desc = tf.NodeDescription("VariableV2") @@ -13919,7 +14801,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -13937,7 +14822,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function elu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Elu") do desc = tf.NodeDescription("Elu") @@ -13951,7 +14836,10 @@ begin desc = tf.EagerOp("Elu") tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(elu, [features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function elu(features_; name=nothing) if tf.eager_mode @@ -13969,7 +14857,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, 
updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterUpdate") do desc = tf.NodeDescription("ScatterUpdate") @@ -13999,7 +14887,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -14017,7 +14908,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorMod") do desc = tf.NodeDescription("FloorMod") @@ -14035,7 +14926,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(floor_mod, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function floor_mod(x_, y_; name=nothing) if tf.eager_mode @@ -14053,7 +14947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") @@ -14077,7 +14971,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -14095,7 +14992,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") @@ -14128,7 +15025,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, 
output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -14146,7 +15046,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ComputeAccidentalHits") do desc = tf.NodeDescription("ComputeAccidentalHits") @@ -14184,7 +15084,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -14202,7 +15105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "StringToNumber") do desc = tf.NodeDescription("StringToNumber") @@ -14220,7 +15123,10 @@ begin if out_type !== nothing desc["out_type"] = Base.identity(out_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function string_to_number(string_tensor_; name=nothing, out_type=nothing) if tf.eager_mode @@ -14238,7 +15144,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function snapshot_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing) local desc tf.with_op_name(name, "Snapshot") do desc = tf.NodeDescription("Snapshot") @@ -14252,7 +15158,10 @@ begin desc = tf.EagerOp("Snapshot") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(snapshot, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function snapshot(input_; name=nothing) if tf.eager_mode @@ -14270,7 +15179,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) local desc tf.with_op_name(name, "DeserializeIterator") do desc = tf.NodeDescription("DeserializeIterator") @@ -14285,7 +15194,10 @@ begin desc = tf.EagerOp("DeserializeIterator") tf.add_input(desc, resource_handle_) tf.add_input(desc, serialized_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function deserialize_iterator(resource_handle_, serialized_; name=nothing) if tf.eager_mode @@ -14303,7 +15215,7 @@ end """ begin - #= 
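Not every op unwraps the result: compute_accidental_hits above ends with return res because the kernel yields three tensors (indices, ids, weights), and the same shape recurs below for dense_to_sparse_set_operation, qr, and boosted_trees_calculate_best_gains_per_feature. In both variants the tape node is still registered only against res[1]; the epilogues differ only in the final return. A condensed view of the two shapes, reusing the TapeNode/add_node sketch above (record_and_return is an illustrative helper, not a name from the patch):

# Single-output ops return res[1]; multi-output ops return the whole tuple.
# Either way the tape is keyed on the first output only.
function record_and_return(op, inputs, res; multi::Bool=false, kwargs...)
    node = TapeNode(op, inputs; kwargs...)
    add_node(res[1], node)
    return multi ? res : res[1]
end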
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atan_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atan") do desc = tf.NodeDescription("Atan") @@ -14317,7 +15229,10 @@ begin desc = tf.EagerOp("Atan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(atan, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function atan(x_; name=nothing) if tf.eager_mode @@ -14335,7 +15250,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "MatMul") do desc = tf.NodeDescription("MatMul") @@ -14365,7 +15280,10 @@ begin end desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(b_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing) + tf.add_node(res[1], node) + return res[1] end function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) if tf.eager_mode @@ -14383,7 +15301,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function erfc_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erfc") do desc = tf.NodeDescription("Erfc") @@ -14397,7 +15315,10 @@ begin desc = tf.EagerOp("Erfc") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(erfc, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function erfc(x_; name=nothing) if tf.eager_mode @@ -14415,7 +15336,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SigmoidGrad") do desc = tf.NodeDescription("SigmoidGrad") @@ -14433,7 +15354,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sigmoid_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -14451,7 +15375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReaderV2") do desc = tf.NodeDescription("FixedLengthRecordReaderV2") @@ -14502,7 +15426,10 @@ begin if 
encoding !== nothing desc["encoding"] = Base.String(encoding) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + tf.add_node(res[1], node) + return res[1] end function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) if tf.eager_mode @@ -14520,7 +15447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV3") do desc = tf.NodeDescription("NonMaxSuppressionV3") @@ -14547,7 +15474,10 @@ begin tf.add_input(desc, score_threshold_) desc["T"] = tf.data_type(boxes_) desc["T"] = tf.data_type(scores_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) if tf.eager_mode @@ -14565,7 +15495,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropInput") do desc = tf.NodeDescription("Dilation2DBackpropInput") @@ -14605,7 +15535,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.eager_mode @@ -14623,7 +15556,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalOr") do desc = tf.NodeDescription("LogicalOr") @@ -14638,7 +15571,10 @@ begin desc = tf.EagerOp("LogicalOr") tf.add_input(desc, x_) tf.add_input(desc, y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(logical_or, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function logical_or(x_, y_; name=nothing) if tf.eager_mode @@ -14656,7 +15592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdadelta") do desc = tf.NodeDescription("ResourceApplyAdadelta") @@ -14697,7 +15633,10 @@ begin desc["T"] = tf.data_type(rho_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -14715,7 +15654,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToSparseSetOperation") do desc = tf.NodeDescription("DenseToSparseSetOperation") @@ -14756,7 +15695,10 @@ begin end desc["T"] = tf.data_type(set1_) desc["T"] = tf.data_type(set2_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res end function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.eager_mode @@ -14774,7 +15716,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProduced") do desc = tf.NodeDescription("ReaderNumRecordsProduced") @@ -14786,7 +15728,10 @@ begin function reader_num_records_produced_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProduced") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_num_records_produced(reader_handle_; name=nothing) if tf.eager_mode @@ -14804,7 +15749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) local desc tf.with_op_name(name, "AdjustHue") do desc = tf.NodeDescription("AdjustHue") @@ -14821,7 +15766,10 @@ begin tf.add_input(desc, images_) tf.add_input(desc, delta_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function adjust_hue(images_, delta_; name=nothing) if tf.eager_mode @@ -14839,7 +15787,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") @@ -14860,7 +15808,10 @@ begin if generate_quantiles !== nothing desc["generate_quantiles"] = Base.Bool(generate_quantiles) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) if tf.eager_mode @@ -14878,7 +15829,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") @@ -14932,7 +15883,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -14950,7 +15904,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function real_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RealDiv") do desc = tf.NodeDescription("RealDiv") @@ -14968,7 +15922,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(real_div, [x_, y_], name=nothing) 
+ tf.add_node(res[1], node) + return res[1] end function real_div(x_, y_; name=nothing) if tf.eager_mode @@ -14986,7 +15943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "RestoreSlice") do desc = tf.NodeDescription("RestoreSlice") @@ -15016,7 +15973,10 @@ begin if preferred_shard !== nothing desc["preferred_shard"] = Base.Int(preferred_shard) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing) + tf.add_node(res[1], node) + return res[1] end function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) if tf.eager_mode @@ -15034,7 +15994,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPopV2") do desc = tf.NodeDescription("StackPopV2") @@ -15052,7 +16012,10 @@ begin if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_pop_v2(handle_; name=nothing, elem_type=nothing) if tf.eager_mode @@ -15070,7 +16033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) local desc tf.with_op_name(name, "Reverse") do desc = tf.NodeDescription("Reverse") @@ -15087,7 +16050,10 @@ begin tf.add_input(desc, tensor_) tf.add_input(desc, dims_) desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reverse(tensor_, dims_; name=nothing) if tf.eager_mode @@ -15105,7 +16071,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) local desc tf.with_op_name(name, "DecodePng") do desc = tf.NodeDescription("DecodePng") @@ -15129,7 +16095,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) if tf.eager_mode @@ -15147,7 +16116,7 @@ end """ begin - #= 
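A side pattern worth noting in these epilogues: keyword attrs are normalized through Base conversions before being written into the op description (Base.Int for integer attrs such as preferred_shard and channels, Base.String for string attrs, Base.Bool for flags, Base.identity for dtypes and shapes, and map(Base.identity, xs) for list attrs), while the "T"/"Tindices" type attrs are never taken from keywords; in eager mode they are inferred from the live inputs with tf.data_type. An isolated illustration with hypothetical attr values and desc as a plain Dict standing in for a real tf.EagerOp:

# Mirrors the restore_slice/decode_png hunks above (values are made up).
desc = Dict{String,Any}()
channels = 3
dtype = UInt8
channels === nothing || (desc["channels"] = Base.Int(channels))  # integer attr
dtype === nothing || (desc["dtype"] = Base.identity(dtype))      # dtype attr passes through
# In the real eager methods, desc["T"] = tf.data_type(input_) then reads the
# element type off the input handle itself rather than off a keyword.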
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV2") do desc = tf.NodeDescription("NonMaxSuppressionV2") @@ -15171,7 +16140,10 @@ begin tf.add_input(desc, iou_threshold_) desc["T"] = tf.data_type(boxes_) desc["T"] = tf.data_type(scores_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) if tf.eager_mode @@ -15189,7 +16161,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igamma") do desc = tf.NodeDescription("Igamma") @@ -15207,7 +16179,10 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(igamma, [a_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function igamma(a_, x_; name=nothing) if tf.eager_mode @@ -15225,7 +16200,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function digamma_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Digamma") do desc = tf.NodeDescription("Digamma") @@ -15239,7 +16214,10 @@ begin desc = tf.EagerOp("Digamma") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(digamma, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function digamma(x_; name=nothing) if tf.eager_mode @@ -15257,7 +16235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdaMax") do desc = tf.NodeDescription("ResourceApplyAdaMax") @@ -15306,7 +16284,10 @@ begin desc["T"] = tf.data_type(beta2_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -15324,7 +16305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, 
data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "SpaceToDepth") do desc = tf.NodeDescription("SpaceToDepth") @@ -15350,7 +16331,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) if tf.eager_mode @@ -15368,7 +16352,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SqrtGrad") do desc = tf.NodeDescription("SqrtGrad") @@ -15386,7 +16370,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sqrt_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -15404,7 +16391,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstage") do desc = tf.NodeDescription("MapUnstage") @@ -15449,7 +16436,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -15467,7 +16457,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Qr") do desc = tf.NodeDescription("Qr") @@ -15492,7 +16482,10 @@ begin desc["full_matrices"] = Base.Bool(full_matrices) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing) + tf.add_node(res[1], node) + return res end function qr(input_; name=nothing, full_matrices=nothing) if tf.eager_mode @@ -15510,7 +16503,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, 
num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") @@ -15554,7 +16547,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) if tf.eager_mode @@ -15572,7 +16568,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "UnbatchGrad") do desc = tf.NodeDescription("UnbatchGrad") @@ -15608,7 +16604,10 @@ begin end desc["T"] = tf.data_type(original_input_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -15626,7 +16625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_softmax_graph(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "LogSoftmax") do desc = tf.NodeDescription("LogSoftmax") @@ -15640,7 +16639,10 @@ begin desc = tf.EagerOp("LogSoftmax") tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(log_softmax, [logits_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function log_softmax(logits_; name=nothing) if tf.eager_mode @@ -15658,7 +16660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "ResourceCountUpTo") do desc = tf.NodeDescription("ResourceCountUpTo") @@ -15676,7 +16678,10 @@ begin if limit !== nothing desc["limit"] = Base.Int(limit) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing) + tf.add_node(res[1], node) + return res[1] end function 
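The third method in every block is the public dispatcher: foo(args...; kwargs...) checks tf.eager_mode and routes to foo_eager, presumably falling back to foo_graph otherwise (the diff context consistently cuts each block off right after the if tf.eager_mode line, so the else arm is not visible here). The generated shape, condensed to a runnable stand-in (my_op and the Ref-based flag are illustrative, not names from the patch):

const eager_mode = Ref(false)  # stand-in for the tf.eager_mode flag

my_op_graph(x_; name=nothing) = (:graph_tensor, x_, name)  # placeholder body
my_op_eager(x_; name=nothing) = (:eager_handle, x_, name)  # placeholder body

# Public entry point: one dispatcher of exactly this shape per op.
function my_op(x_; name=nothing)
    if eager_mode[]
        my_op_eager(x_; name=name)
    else
        my_op_graph(x_; name=name)
    end
end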
resource_count_up_to(resource_; name=nothing, limit=nothing) if tf.eager_mode @@ -15694,7 +16699,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "AccumulateNV2") do desc = tf.NodeDescription("AccumulateNV2") @@ -15720,7 +16725,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = tf.data_type(inputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) if tf.eager_mode @@ -15738,7 +16746,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ParallelMapDataset") do desc = tf.NodeDescription("ParallelMapDataset") @@ -15798,7 +16806,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -15816,7 +16827,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomUniform") do desc = tf.NodeDescription("RandomUniform") @@ -15848,7 +16859,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["T"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.eager_mode @@ -15866,7 +16880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_transcode_graph(input_; name=nothing, 
input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeTranscode") do desc = tf.NodeDescription("UnicodeTranscode") @@ -15908,7 +16922,10 @@ begin if replace_control_characters !== nothing desc["replace_control_characters"] = Base.Bool(replace_control_characters) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + tf.add_node(res[1], node) + return res[1] end function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) if tf.eager_mode @@ -15926,7 +16943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReset") do desc = tf.NodeDescription("ReaderReset") @@ -15938,7 +16955,10 @@ begin function reader_reset_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderReset") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_reset(reader_handle_; name=nothing) if tf.eager_mode @@ -15956,7 +16976,7 @@ end Replacement node for NcclBroadcast. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastSend") do desc = tf.NodeDescription("_NcclBroadcastSend") @@ -15982,7 +17002,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) if tf.eager_mode @@ -16000,7 +17023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDeterminant") do desc = tf.NodeDescription("BatchMatrixDeterminant") @@ -16014,7 +17037,10 @@ begin desc = tf.EagerOp("BatchMatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_determinant(input_; name=nothing) if tf.eager_mode @@ -16032,7 +17058,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function less_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LessEqual") do desc = tf.NodeDescription("LessEqual") @@ -16050,7 +17076,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(less_equal, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function less_equal(x_, y_; name=nothing) if tf.eager_mode @@ -16068,7 +17097,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyGradientDescent") do desc = tf.NodeDescription("ApplyGradientDescent") @@ -16096,7 +17125,10 @@ begin desc["T"] = tf.data_type(var_) desc["T"] = tf.data_type(alpha_) desc["T"] = tf.data_type(delta_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -16114,7 +17146,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtN") do desc = tf.NodeDescription("SparseSegmentSqrtN") @@ -16137,7 +17169,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) if tf.eager_mode @@ -16155,7 +17190,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixLogarithm") do desc = tf.NodeDescription("MatrixLogarithm") @@ -16169,7 +17204,10 @@ begin desc = tf.EagerOp("MatrixLogarithm") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_logarithm, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_logarithm(input_; name=nothing) if tf.eager_mode @@ -16187,7 +17225,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMul") do desc = tf.NodeDescription("ScatterMul") @@ -16217,7 +17255,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -16235,7 +17276,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeJpeg") do desc = tf.NodeDescription("DecodeJpeg") @@ -16283,7 +17324,10 @@ begin if dct_method !== nothing desc["dct_method"] = Base.String(dct_method) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, 
dct_method=nothing) if tf.eager_mode @@ -16301,7 +17345,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueueV2") do desc = tf.NodeDescription("RandomShuffleQueueV2") @@ -16358,7 +17402,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -16376,7 +17423,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueManyV2") do desc = tf.NodeDescription("QueueEnqueueManyV2") @@ -16403,7 +17450,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.eager_mode @@ -16421,7 +17471,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") @@ -16475,7 +17525,10 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; 
name=nothing, use_locking=nothing) if tf.eager_mode @@ -16493,7 +17546,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "InterleaveDataset") do desc = tf.NodeDescription("InterleaveDataset") @@ -16538,7 +17591,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -16556,7 +17612,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPop") do desc = tf.NodeDescription("StackPop") @@ -16574,7 +17630,10 @@ begin if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_pop(handle_; name=nothing, elem_type=nothing) if tf.eager_mode @@ -16592,7 +17651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolV2") do desc = tf.NodeDescription("MaxPoolV2") @@ -16624,7 +17683,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -16642,7 +17704,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do desc 
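A side note on the unchanged context lines visible in these hunks: optional attributes are only set when the caller supplied them, and each is coerced to its op-def type. From the MaxPoolV2 and InterleaveDataset hunks above, scalar attributes go through Base.String / Base.Int / Base.Bool, and list attributes through map(Base.identity, xs):

    if padding !== nothing
        desc["padding"] = Base.String(padding)
    end
    if output_shapes !== nothing
        desc["output_shapes"] = map(Base.identity, output_shapes)
    end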
= tf.NodeDescription("BoostedTreesDeserializeEnsemble") @@ -16660,7 +17722,10 @@ begin tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) if tf.eager_mode @@ -16678,7 +17743,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) local desc tf.with_op_name(name, "LoadAndRemapMatrix") do desc = tf.NodeDescription("LoadAndRemapMatrix") @@ -16720,7 +17785,10 @@ begin if max_rows_in_memory !== nothing desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + tf.add_node(res[1], node) + return res[1] end function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) if tf.eager_mode @@ -16738,7 +17806,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalGradientDescent") do desc = tf.NodeDescription("SparseApplyProximalGradientDescent") @@ -16780,7 +17848,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -16798,7 +17869,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFuncStateless") do desc = tf.NodeDescription("PyFuncStateless") @@ -16828,7 +17899,10 @@ begin if Tout !== nothing 
desc["Tout"] = map(Base.identity, Tout) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + tf.add_node(res[1], node) + return res[1] end function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.eager_mode @@ -16846,7 +17920,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function where_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) local desc tf.with_op_name(name, "Where") do desc = tf.NodeDescription("Where") @@ -16860,7 +17934,10 @@ begin desc = tf.EagerOp("Where") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(where, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function where(input_; name=nothing) if tf.eager_mode @@ -16878,7 +17955,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) local desc tf.with_op_name(name, "Mfcc") do desc = tf.NodeDescription("Mfcc") @@ -16917,7 +17994,10 @@ begin if dct_coefficient_count !== nothing desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + tf.add_node(res[1], node) + return res[1] end function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) if tf.eager_mode @@ -16935,7 +18015,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) local desc tf.with_op_name(name, "CheckNumerics") do desc = tf.NodeDescription("CheckNumerics") @@ -16955,7 +18035,10 @@ begin desc["message"] = Base.String(message) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing) + tf.add_node(res[1], node) + return res[1] end function check_numerics(tensor_; name=nothing, message=nothing) if tf.eager_mode @@ -16973,7 +18056,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_compilation_result_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) local desc tf.with_op_name(name, "TPUCompilationResult") do desc @@ -16983,7 +18066,10 @@ begin end function tpu_compilation_result_eager(; name=nothing) 
desc = tf.EagerOp("TPUCompilationResult") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_compilation_result, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_compilation_result(; name=nothing) if tf.eager_mode @@ -17001,7 +18087,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") @@ -17034,7 +18120,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -17052,7 +18141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanGrad") do desc = tf.NodeDescription("SparseSegmentMeanGrad") @@ -17078,7 +18167,10 @@ begin tf.add_input(desc, output_dim0_) desc["T"] = tf.data_type(grad_) desc["Tidx"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) if tf.eager_mode @@ -17096,7 +18188,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "TryRpc") do desc = tf.NodeDescription("TryRpc") @@ -17137,7 +18229,10 @@ begin if timeout_in_ms !== nothing desc["timeout_in_ms"] = Base.Int(timeout_in_ms) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + tf.add_node(res[1], node) + return res end function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) if tf.eager_mode @@ -17155,7 +18250,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixTriangularSolve") do desc = tf.NodeDescription("BatchMatrixTriangularSolve") @@ -17185,7 +18280,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) if tf.eager_mode @@ -17203,7 +18301,7 @@ end A graph node which represents a return value of a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Retval") do desc = tf.NodeDescription("_Retval") @@ -17223,7 +18321,10 @@ begin desc["index"] = Base.Int(index) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing) + tf.add_node(res[1], node) + return res[1] end function _retval(input_; name=nothing, index=nothing) if tf.eager_mode @@ -17241,7 +18342,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCounts") do desc = tf.NodeDescription("UniqueWithCounts") @@ -17266,7 +18367,10 @@ begin desc["out_idx"] = Base.identity(out_idx) end desc["T"] = tf.data_type(x_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing) + tf.add_node(res[1], node) + return res end function unique_with_counts(x_; name=nothing, out_idx=nothing) if tf.eager_mode @@ -17284,7 +18388,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Add") do desc = tf.NodeDescription("Add") @@ -17302,7 +18406,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(add, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function add(x_, y_; name=nothing) if tf.eager_mode @@ -17320,7 +18427,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalScanDataset") do desc = tf.NodeDescription("ExperimentalScanDataset") @@ -17374,7 +18481,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -17392,7 +18502,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignAddVariableOp") do desc = tf.NodeDescription("AssignAddVariableOp") @@ -17415,7 +18525,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.eager_mode @@ -17433,7 +18546,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SplitV") do desc = tf.NodeDescription("SplitV") @@ -17467,7 +18580,10 @@ begin end desc["T"] = tf.data_type(value_) desc["Tlen"] = tf.data_type(size_splits_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing) + tf.add_node(res[1], node) + return res end function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) if tf.eager_mode @@ -17485,7 +18601,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) local desc tf.with_op_name(name, "Assign") do desc = tf.NodeDescription("Assign") @@ -17515,7 +18631,10 @@ begin end desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function 
assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) if tf.eager_mode @@ -17533,7 +18652,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolWithArgmax") do desc = tf.NodeDescription("MaxPoolWithArgmax") @@ -17570,7 +18689,10 @@ begin desc["padding"] = Base.String(padding) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res end function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -17588,7 +18710,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedReluX") do desc = tf.NodeDescription("QuantizedReluX") @@ -17622,7 +18744,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["Tinput"] = tf.data_type(features_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.eager_mode @@ -17640,7 +18765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueue") do desc = tf.NodeDescription("RandomShuffleQueue") @@ -17697,7 +18822,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -17715,7 +18843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft2d_graph(input_; name=nothing) + #= 
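Multi-output ops get a slightly different rewrite: unique_with_counts, split_v, max_pool_with_argmax, and quantized_relu_x in the hunks above return the whole result vector (return res) instead of res[1], yet the TapeNode is still attached only to res[1], so only the first output handle carries a back-reference to the recorded call. Reflowed from the UniqueWithCounts hunk:

    function unique_with_counts_eager(x_; name=nothing, out_idx=nothing)
        desc = tf.EagerOp("UniqueWithCounts")
        tf.add_input(desc, x_)
        if out_idx !== nothing
            desc["out_idx"] = Base.identity(out_idx)
        end
        desc["T"] = tf.data_type(x_)
        res = tf.execute(desc)   # three outputs: y, idx, count
        node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing)
        # Only the first output is registered on the tape; all of them are returned.
        tf.add_node(res[1], node)
        return res
    end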
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT2D") do desc = tf.NodeDescription("FFT2D") @@ -17729,7 +18857,10 @@ begin desc = tf.EagerOp("FFT2D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fft2d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fft2d(input_; name=nothing) if tf.eager_mode @@ -17747,7 +18878,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalThreadPoolDataset") @@ -17774,7 +18905,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -17792,7 +18926,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") @@ -17825,7 +18959,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) if tf.eager_mode @@ -17843,7 +18980,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNGrad") do desc = tf.NodeDescription("SparseSegmentSqrtNGrad") @@ -17869,7 +19006,10 @@ begin tf.add_input(desc, output_dim0_) desc["T"] = tf.data_type(grad_) desc["Tidx"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) if tf.eager_mode @@ -17887,7 +19027,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function real_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) local desc tf.with_op_name(name, "Real") do desc = tf.NodeDescription("Real") @@ -17901,7 +19041,10 @@ begin desc = tf.EagerOp("Real") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(real, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function real(input_; name=nothing) if tf.eager_mode @@ -17919,7 +19062,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstage") do desc = tf.NodeDescription("OrderedMapUnstage") @@ -17964,7 +19107,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -17982,7 +19128,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT2D") do desc = tf.NodeDescription("RFFT2D") @@ -17997,7 +19143,10 @@ begin desc = tf.EagerOp("RFFT2D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rfft2d(input_, fft_length_; name=nothing) if tf.eager_mode @@ -18015,7 +19164,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) local desc tf.with_op_name(name, "VarIsInitializedOp") do desc = tf.NodeDescription("VarIsInitializedOp") @@ -18027,7 +19176,10 @@ begin function var_is_initialized_op_eager(resource_; name=nothing) desc = tf.EagerOp("VarIsInitializedOp") tf.add_input(desc, resource_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing) + tf.add_node(res[1], 
node) + return res[1] end function var_is_initialized_op(resource_; name=nothing) if tf.eager_mode @@ -18045,7 +19197,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") @@ -18066,7 +19218,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -18084,7 +19239,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atan2_graph(y_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) local desc tf.with_op_name(name, "Atan2") do desc = tf.NodeDescription("Atan2") @@ -18102,7 +19257,10 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(atan2, [y_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function atan2(y_, x_; name=nothing) if tf.eager_mode @@ -18120,7 +19278,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoisson") do desc = tf.NodeDescription("RandomPoisson") @@ -18163,7 +19321,10 @@ begin end desc["S"] = tf.data_type(shape_) desc["dtype"] = tf.data_type(rate_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) if tf.eager_mode @@ -18181,7 +19342,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) local desc tf.with_op_name(name, "ReverseSequence") do desc = tf.NodeDescription("ReverseSequence") @@ -18212,7 +19373,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tlen"] = tf.data_type(seq_lengths_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing) + 
tf.add_node(res[1], node) + return res[1] end function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) if tf.eager_mode @@ -18230,7 +19394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "OutfeedEnqueue") do desc = tf.NodeDescription("OutfeedEnqueue") @@ -18250,7 +19414,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function outfeed_enqueue(input_; name=nothing, dtype=nothing) if tf.eager_mode @@ -18268,7 +19435,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sub_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Sub") do desc = tf.NodeDescription("Sub") @@ -18286,7 +19453,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sub, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sub(x_, y_; name=nothing) if tf.eager_mode @@ -18304,7 +19474,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) local desc tf.with_op_name(name, "StringSplit") do desc = tf.NodeDescription("StringSplit") @@ -18330,7 +19500,10 @@ begin if skip_empty !== nothing desc["skip_empty"] = Base.Bool(skip_empty) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing) + tf.add_node(res[1], node) + return res end function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) if tf.eager_mode @@ -18348,7 +19521,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumprod") do desc = tf.NodeDescription("Cumprod") @@ -18380,7 +19553,10 @@ begin end desc["T"] = tf.data_type(x_) desc["Tidx"] = tf.data_type(axis_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing) + tf.add_node(res[1], node) + return res[1] end function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) if tf.eager_mode @@ -18398,7 +19574,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
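With Add and Sub now taped (hunks above), a basic eager round trip looks like the sketch below. Treat the setup as an assumption: it reuses the TensorHandle/RawTensor constructors from earlier in this series, and Ops is assumed to be the module holding the generated wrappers.

    # Assumes tf.eager_mode is enabled and handles wrap raw tensors as earlier
    # in this series.
    x = TensorHandle(RawTensor([1, 2]))
    y = TensorHandle(RawTensor([3, 4]))
    z = Ops.add(x, y)   # dispatches to add_eager; the result handle is taped
    w = Ops.sub(z, x)   # each eager call records its own TapeNode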
function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "QuantizedResizeBilinear") do desc = tf.NodeDescription("QuantizedResizeBilinear") @@ -18432,7 +19608,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(images_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res end function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -18450,7 +19629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleExample") do desc = tf.NodeDescription("ParseSingleExample") @@ -18506,7 +19685,10 @@ begin if dense_shapes !== nothing desc["dense_shapes"] = map(Base.identity, dense_shapes) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + tf.add_node(res[1], node) + return res end function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) if tf.eager_mode @@ -18524,7 +19706,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "IsVariableInitialized") do desc = tf.NodeDescription("IsVariableInitialized") @@ -18544,7 +19726,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(ref_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function is_variable_initialized(ref_; name=nothing, dtype=nothing) if tf.eager_mode @@ -18562,7 +19747,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") @@ -18583,7 +19768,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -18601,7 +19789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListConcatV2") do desc = tf.NodeDescription("TensorListConcatV2") @@ -18638,7 +19826,10 @@ begin desc["shape_type"] = Base.identity(shape_type) end desc["shape_type"] = tf.data_type(element_shape_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_concat_v2, [input_handle_, element_shape_, leading_dims_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res end function tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -18656,7 +19847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV2") do desc = tf.NodeDescription("CudnnRNNV2") @@ -18729,7 +19920,10 @@ begin desc["T"] = tf.data_type(input_h_) desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) if tf.eager_mode @@ -18747,7 +19941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterSub") do desc = tf.NodeDescription("ResourceScatterSub") @@ -18776,7 +19970,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_sub(resource_, 
indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -18794,7 +19991,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignAdd") do desc = tf.NodeDescription("AssignAdd") @@ -18818,7 +20015,10 @@ begin end desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function assign_add(ref_, value_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -18836,7 +20036,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorDataset") do desc = tf.NodeDescription("TensorDataset") @@ -18860,7 +20060,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -18878,7 +20081,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) local desc tf.with_op_name(name, "Bucketize") do desc = tf.NodeDescription("Bucketize") @@ -18898,7 +20101,10 @@ begin desc["boundaries"] = map(Base.identity, boundaries) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing) + tf.add_node(res[1], node) + return res[1] end function bucketize(input_; name=nothing, boundaries=nothing) if tf.eager_mode @@ -18916,7 +20122,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMax") do desc = tf.NodeDescription("SparseReduceMax") @@ -18945,7 +20151,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["T"] = tf.data_type(input_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_reduce_max(input_indices_, input_values_, input_shape_, 
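The type attributes are consistently inferred at call time rather than from a static signature: desc["T"] = tf.data_type(x) reads the dtype off the runtime handle. When several inputs share "T" (as in the AssignAdd hunk above, which assigns it from ref_ and then from value_), the last assignment wins, which silently assumes the caller passed consistently typed handles. The SparseReduceMax hunk above, reflowed:

    desc = tf.EagerOp("SparseReduceMax")
    tf.add_input(desc, input_indices_)
    tf.add_input(desc, input_values_)
    tf.add_input(desc, input_shape_)
    tf.add_input(desc, reduction_axes_)
    if keep_dims !== nothing
        desc["keep_dims"] = Base.Bool(keep_dims)
    end
    # "T" comes from the values tensor only; the indices and shape inputs are
    # fixed int64 in the op definition and need no attribute.
    desc["T"] = tf.data_type(input_values_)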
reduction_axes_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -18963,7 +20172,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") @@ -19001,7 +20210,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -19019,7 +20231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradWithShape") do desc = tf.NodeDescription("TensorArrayGradWithShape") @@ -19048,7 +20260,10 @@ begin if source !== nothing desc["source"] = Base.String(source) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing) + tf.add_node(res[1], node) + return res end function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) if tf.eager_mode @@ -19066,7 +20281,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV3") do desc = tf.NodeDescription("TensorArrayCloseV3") @@ -19078,7 +20293,10 @@ begin function tensor_array_close_v3_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV3") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_close_v3(handle_; name=nothing) if tf.eager_mode @@ -19096,7 +20314,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, 
"NonMaxSuppressionWithOverlaps") do desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") @@ -19120,7 +20338,10 @@ begin tf.add_input(desc, max_output_size_) tf.add_input(desc, overlap_threshold_) tf.add_input(desc, score_threshold_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) if tf.eager_mode @@ -19138,7 +20359,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) local desc tf.with_op_name(name, "Pack") do desc = tf.NodeDescription("Pack") @@ -19170,7 +20391,10 @@ begin desc["axis"] = Base.Int(axis) end desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing) + tf.add_node(res[1], node) + return res[1] end function pack(values_; name=nothing, N=nothing, axis=nothing) if tf.eager_mode @@ -19188,7 +20412,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV2") do desc = tf.NodeDescription("TensorArrayGradV2") @@ -19209,7 +20433,10 @@ begin if source !== nothing desc["source"] = Base.String(source) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) if tf.eager_mode @@ -19227,7 +20454,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignSubVariableOp") do desc = tf.NodeDescription("AssignSubVariableOp") @@ -19250,7 +20477,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.eager_mode @@ -19268,7 +20498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_fft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT2D") do desc = tf.NodeDescription("BatchFFT2D") @@ -19280,7 +20510,10 @@ begin function batch_fft2d_eager(input_; 
name=nothing) desc = tf.EagerOp("BatchFFT2D") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_fft2d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_fft2d(input_; name=nothing) if tf.eager_mode @@ -19298,7 +20531,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "CloseSummaryWriter") do desc = tf.NodeDescription("CloseSummaryWriter") @@ -19310,7 +20543,10 @@ begin function close_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("CloseSummaryWriter") tf.add_input(desc, writer_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(close_summary_writer, [writer_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function close_summary_writer(writer_; name=nothing) if tf.eager_mode @@ -19328,7 +20564,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rank_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) local desc tf.with_op_name(name, "Rank") do desc = tf.NodeDescription("Rank") @@ -19342,7 +20578,10 @@ begin desc = tf.EagerOp("Rank") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rank, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rank(input_; name=nothing) if tf.eager_mode @@ -19360,7 +20599,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT3D") do desc = tf.NodeDescription("FFT3D") @@ -19374,7 +20613,10 @@ begin desc = tf.EagerOp("FFT3D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fft3d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fft3d(input_; name=nothing) if tf.eager_mode @@ -19392,7 +20634,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrl") do desc = tf.NodeDescription("ApplyFtrl") @@ -19440,7 +20682,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -19458,7 +20703,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op 
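Side-effecting ops with no tensor outputs (CloseSummaryWriter, TensorArrayCloseV3, Abort, and the various assign/enqueue ops in these hunks) get the identical treatment, including the res[1] indexing. Whether tf.execute returns a placeholder handle for zero-output ops is not visible in this diff, so the comment below flags that as an assumption. Reflowed from the CloseSummaryWriter hunk above:

    function close_summary_writer_eager(writer_; name=nothing)
        desc = tf.EagerOp("CloseSummaryWriter")
        tf.add_input(desc, writer_)
        res = tf.execute(desc)
        node = tf.TapeNode(close_summary_writer, [writer_], name=nothing)
        # CloseSummaryWriter produces no tensors; res[1] assumes execute still
        # yields at least one entry for zero-output ops.
        tf.add_node(res[1], node)
        return res[1]
    end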
function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) local desc tf.with_op_name(name, "Abort") do desc = tf.NodeDescription("Abort") @@ -19479,7 +20724,10 @@ begin if exit_without_error !== nothing desc["exit_without_error"] = Base.Bool(exit_without_error) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing) + tf.add_node(res[1], node) + return res[1] end function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) if tf.eager_mode @@ -19497,7 +20745,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) local desc tf.with_op_name(name, "AudioSpectrogram") do desc = tf.NodeDescription("AudioSpectrogram") @@ -19527,7 +20775,10 @@ begin if magnitude_squared !== nothing desc["magnitude_squared"] = Base.Bool(magnitude_squared) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + tf.add_node(res[1], node) + return res[1] end function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) if tf.eager_mode @@ -19545,7 +20796,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "VariableShape") do desc = tf.NodeDescription("VariableShape") @@ -19563,7 +20814,10 @@ begin if out_type !== nothing desc["out_type"] = Base.identity(out_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function variable_shape(input_; name=nothing, out_type=nothing) if tf.eager_mode @@ -19581,7 +20835,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueueV2") do desc = tf.NodeDescription("FIFOQueueV2") @@ -19620,7 +20874,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, 
capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -19638,7 +20895,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Variable") do desc = tf.NodeDescription("Variable") @@ -19671,7 +20928,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -19689,7 +20949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestCreateTreeVariable") do desc = tf.NodeDescription("TensorForestCreateTreeVariable") @@ -19704,7 +20964,10 @@ begin desc = tf.EagerOp("TensorForestCreateTreeVariable") tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) if tf.eager_mode @@ -19722,7 +20985,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradWithArgmax") @@ -19763,7 +21026,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(grad_) desc["Targmax"] = tf.data_type(argmax_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -19781,7 +21047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "RefSwitch") do desc = tf.NodeDescription("RefSwitch") @@ -19803,7 +21069,10 @@ begin tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) - 
tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing) + tf.add_node(res[1], node) + return res end function ref_switch(data_, pred_; name=nothing) if tf.eager_mode @@ -19821,7 +21090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_fprint_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) local desc tf.with_op_name(name, "SdcaFprint") do desc = tf.NodeDescription("SdcaFprint") @@ -19833,7 +21102,10 @@ begin function sdca_fprint_eager(input_; name=nothing) desc = tf.EagerOp("SdcaFprint") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sdca_fprint, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sdca_fprint(input_; name=nothing) if tf.eager_mode @@ -19851,7 +21123,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalChooseFastestDataset") do desc = tf.NodeDescription("ExperimentalChooseFastestDataset") @@ -19887,7 +21159,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_choose_fastest_dataset, [input_datasets_], name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -19905,7 +21180,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyRelu") do desc = tf.NodeDescription("LeakyRelu") @@ -19925,7 +21200,10 @@ begin desc["alpha"] = Base.identity(alpha) end desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing) + tf.add_node(res[1], node) + return res[1] end function leaky_relu(features_; name=nothing, alpha=nothing) if tf.eager_mode @@ -19943,7 +21221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) local desc tf.with_op_name(name, "IdentityN") do desc = tf.NodeDescription("IdentityN") @@ -19961,7 +21239,10 @@ begin if T !== nothing desc["T"] = map(Base.identity, T) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing) + tf.add_node(res[1], 
node) + return res[1] end function identity_n(input_; name=nothing, T=nothing) if tf.eager_mode @@ -19979,7 +21260,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV2") do desc = tf.NodeDescription("CudnnRNNBackpropV2") @@ -20077,7 +21358,10 @@ begin desc["T"] = tf.data_type(output_h_backprop_) desc["T"] = tf.data_type(output_c_backprop_) desc["T"] = tf.data_type(reserve_space_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -20095,7 +21379,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "RequantizationRange") do desc = tf.NodeDescription("RequantizationRange") @@ -20120,7 +21404,10 @@ begin tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) desc["Tinput"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing) + tf.add_node(res[1], node) + return res end function requantization_range(input_, input_min_, input_max_; name=nothing) if tf.eager_mode @@ -20138,7 +21425,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function maximum_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Maximum") do desc = tf.NodeDescription("Maximum") @@ -20156,7 +21443,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(maximum, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function maximum(x_, y_; name=nothing) if tf.eager_mode @@ -20174,7 +21464,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) local desc tf.with_op_name(name, "Reshape") do desc = tf.NodeDescription("Reshape") @@ -20193,7 +21483,10 @@ begin tf.add_input(desc, shape_) desc["T"] = tf.data_type(tensor_) desc["Tshape"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reshape(tensor_, shape_; name=nothing) if tf.eager_mode @@ -20211,7 +21504,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "MatrixSolveLs") do desc = tf.NodeDescription("MatrixSolveLs") @@ -20238,7 +21531,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) if tf.eager_mode @@ -20256,7 +21552,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TFRecordDataset") do desc = tf.NodeDescription("TFRecordDataset") @@ -20274,7 +21570,10 @@ begin tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) if tf.eager_mode @@ -20292,7 +21591,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") @@ -20319,7 +21618,10 @@ begin if logits_dimension !== nothing desc["logits_dimension"] = Base.Int(logits_dimension) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + tf.add_node(res[1], node) + return res[1] end function 
boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.eager_mode @@ -20337,7 +21639,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) local desc tf.with_op_name(name, "HSVToRGB") do desc = tf.NodeDescription("HSVToRGB") @@ -20351,7 +21653,10 @@ begin desc = tf.EagerOp("HSVToRGB") tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function hsv_to_rgb(images_; name=nothing) if tf.eager_mode @@ -20369,7 +21674,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") @@ -20396,7 +21701,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -20414,7 +21722,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterDiv") do desc = tf.NodeDescription("ScatterDiv") @@ -20444,7 +21752,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -20462,7 +21773,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) local desc tf.with_op_name(name, "DecodeWav") do desc = tf.NodeDescription("DecodeWav") @@ -20491,7 +21802,10 @@ begin if desired_samples !== nothing 
desc["desired_samples"] = Base.Int(desired_samples) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing) + tf.add_node(res[1], node) + return res end function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) if tf.eager_mode @@ -20509,7 +21823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log") do desc = tf.NodeDescription("Log") @@ -20523,7 +21837,10 @@ begin desc = tf.EagerOp("Log") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(log, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function log(x_; name=nothing) if tf.eager_mode @@ -20541,7 +21858,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "SaveV2") do desc = tf.NodeDescription("SaveV2") @@ -20568,7 +21885,10 @@ begin if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing) + tf.add_node(res[1], node) + return res[1] end function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) if tf.eager_mode @@ -20586,7 +21906,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deep_copy_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) local desc tf.with_op_name(name, "DeepCopy") do desc = tf.NodeDescription("DeepCopy") @@ -20600,7 +21920,10 @@ begin desc = tf.EagerOp("DeepCopy") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(deep_copy, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function deep_copy(x_; name=nothing) if tf.eager_mode @@ -20618,7 +21941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ModelDataset") do desc = tf.NodeDescription("ModelDataset") @@ -20642,7 +21965,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -20660,7 
+21986,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSequenceExample") do desc = tf.NodeDescription("ParseSequenceExample") @@ -20773,7 +22099,10 @@ begin if feature_list_dense_shapes !== nothing desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + tf.add_node(res[1], node) + return res end function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) if tf.eager_mode @@ -20791,7 +22120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sinh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sinh") do desc = tf.NodeDescription("Sinh") @@ -20805,7 +22134,10 @@ begin desc = tf.EagerOp("Sinh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sinh, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sinh(x_; name=nothing) if tf.eager_mode @@ -20823,7 +22155,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorV2") do desc = tf.NodeDescription("IteratorV2") @@ -20856,7 +22188,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -20874,7 +22209,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV2") do desc = tf.NodeDescription("TensorArrayWriteV2") @@ -20897,7 +22232,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -20915,7 +22253,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListElementShape") do desc = tf.NodeDescription("TensorListElementShape") @@ -20933,7 +22271,10 @@ begin if shape_type !== nothing desc["shape_type"] = Base.identity(shape_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) if tf.eager_mode @@ -20951,7 +22292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSizeV2") do desc = tf.NodeDescription("QueueSizeV2") @@ -20963,7 +22304,10 @@ begin function queue_size_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSizeV2") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_size_v2, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_size_v2(handle_; name=nothing) if tf.eager_mode @@ -20981,7 +22325,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function expm1_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) local desc tf.with_op_name(name, "Expm1") do desc = tf.NodeDescription("Expm1") @@ -20995,7 +22339,10 @@ begin desc = tf.EagerOp("Expm1") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(expm1, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function expm1(x_; name=nothing) if tf.eager_mode @@ -21013,7 +22360,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixBandPart") do desc = tf.NodeDescription("BatchMatrixBandPart") @@ -21033,7 +22380,10 @@ begin tf.add_input(desc, num_lower_) tf.add_input(desc, num_upper_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) if tf.eager_mode @@ -21051,7 +22401,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ConcatenateDataset") do desc = tf.NodeDescription("ConcatenateDataset") @@ -21078,7 +22428,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -21096,7 +22449,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_gif_graph(contents_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_gif_graph(contents_; name=nothing) local desc tf.with_op_name(name, "DecodeGif") do desc = tf.NodeDescription("DecodeGif") @@ -21108,7 +22461,10 @@ begin function decode_gif_eager(contents_; name=nothing) desc = tf.EagerOp("DecodeGif") tf.add_input(desc, contents_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_gif, [contents_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_gif(contents_; name=nothing) if tf.eager_mode @@ -21126,7 +22482,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, 
num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicate") do desc = tf.NodeDescription("TPUReplicate") @@ -21231,7 +22587,10 @@ begin if step_marker_location !== nothing desc["step_marker_location"] = Base.String(step_marker_location) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) if tf.eager_mode @@ -21249,7 +22608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEigV2") do desc = tf.NodeDescription("BatchSelfAdjointEigV2") @@ -21274,7 +22633,10 @@ begin desc["compute_v"] = Base.Bool(compute_v) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing) + tf.add_node(res[1], node) + return res end function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) if tf.eager_mode @@ -21292,7 +22654,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Shape") do desc = tf.NodeDescription("Shape") @@ -21312,7 +22674,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end 
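The hunks above and below all apply the same mechanical rewrite to the generated eager wrappers: instead of returning (tf.execute(desc))[1] directly, each *_eager method now keeps the full result, builds a tf.TapeNode recording the forward function, its eager inputs, and its attributes, registers that node with tf.add_node, and only then returns. Single-output ops return res[1]; multi-output ops such as RefSwitch, DecodeWav, RequantizationRange, and CTCBeamSearchDecoder return the whole res. A minimal self-contained sketch of that bookkeeping, using hypothetical stand-ins for the package's tape machinery rather than its real types:

    # Illustrative stand-ins only -- not the package's actual TapeNode/add_node.
    struct TapeNode
        op::Function              # forward function, e.g. log or maximum
        args::Vector{Any}         # eager inputs, kept for the backward pass
        kwargs::Dict{Symbol,Any}  # attrs such as name, axis, use_locking
    end

    const tape = IdDict{Any,TapeNode}()  # output handle => node that produced it

    add_node(out, node::TapeNode) = (tape[out] = node; nothing)

    # The shape every rewritten *_eager method now has:
    function log_eager(x)
        res = [log(x)]                    # stands in for tf.execute(desc)
        node = TapeNode(log_eager, Any[x], Dict{Symbol,Any}())
        add_node(res[1], node)            # keyed on the first output only
        return res[1]                     # multi-output ops return res instead
    end

Note that even the multi-output hunks call tf.add_node(res[1], node), so the tape is keyed on an op's first output in every case; presumably the gradient machinery looks a producer up by that handle.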
function shape(input_; name=nothing, out_type=nothing) if tf.eager_mode @@ -21330,7 +22695,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RepeatDataset") do desc = tf.NodeDescription("RepeatDataset") @@ -21357,7 +22722,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -21375,7 +22743,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradBoxes") do desc = tf.NodeDescription("CropAndResizeGradBoxes") @@ -21404,7 +22772,10 @@ begin desc["method"] = Base.String(method) end desc["T"] = tf.data_type(image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing) + tf.add_node(res[1], node) + return res[1] end function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) if tf.eager_mode @@ -21422,7 +22793,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "ReciprocalGrad") do desc = tf.NodeDescription("ReciprocalGrad") @@ -21440,7 +22811,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reciprocal_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -21458,7 +22832,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixSolve") do desc = tf.NodeDescription("BatchMatrixSolve") @@ -21482,7 +22856,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) if 
tf.eager_mode @@ -21500,7 +22877,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTableV2") do desc = tf.NodeDescription("MutableHashTableV2") @@ -21539,7 +22916,10 @@ begin if value_dtype !== nothing desc["value_dtype"] = Base.identity(value_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.eager_mode @@ -21557,7 +22937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function exit_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "Exit") do desc = tf.NodeDescription("Exit") @@ -21571,7 +22951,10 @@ begin desc = tf.EagerOp("Exit") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(exit, [data_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function exit(data_; name=nothing) if tf.eager_mode @@ -21589,7 +22972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRN") do desc = tf.NodeDescription("LRN") @@ -21627,7 +23010,10 @@ begin desc["beta"] = Base.identity(beta) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + tf.add_node(res[1], node) + return res[1] end function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) if tf.eager_mode @@ -21645,7 +23031,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "StatelessIf") do desc = tf.NodeDescription("StatelessIf") @@ -21686,7 +23072,10 @@ begin desc["else_branch"] = Base.identity(else_branch) end desc["Tcond"] = tf.data_type(cond_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, 
Tout=nothing, then_branch=nothing, else_branch=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) if tf.eager_mode @@ -21704,7 +23093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListSetItem") do desc = tf.NodeDescription("TensorListSetItem") @@ -21730,7 +23119,10 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end desc["element_dtype"] = tf.data_type(item_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -21748,7 +23140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rsqrt_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rsqrt") do desc = tf.NodeDescription("Rsqrt") @@ -21762,7 +23154,10 @@ begin desc = tf.EagerOp("Rsqrt") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rsqrt, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rsqrt(x_; name=nothing) if tf.eager_mode @@ -21780,7 +23175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") @@ -21862,7 +23257,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) desc["Tbias"] = tf.data_type(bias_) desc["Tsummand"] = tf.data_type(summand_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, 
summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -21880,7 +23278,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) local desc tf.with_op_name(name, "DeleteSessionTensor") do desc = tf.NodeDescription("DeleteSessionTensor") @@ -21892,7 +23290,10 @@ begin function delete_session_tensor_eager(handle_; name=nothing) desc = tf.EagerOp("DeleteSessionTensor") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function delete_session_tensor(handle_; name=nothing) if tf.eager_mode @@ -21910,7 +23311,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) local desc tf.with_op_name(name, "OneHot") do desc = tf.NodeDescription("OneHot") @@ -21949,7 +23350,10 @@ begin desc["TI"] = tf.data_type(indices_) desc["T"] = tf.data_type(on_value_) desc["T"] = tf.data_type(off_value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing) + tf.add_node(res[1], node) + return res[1] end function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) if tf.eager_mode @@ -21967,7 +23371,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrl") do desc = tf.NodeDescription("ResourceApplyFtrl") @@ -22012,7 +23416,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -22030,7 +23437,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizerV2") do desc = tf.NodeDescription("SdcaOptimizerV2") @@ -22128,7 +23535,10 @@ begin if num_inner_iterations !== nothing desc["num_inner_iterations"] = Base.Int(num_inner_iterations) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + tf.add_node(res[1], node) + return res end function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) if tf.eager_mode @@ -22146,7 +23556,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueue") do desc = tf.NodeDescription("QueueEnqueue") @@ -22173,7 +23583,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.eager_mode @@ -22191,7 +23604,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "ConditionalAccumulator") do desc = tf.NodeDescription("ConditionalAccumulator") @@ -22230,7 +23643,10 @@ begin if reduction_type !== nothing desc["reduction_type"] = Base.String(reduction_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, 
reduction_type=nothing) + tf.add_node(res[1], node) + return res[1] end function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) if tf.eager_mode @@ -22248,7 +23664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCBeamSearchDecoder") do desc = tf.NodeDescription("CTCBeamSearchDecoder") @@ -22286,7 +23702,10 @@ begin if merge_repeated !== nothing desc["merge_repeated"] = Base.Bool(merge_repeated) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + tf.add_node(res[1], node) + return res end function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) if tf.eager_mode @@ -22304,7 +23723,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReader") do desc = tf.NodeDescription("WholeFileReader") @@ -22325,7 +23744,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -22343,7 +23765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyRMSProp") do desc = tf.NodeDescription("ApplyRMSProp") @@ -22391,7 +23813,10 @@ begin desc["T"] = tf.data_type(momentum_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -22409,7 +23834,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) local desc 
tf.with_op_name(name, "AdjustSaturation") do desc = tf.NodeDescription("AdjustSaturation") @@ -22426,7 +23851,10 @@ begin tf.add_input(desc, images_) tf.add_input(desc, scale_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function adjust_saturation(images_, scale_; name=nothing) if tf.eager_mode @@ -22444,7 +23872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) local desc tf.with_op_name(name, "LookupTableRemoveV2") do desc = tf.NodeDescription("LookupTableRemoveV2") @@ -22461,7 +23889,10 @@ begin tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) desc["Tin"] = tf.data_type(keys_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_remove_v2(table_handle_, keys_; name=nothing) if tf.eager_mode @@ -22479,7 +23910,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueClose") do desc = tf.NodeDescription("QueueClose") @@ -22497,7 +23928,10 @@ begin if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) if tf.eager_mode @@ -22515,7 +23949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "PrefetchDataset") do desc = tf.NodeDescription("PrefetchDataset") @@ -22542,7 +23976,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -22560,7 +23997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "MapDataset") do desc = tf.NodeDescription("MapDataset") @@ -22611,7 +24048,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -22629,7 +24069,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBias") do desc = tf.NodeDescription("QuantizedConv2DWithBias") @@ -22692,7 +24132,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -22710,7 +24153,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV3") do desc = tf.NodeDescription("TensorArrayReadV3") @@ -22734,7 +24177,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.eager_mode @@ -22752,7 +24198,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) local desc 
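# Every eager wrapper in this file gains the same tape-recording epilogue.
# A minimal sketch of the pattern, using this Identity op as the example and
# assuming the tf.TapeNode / tf.add_node helpers defined elsewhere in this
# patch series (illustration only, not part of the generated output):
#
#   res = tf.execute(desc)                   # run the op eagerly
#   node = tf.TapeNode(identity, [input_], name=nothing)  # op + inputs, for autodiff
#   tf.add_node(res[1], node)                # key the tape entry on the first output
#   return res[1]                            # single-output ops unwrap the result;
#                                            # multi-output ops return `res` whole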
tf.with_op_name(name, "Identity") do desc = tf.NodeDescription("Identity") @@ -22766,7 +24212,10 @@ begin desc = tf.EagerOp("Identity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(identity, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function identity(input_; name=nothing) if tf.eager_mode @@ -22784,7 +24233,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) local desc tf.with_op_name(name, "Print") do desc = tf.NodeDescription("Print") @@ -22825,7 +24274,10 @@ begin desc["summarize"] = Base.Int(summarize) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + tf.add_node(res[1], node) + return res[1] end function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) if tf.eager_mode @@ -22843,7 +24295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastSend") do desc = tf.NodeDescription("CollectiveBcastSend") @@ -22881,7 +24333,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) if tf.eager_mode @@ -22899,7 +24354,7 @@ end Converts a list of tensors to an array of tensors. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) local desc tf.with_op_name(name, "_ListToArray") do desc = tf.NodeDescription("_ListToArray") @@ -22928,7 +24383,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) if tf.eager_mode @@ -22946,7 +24404,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) local desc tf.with_op_name(name, "NegTrain") do desc = tf.NodeDescription("NegTrain") @@ -22982,7 +24440,10 @@ begin if num_negative_samples !== nothing desc["num_negative_samples"] = Base.Int(num_negative_samples) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing) + tf.add_node(res[1], node) + return res[1] end function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) if tf.eager_mode @@ -23000,7 +24461,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) local desc tf.with_op_name(name, "WorkerHeartbeat") do desc = tf.NodeDescription("WorkerHeartbeat") @@ -23012,7 +24473,10 @@ begin function worker_heartbeat_eager(request_; name=nothing) desc = tf.EagerOp("WorkerHeartbeat") tf.add_input(desc, request_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(worker_heartbeat, [request_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function worker_heartbeat(request_; name=nothing) if tf.eager_mode @@ -23030,7 +24494,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) local desc tf.with_op_name(name, "MergeV2Checkpoints") do desc = tf.NodeDescription("MergeV2Checkpoints") @@ -23051,7 +24515,10 @@ begin if delete_old_dirs !== nothing desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing) + tf.add_node(res[1], node) + return res[1] end function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, 
delete_old_dirs=nothing) if tf.eager_mode @@ -23069,7 +24536,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) local desc tf.with_op_name(name, "CollectivePermute") do desc = tf.NodeDescription("CollectivePermute") @@ -23086,7 +24553,10 @@ begin tf.add_input(desc, input_) tf.add_input(desc, source_target_pairs_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function collective_permute(input_, source_target_pairs_; name=nothing) if tf.eager_mode @@ -23104,7 +24574,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV3") do desc = tf.NodeDescription("QuantizeAndDequantizeV3") @@ -23141,7 +24611,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(input_min_) desc["T"] = tf.data_type(input_max_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing) + tf.add_node(res[1], node) + return res[1] end function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) if tf.eager_mode @@ -23159,7 +24632,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTable") do desc = tf.NodeDescription("HashTable") @@ -23198,7 +24671,10 @@ begin if value_dtype !== nothing desc["value_dtype"] = Base.identity(value_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.eager_mode @@ -23216,7 +24692,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftplusGrad") do desc = 
tf.NodeDescription("SoftplusGrad") @@ -23234,7 +24710,10 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function softplus_grad(gradients_, features_; name=nothing) if tf.eager_mode @@ -23252,7 +24731,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReader") do desc = tf.NodeDescription("FixedLengthRecordReader") @@ -23297,7 +24776,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -23315,7 +24797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV2") do desc = tf.NodeDescription("TensorArrayScatterV2") @@ -23338,7 +24820,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -23356,7 +24841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) local desc tf.with_op_name(name, "DecodeJSONExample") do desc = tf.NodeDescription("DecodeJSONExample") @@ -23368,7 +24853,10 @@ begin function decode_json_example_eager(json_examples_; name=nothing) desc = tf.EagerOp("DecodeJSONExample") tf.add_input(desc, json_examples_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_json_example(json_examples_; name=nothing) if tf.eager_mode @@ -23386,7 +24874,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGradV2") do desc = tf.NodeDescription("FusedBatchNormGradV2") @@ -23445,7 +24933,10 @@ begin desc["T"] = tf.data_type(x_) desc["U"] = tf.data_type(reserve_space_1_) desc["U"] = tf.data_type(reserve_space_2_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.eager_mode @@ -23463,7 +24954,7 @@ end Cast x of type SrcT to y of DstT. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "_HostCast") do desc = tf.NodeDescription("_HostCast") @@ -23495,7 +24986,10 @@ begin desc["Truncate"] = Base.Bool(Truncate) end desc["SrcT"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + tf.add_node(res[1], node) + return res[1] end function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) if tf.eager_mode @@ -23513,7 +25007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReader") do desc = tf.NodeDescription("TFRecordReader") @@ -23540,7 +25034,10 @@ begin if compression_type !== nothing desc["compression_type"] = Base.String(compression_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) if tf.eager_mode @@ -23558,7 +25055,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "While") 
do desc = tf.NodeDescription("While") @@ -23600,7 +25097,10 @@ begin if parallel_iterations !== nothing desc["parallel_iterations"] = Base.Int(parallel_iterations) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + tf.add_node(res[1], node) + return res[1] end function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) if tf.eager_mode @@ -23618,7 +25118,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "StatelessMultinomial") do desc = tf.NodeDescription("StatelessMultinomial") @@ -23646,7 +25146,10 @@ begin end desc["T"] = tf.data_type(logits_) desc["Tseed"] = tf.data_type(seed_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) if tf.eager_mode @@ -23664,7 +25167,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterAdd") do desc = tf.NodeDescription("ScatterAdd") @@ -23694,7 +25197,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -23712,7 +25218,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conj_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) local desc tf.with_op_name(name, "Conj") do desc = tf.NodeDescription("Conj") @@ -23726,7 +25232,10 @@ begin desc = tf.EagerOp("Conj") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conj, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function conj(input_; name=nothing) if tf.eager_mode @@ -23744,7 +25253,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ParallelDynamicStitch") do desc = tf.NodeDescription("ParallelDynamicStitch") @@ -23767,7 +25276,10 @@ begin desc["N"] = 
Base.Int(N) end desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) if tf.eager_mode @@ -23785,7 +25297,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) local desc tf.with_op_name(name, "MakeIterator") do desc = tf.NodeDescription("MakeIterator") @@ -23800,7 +25312,10 @@ begin desc = tf.EagerOp("MakeIterator") tf.add_input(desc, dataset_) tf.add_input(desc, iterator_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function make_iterator(dataset_, iterator_; name=nothing) if tf.eager_mode @@ -23818,7 +25333,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT3D") do desc = tf.NodeDescription("RFFT3D") @@ -23833,7 +25348,10 @@ begin desc = tf.EagerOp("RFFT3D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rfft3d(input_, fft_length_; name=nothing) if tf.eager_mode @@ -23851,7 +25369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSumSparse") do desc = tf.NodeDescription("SparseReduceSumSparse") @@ -23885,7 +25403,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["T"] = tf.data_type(input_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res end function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -23903,7 +25424,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveGather") do desc = tf.NodeDescription("CollectiveGather") @@ -23941,7 +25462,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = 
tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(collective_gather, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) if tf.eager_mode @@ -23959,7 +25483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) local desc tf.with_op_name(name, "CombinedNonMaxSuppression") do desc = tf.NodeDescription("CombinedNonMaxSuppression") @@ -23997,7 +25521,10 @@ begin if pad_per_class !== nothing desc["pad_per_class"] = Base.Bool(pad_per_class) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(combined_non_max_suppression, [boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_], name=nothing, pad_per_class=nothing) + tf.add_node(res[1], node) + return res end function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) if tf.eager_mode @@ -24015,7 +25542,7 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) local desc tf.with_op_name(name, "_ScopedAllocator") do desc = tf.NodeDescription("_ScopedAllocator") @@ -24054,7 +25581,10 @@ begin if expected_call_count !== nothing desc["expected_call_count"] = Base.Int(expected_call_count) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + tf.add_node(res[1], node) + return res[1] end function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) if tf.eager_mode @@ -24072,7 +25602,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") @@ -24114,7 +25644,10 @@ begin if shard_id !== nothing 
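# Optional attributes follow a sentinel convention: keyword arguments default
# to `nothing`, and an attribute is only written into the description when the
# caller passed a value, presumably leaving unset attributes at the op's
# registered default.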
desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -24132,7 +25665,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) local desc tf.with_op_name(name, "SparseAdd") do desc = tf.NodeDescription("SparseAdd") @@ -24172,7 +25705,10 @@ begin desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_values_) desc["Treal"] = tf.data_type(thresh_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) if tf.eager_mode @@ -24190,7 +25726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCGreedyDecoder") do desc = tf.NodeDescription("CTCGreedyDecoder") @@ -24216,7 +25752,10 @@ begin if merge_repeated !== nothing desc["merge_repeated"] = Base.Bool(merge_repeated) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing) + tf.add_node(res[1], node) + return res end function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) if tf.eager_mode @@ -24234,7 +25773,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) local desc tf.with_op_name(name, "ImmutableConst") do desc = tf.NodeDescription("ImmutableConst") @@ -24261,7 +25800,10 @@ begin if memory_region_name !== nothing desc["memory_region_name"] = Base.String(memory_region_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + tf.add_node(res[1], node) + return res[1] end function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) if tf.eager_mode @@ -24279,7 +25821,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) 
+ #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) local desc tf.with_op_name(name, "ConsumeMutexLock") do desc = tf.NodeDescription("ConsumeMutexLock") @@ -24291,7 +25833,10 @@ begin function consume_mutex_lock_eager(mutex_lock_; name=nothing) desc = tf.EagerOp("ConsumeMutexLock") tf.add_input(desc, mutex_lock_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function consume_mutex_lock(mutex_lock_; name=nothing) if tf.eager_mode @@ -24309,7 +25854,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "GreaterEqual") do desc = tf.NodeDescription("GreaterEqual") @@ -24327,7 +25872,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(greater_equal, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function greater_equal(x_, y_; name=nothing) if tf.eager_mode @@ -24345,7 +25893,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFileV2") do desc = tf.NodeDescription("InitializeTableFromTextFileV2") @@ -24384,7 +25932,10 @@ begin if delimiter !== nothing desc["delimiter"] = Base.String(delimiter) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + tf.add_node(res[1], node) + return res[1] end function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) if tf.eager_mode @@ -24402,7 +25953,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeue") do desc = tf.NodeDescription("QueueDequeue") @@ -24426,7 +25977,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -24444,7 +25998,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Equal") do desc = tf.NodeDescription("Equal") @@ -24462,7 +26016,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(equal, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function equal(x_, y_; name=nothing) if tf.eager_mode @@ -24480,7 +26037,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandle") do desc = tf.NodeDescription("IteratorFromStringHandle") @@ -24504,7 +26061,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -24522,7 +26082,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListSplit") do desc = tf.NodeDescription("TensorListSplit") @@ -24556,7 +26116,10 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -24574,7 +26137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalMaxPool") do desc = tf.NodeDescription("FractionalMaxPool") @@ -24629,7 +26192,10 @@ begin desc["seed2"] = Base.Int(seed2) end desc["T"] = tf.data_type(value_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, 
pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -24647,7 +26213,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) local desc tf.with_op_name(name, "ScatterNd") do desc = tf.NodeDescription("ScatterNd") @@ -24671,7 +26237,10 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) desc["Tindices"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_nd(indices_, updates_, shape_; name=nothing) if tf.eager_mode @@ -24689,7 +26258,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListScatterIntoExistingList") do desc = tf.NodeDescription("TensorListScatterIntoExistingList") @@ -24715,7 +26284,10 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end desc["element_dtype"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_scatter_into_existing_list, [input_handle_, tensor_, indices_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -24733,7 +26305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) local desc tf.with_op_name(name, "Select") do desc = tf.NodeDescription("Select") @@ -24754,7 +26326,10 @@ begin tf.add_input(desc, e_) desc["T"] = tf.data_type(t_) desc["T"] = tf.data_type(e_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(select, [condition_, t_, e_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function select(condition_, t_, e_; name=nothing) if tf.eager_mode @@ -24772,7 +26347,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Min") do desc = tf.NodeDescription("Min") @@ -24798,7 +26373,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -24816,7 +26394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRNGrad") do desc = tf.NodeDescription("LRNGrad") @@ -24862,7 +26440,10 @@ begin desc["T"] = tf.data_type(input_grads_) desc["T"] = tf.data_type(input_image_) desc["T"] = tf.data_type(output_image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + tf.add_node(res[1], node) + return res[1] end function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) if tf.eager_mode @@ -24880,7 +26461,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoissonV2") do desc = tf.NodeDescription("RandomPoissonV2") @@ -24929,7 +26510,10 @@ begin end desc["S"] = tf.data_type(shape_) desc["R"] = tf.data_type(rate_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) if tf.eager_mode @@ -24947,7 +26531,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueue") do desc = tf.NodeDescription("FIFOQueue") @@ -24986,7 +26570,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -25004,7 +26591,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") @@ -25045,7 +26632,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -25063,7 +26653,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalNonSerializableDataset") do desc = tf.NodeDescription("ExperimentalNonSerializableDataset") @@ -25087,7 +26677,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -25105,7 +26698,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") @@ -25132,7 +26725,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -25150,7 +26746,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, 
padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropFilter") do desc = tf.NodeDescription("Dilation2DBackpropFilter") @@ -25190,7 +26786,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.eager_mode @@ -25208,7 +26807,7 @@ end output = cond ? then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "_If") do desc = tf.NodeDescription("_If") @@ -25249,7 +26848,10 @@ begin desc["else_branch"] = Base.identity(else_branch) end desc["Tcond"] = tf.data_type(cond_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + tf.add_node(res[1], node) + return res[1] end function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) if tf.eager_mode @@ -25267,7 +26869,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAddGrad") do desc = tf.NodeDescription("BiasAddGrad") @@ -25287,7 +26889,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) if tf.eager_mode @@ -25305,7 +26910,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeStateV2") do desc = tf.NodeDescription("ReaderSerializeStateV2") @@ -25317,7 +26922,10 @@ begin function reader_serialize_state_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeStateV2") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
reader_serialize_state_v2(reader_handle_; name=nothing) if tf.eager_mode @@ -25335,7 +26943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "WrapDatasetVariant") do desc = tf.NodeDescription("WrapDatasetVariant") @@ -25347,7 +26955,10 @@ begin function wrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("WrapDatasetVariant") tf.add_input(desc, input_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function wrap_dataset_variant(input_handle_; name=nothing) if tf.eager_mode @@ -25365,7 +26976,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ParallelInterleaveDatasetV2") do desc = tf.NodeDescription("ParallelInterleaveDatasetV2") @@ -25419,7 +27030,10 @@ begin if sloppy !== nothing desc["sloppy"] = Base.Bool(sloppy) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + tf.add_node(res[1], node) + return res[1] end function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) if tf.eager_mode @@ -25437,7 +27051,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") @@ -25482,7 +27096,10 @@ begin end desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, 
out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -25500,7 +27117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyRMSProp") do desc = tf.NodeDescription("ResourceApplyRMSProp") @@ -25545,7 +27162,10 @@ begin desc["T"] = tf.data_type(momentum_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -25563,7 +27183,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "SparseAccumulatorTakeGradient") do desc = tf.NodeDescription("SparseAccumulatorTakeGradient") @@ -25589,7 +27209,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res end function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) if tf.eager_mode @@ -25607,7 +27230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLMDBDataset") do desc = tf.NodeDescription("ExperimentalLMDBDataset") @@ -25631,7 +27254,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -25649,7 +27275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackCloseV2") do desc = tf.NodeDescription("StackCloseV2") @@ -25661,7 +27287,10 @@ begin function 
stack_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("StackCloseV2") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_close_v2, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_close_v2(handle_; name=nothing) if tf.eager_mode @@ -25679,7 +27308,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapSize") do desc = tf.NodeDescription("MapSize") @@ -25718,7 +27347,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -25736,7 +27368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagradDA") do desc = tf.NodeDescription("ResourceApplyAdagradDA") @@ -25780,7 +27412,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -25798,7 +27433,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSize") do desc = tf.NodeDescription("TensorForestTreeSize") @@ -25810,7 +27445,10 @@ begin function tensor_forest_tree_size_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSize") tf.add_input(desc, tree_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_size(tree_handle_; name=nothing) if tf.eager_mode @@ -25828,7 +27466,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDiagPart") do desc = tf.NodeDescription("MatrixDiagPart") @@ -25842,7 +27480,10 @@ begin desc = tf.EagerOp("MatrixDiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_diag_part, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_diag_part(input_; name=nothing) if tf.eager_mode @@ -25860,7 +27501,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") @@ -25872,7 +27513,10 @@ begin function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_num_work_units_completed_v2(reader_handle_; name=nothing) if tf.eager_mode @@ -25890,7 +27534,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV3") do desc = tf.NodeDescription("TensorArraySplitV3") @@ -25913,7 +27557,10 @@ begin tf.add_input(desc, lengths_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) if tf.eager_mode @@ -25931,7 +27578,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToDense") do desc = tf.NodeDescription("SparseToDense") @@ -25966,7 +27613,10 @@ begin desc["Tindices"] = tf.data_type(output_shape_) desc["T"] = tf.data_type(sparse_values_) desc["T"] = tf.data_type(default_value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_to_dense(sparse_indices_, 
output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) if tf.eager_mode @@ -25984,7 +27634,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "TPUReplicatedInput") do desc = tf.NodeDescription("TPUReplicatedInput") @@ -26004,7 +27654,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(inputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_replicated_input(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -26022,7 +27675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_close_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackClose") do desc = tf.NodeDescription("StackClose") @@ -26034,7 +27687,10 @@ begin function stack_close_eager(handle_; name=nothing) desc = tf.EagerOp("StackClose") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_close, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_close(handle_; name=nothing) if tf.eager_mode @@ -26052,7 +27708,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeManySparse") do desc = tf.NodeDescription("DeserializeManySparse") @@ -26075,7 +27731,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res end function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) if tf.eager_mode @@ -26093,7 +27752,7 @@ end Replacement node for NcclReduce. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceRecv") do desc = tf.NodeDescription("_NcclReduceRecv") @@ -26125,7 +27784,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.eager_mode @@ -26143,7 +27805,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPadGrad") do desc = tf.NodeDescription("MirrorPadGrad") @@ -26168,7 +27830,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing) + tf.add_node(res[1], node) + return res[1] end function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) if tf.eager_mode @@ -26186,7 +27851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastArgs") do desc = tf.NodeDescription("BroadcastArgs") @@ -26204,7 +27869,10 @@ begin tf.add_input(desc, s1_) desc["T"] = tf.data_type(s0_) desc["T"] = tf.data_type(s1_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function broadcast_args(s0_, s1_; name=nothing) if tf.eager_mode @@ -26222,7 +27890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessTruncatedNormal") do desc = tf.NodeDescription("StatelessTruncatedNormal") @@ -26247,7 +27915,10 @@ begin end desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) if tf.eager_mode @@ -26265,7 +27936,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function regex_full_match_graph(input_, 
pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) local desc tf.with_op_name(name, "RegexFullMatch") do desc = tf.NodeDescription("RegexFullMatch") @@ -26280,7 +27951,10 @@ begin desc = tf.EagerOp("RegexFullMatch") tf.add_input(desc, input_) tf.add_input(desc, pattern_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function regex_full_match(input_, pattern_; name=nothing) if tf.eager_mode @@ -26298,7 +27972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "UnwrapDatasetVariant") do desc = tf.NodeDescription("UnwrapDatasetVariant") @@ -26310,7 +27984,10 @@ begin function unwrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("UnwrapDatasetVariant") tf.add_input(desc, input_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unwrap_dataset_variant(input_handle_; name=nothing) if tf.eager_mode @@ -26328,7 +28005,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) local desc tf.with_op_name(name, "Empty") do desc = tf.NodeDescription("Empty") @@ -26352,7 +28029,10 @@ begin if init !== nothing desc["init"] = Base.Bool(init) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing) + tf.add_node(res[1], node) + return res[1] end function empty(shape_; name=nothing, dtype=nothing, init=nothing) if tf.eager_mode @@ -26370,7 +28050,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeueTuple") do desc = tf.NodeDescription("OutfeedDequeueTuple") @@ -26397,7 +28077,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) if tf.eager_mode @@ -26415,7 +28098,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Div") do desc = 
tf.NodeDescription("Div") @@ -26433,7 +28116,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(div, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function div(x_, y_; name=nothing) if tf.eager_mode @@ -26451,7 +28137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Barrier") do desc = tf.NodeDescription("Barrier") @@ -26490,7 +28176,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -26508,7 +28197,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateDiv") do desc = tf.NodeDescription("TruncateDiv") @@ -26526,7 +28215,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(truncate_div, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function truncate_div(x_, y_; name=nothing) if tf.eager_mode @@ -26544,7 +28236,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) local desc tf.with_op_name(name, "UnicodeEncode") do desc = tf.NodeDescription("UnicodeEncode") @@ -26577,7 +28269,10 @@ begin if replacement_char !== nothing desc["replacement_char"] = Base.Int(replacement_char) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + tf.add_node(res[1], node) + return res[1] end function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) if tf.eager_mode @@ -26595,7 +28290,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "MergeSummary") 
do desc = tf.NodeDescription("MergeSummary") @@ -26613,7 +28308,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function merge_summary(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -26631,7 +28329,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_queue_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_queue_graph(resource_; name=nothing) local desc tf.with_op_name(name, "FakeQueue") do desc = tf.NodeDescription("FakeQueue") @@ -26643,7 +28341,10 @@ begin function fake_queue_eager(resource_; name=nothing) desc = tf.EagerOp("FakeQueue") tf.add_input(desc, resource_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_queue, [resource_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_queue(resource_; name=nothing) if tf.eager_mode @@ -26661,7 +28362,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_cholesky_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchCholesky") do desc = tf.NodeDescription("BatchCholesky") @@ -26675,7 +28376,10 @@ begin desc = tf.EagerOp("BatchCholesky") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_cholesky, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_cholesky(input_; name=nothing) if tf.eager_mode @@ -26693,7 +28397,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Iterator") do desc = tf.NodeDescription("Iterator") @@ -26726,7 +28430,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -26744,7 +28451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bessel_i1e_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI1e") do desc = tf.NodeDescription("BesselI1e") @@ -26758,7 +28465,10 @@ begin desc = tf.EagerOp("BesselI1e") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bessel_i1e, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bessel_i1e(x_; name=nothing) if tf.eager_mode @@ -26776,7 
+28486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function import_event_graph(writer_, event_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) local desc tf.with_op_name(name, "ImportEvent") do desc = tf.NodeDescription("ImportEvent") @@ -26791,7 +28501,10 @@ begin desc = tf.EagerOp("ImportEvent") tf.add_input(desc, writer_) tf.add_input(desc, event_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(import_event, [writer_, event_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function import_event(writer_, event_; name=nothing) if tf.eager_mode @@ -26809,7 +28522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) local desc tf.with_op_name(name, "QuantizedInstanceNorm") do desc = tf.NodeDescription("QuantizedInstanceNorm") @@ -26864,7 +28577,10 @@ begin desc["min_separation"] = Base.identity(min_separation) end desc["T"] = tf.data_type(x_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + tf.add_node(res[1], node) + return res end function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) if tf.eager_mode @@ -26882,7 +28598,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") @@ -26921,7 +28637,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -26939,7 +28658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV3") do desc = tf.NodeDescription("TensorArrayWriteV3") @@ -26962,7 +28681,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -26980,7 +28702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToDenseSetOperation") do desc = tf.NodeDescription("DenseToDenseSetOperation") @@ -27015,7 +28737,10 @@ begin end desc["T"] = tf.data_type(set1_) desc["T"] = tf.data_type(set2_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res end function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.eager_mode @@ -27033,7 +28758,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) local desc tf.with_op_name(name, "EncodeJpeg") do desc = tf.NodeDescription("EncodeJpeg") @@ -27099,7 +28824,10 @@ begin if xmp_metadata !== nothing desc["xmp_metadata"] = Base.String(xmp_metadata) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + tf.add_node(res[1], node) + return res[1] end function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) if tf.eager_mode @@ -27117,7 +28845,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceUpdate") do desc = tf.NodeDescription("InplaceUpdate") @@ -27138,7 +28866,10 @@ begin tf.add_input(desc, v_) desc["T"] = tf.data_type(x_) desc["T"] = 
tf.data_type(v_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function inplace_update(x_, i_, v_; name=nothing) if tf.eager_mode @@ -27156,7 +28887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedPadConv2D") do desc = tf.NodeDescription("FusedPadConv2D") @@ -27195,7 +28926,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -27213,7 +28947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu") do desc = tf.NodeDescription("QuantizedRelu") @@ -27244,7 +28978,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["Tinput"] = tf.data_type(features_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.eager_mode @@ -27262,7 +28999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) local desc tf.with_op_name(name, "GatherNd") do desc = tf.NodeDescription("GatherNd") @@ -27282,7 +29019,10 @@ begin tf.add_input(desc, indices_) desc["Tparams"] = tf.data_type(params_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function gather_nd(params_, indices_; name=nothing) if tf.eager_mode @@ -27300,7 +29040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "Placeholder") do desc = tf.NodeDescription("Placeholder") @@ -27321,7 +29061,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(placeholder, [], 
name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function placeholder(; name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -27339,7 +29082,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterByLastComponentDataset") do desc = tf.NodeDescription("FilterByLastComponentDataset") @@ -27363,7 +29106,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -27381,7 +29127,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) local desc tf.with_op_name(name, "ClipByValue") do desc = tf.NodeDescription("ClipByValue") @@ -27403,7 +29149,10 @@ begin desc["T"] = tf.data_type(t_) desc["T"] = tf.data_type(clip_value_min_) desc["T"] = tf.data_type(clip_value_max_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) if tf.eager_mode @@ -27421,7 +29170,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) local desc tf.with_op_name(name, "ImageSummary") do desc = tf.NodeDescription("ImageSummary") @@ -27450,7 +29199,10 @@ begin desc["bad_color"] = TensorFlow.RawTensor(bad_color) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing) + tf.add_node(res[1], node) + return res[1] end function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) if tf.eager_mode @@ -27468,7 +29220,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, 
"RetrieveTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") @@ -27506,7 +29258,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -27524,7 +29279,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) local desc tf.with_op_name(name, "StringJoin") do desc = tf.NodeDescription("StringJoin") @@ -27548,7 +29303,10 @@ begin if separator !== nothing desc["separator"] = Base.String(separator) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing) + tf.add_node(res[1], node) + return res[1] end function string_join(inputs_; name=nothing, N=nothing, separator=nothing) if tf.eager_mode @@ -27566,7 +29324,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdAdd") do desc = tf.NodeDescription("ResourceScatterNdAdd") @@ -27595,7 +29353,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -27613,7 +29374,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") @@ -27634,7 +29395,10 @@ begin if num_streams !== nothing desc["num_streams"] = Base.Int(num_streams) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) if 
tf.eager_mode @@ -27652,7 +29416,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function left_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LeftShift") do desc = tf.NodeDescription("LeftShift") @@ -27670,7 +29434,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(left_shift, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function left_shift(x_, y_; name=nothing) if tf.eager_mode @@ -27688,7 +29455,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "RequantizePerChannel") do desc = tf.NodeDescription("RequantizePerChannel") @@ -27725,7 +29492,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(requantize_per_channel, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) if tf.eager_mode @@ -27743,7 +29513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterAdd") do desc = tf.NodeDescription("TensorScatterAdd") @@ -27767,7 +29537,10 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) if tf.eager_mode @@ -27785,7 +29558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "_VarHandlesOp") do desc = tf.NodeDescription("_VarHandlesOp") @@ -27829,7 +29602,10 @@ begin if shapes !== nothing desc["shapes"] = map(Base.identity, shapes) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + tf.add_node(res[1], node) + 
return res end function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) if tf.eager_mode @@ -27847,7 +29623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT3D") do desc = tf.NodeDescription("IFFT3D") @@ -27861,7 +29637,10 @@ begin desc = tf.EagerOp("IFFT3D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ifft3d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ifft3d(input_; name=nothing) if tf.eager_mode @@ -27879,7 +29658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "EuclideanNorm") do desc = tf.NodeDescription("EuclideanNorm") @@ -27905,7 +29684,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(euclidean_norm, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -27923,7 +29705,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefSelect") do desc = tf.NodeDescription("RefSelect") @@ -27946,7 +29728,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(inputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function ref_select(index_, inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -27964,7 +29749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) local desc tf.with_op_name(name, "SparseTensorSliceDataset") do desc = tf.NodeDescription("SparseTensorSliceDataset") @@ -27984,7 +29769,10 @@ begin tf.add_input(desc, values_) tf.add_input(desc, dense_shape_) desc["Tvalues"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) if tf.eager_mode @@ -28002,7 +29790,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") @@ -28040,7 +29828,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -28058,7 +29849,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT2D") do desc = tf.NodeDescription("BatchIFFT2D") @@ -28070,7 +29861,10 @@ begin function batch_ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT2D") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft2d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_ifft2d(input_; name=nothing) if tf.eager_mode @@ -28088,7 +29882,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGather") do desc = tf.NodeDescription("TensorArrayGather") @@ -28118,7 +29912,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.eager_mode @@ -28136,7 +29933,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") @@ -28164,7 +29961,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res 
= tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -28182,7 +29982,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "EnsureShape") do desc = tf.NodeDescription("EnsureShape") @@ -28202,7 +30002,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function ensure_shape(input_; name=nothing, shape=nothing) if tf.eager_mode @@ -28220,7 +30023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalGradientDescent") do desc = tf.NodeDescription("ApplyProximalGradientDescent") @@ -28256,7 +30059,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(delta_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -28274,7 +30080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) local desc tf.with_op_name(name, "CollectiveReduce") do desc = tf.NodeDescription("CollectiveReduce") @@ -28330,7 +30136,10 @@ begin desc["wait_for"] = map(Base.identity, wait_for) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + tf.add_node(res[1], node) + return res[1] end function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) if tf.eager_mode @@ -28348,7 +30157,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_nan_graph(x_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsNan") do desc = tf.NodeDescription("IsNan") @@ -28362,7 +30171,10 @@ begin desc = tf.EagerOp("IsNan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_nan, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function is_nan(x_; name=nothing) if tf.eager_mode @@ -28380,7 +30192,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdaMax") do desc = tf.NodeDescription("ApplyAdaMax") @@ -28432,7 +30244,10 @@ begin desc["T"] = tf.data_type(beta2_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -28450,7 +30265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeAndCropJpeg") do desc = tf.NodeDescription("DecodeAndCropJpeg") @@ -28501,7 +30316,10 @@ begin if dct_method !== nothing desc["dct_method"] = Base.String(dct_method) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) if tf.eager_mode @@ -28519,7 +30337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyCenteredRMSProp") do desc = tf.NodeDescription("ApplyCenteredRMSProp") @@ -28571,7 +30389,10 @@ begin desc["T"] = tf.data_type(momentum_) 
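# The edit repeated throughout this patch: each generated `*_eager` body swaps the
# bare `(tf.execute(desc))[1]` for four lines -- run the op, wrap the Julia-level op
# function and its tensor inputs in a `tf.TapeNode`, register that node against the
# first output via `tf.add_node`, then return. That is why the hunks around each
# execute call here read `-m,7 +n,10` while the docstring-only hunks stay at 7 lines.
# A minimal self-contained sketch of the bookkeeping, with toy types rather than the
# package's actual definitions:
struct ToyTapeNode
    op::Function       # the Julia-level op, e.g. `exp` or `tanh_grad`
    args::Vector{Any}  # the tensor inputs it was called with
end
const toy_tape = IdDict{Any,ToyTapeNode}()     # output handle => node that produced it
toy_add_node(out, node::ToyTapeNode) = (toy_tape[out] = node)
function toy_record(op, inputs, res)   # mirrors: res = execute; node = TapeNode(...)
    node = ToyTapeNode(op, collect(Any, inputs))
    toy_add_node(res[1], node)         # only the first output is taped
    return res[1]
end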
desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -28589,7 +30410,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilterV2") do desc = tf.NodeDescription("Conv3DBackpropFilterV2") @@ -28634,7 +30455,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -28652,7 +30476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixTriangularSolve") do desc = tf.NodeDescription("MatrixTriangularSolve") @@ -28682,7 +30506,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) if tf.eager_mode @@ -28700,7 +30527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") @@ -28712,7 +30539,10 @@ begin function reader_num_work_units_completed_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_num_work_units_completed(reader_handle_; name=nothing) if tf.eager_mode @@ -28730,7 +30560,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "WriteAudioSummary") do desc = tf.NodeDescription("WriteAudioSummary") @@ -28760,7 +30590,10 @@ begin if max_outputs !== nothing desc["max_outputs"] = Base.Int(max_outputs) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing) + tf.add_node(res[1], node) + return res[1] end function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) if tf.eager_mode @@ -28778,7 +30611,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilespec") do desc = tf.NodeDescription("ShardedFilespec") @@ -28793,7 +30626,10 @@ begin desc = tf.EagerOp("ShardedFilespec") tf.add_input(desc, basename_) tf.add_input(desc, num_shards_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sharded_filespec(basename_, num_shards_; name=nothing) if tf.eager_mode @@ -28811,7 +30647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "DivNoNan") do desc = tf.NodeDescription("DivNoNan") @@ -28829,7 +30665,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function div_no_nan(x_, y_; name=nothing) if tf.eager_mode @@ -28847,7 +30686,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) local desc tf.with_op_name(name, "SparseAccumulatorApplyGradient") do desc = tf.NodeDescription("SparseAccumulatorApplyGradient") @@ -28885,7 +30724,10 @@ begin desc["has_known_shape"] = Base.Bool(has_known_shape) end desc["dtype"] = tf.data_type(gradient_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function 
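# Each op keeps the same three-function layout: the `*_graph` builder, the `*_eager`
# body amended above, and an untouched dispatcher that branches on the global
# `tf.eager_mode` flag (its body is clipped at the hunk edges in this diff). A sketch
# of that dispatcher shape, with stand-in definitions so it runs on its own --
# illustrative only, not the package's API:
eager_mode = true
div_no_nan_graph_sketch(x_, y_; name=nothing) = :graph_node
div_no_nan_eager_sketch(x_, y_; name=nothing) = :eager_result
function div_no_nan_sketch(x_, y_; name=nothing)
    if eager_mode
        div_no_nan_eager_sketch(x_, y_; name=name)
    else
        div_no_nan_graph_sketch(x_, y_; name=name)
    end
end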
sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) if tf.eager_mode @@ -28903,7 +30745,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedTensorToSparse") do desc = tf.NodeDescription("RaggedTensorToSparse") @@ -28931,7 +30773,10 @@ begin desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) end desc["T"] = tf.data_type(rt_dense_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing) + tf.add_node(res[1], node) + return res end function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) if tf.eager_mode @@ -28949,7 +30794,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractVolumePatches") do desc = tf.NodeDescription("ExtractVolumePatches") @@ -28981,7 +30826,10 @@ begin desc["padding"] = Base.String(padding) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -28999,7 +30847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) local desc tf.with_op_name(name, "BarrierInsertMany") do desc = tf.NodeDescription("BarrierInsertMany") @@ -29031,7 +30879,10 @@ begin desc["component_index"] = Base.Int(component_index) end desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing) + tf.add_node(res[1], node) + return res[1] end function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) if tf.eager_mode @@ -29049,7 +30900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "Const") do desc = tf.NodeDescription("Const") @@ -29070,7 +30921,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - 
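# Note the TapeNode kwargs in the `Const` hunk just below: the eager body sets
# `value` and `dtype` on the EagerOp, yet the node is built with `value=nothing,
# dtype=nothing`. As generated at this stage of the patch, the tape records kwarg
# names but not their values, so a recorded call could not yet be replayed with its
# attributes. A toy illustration of that information loss (hypothetical helper, not
# the package's API):
captured_kwargs(; kwargs...) = Dict(k => nothing for (k, _) in kwargs)
# captured_kwargs(value=[1, 2], dtype=Int) == Dict(:value => nothing, :dtype => nothing)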
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function const_(; name=nothing, value=nothing, dtype=nothing) if tf.eager_mode @@ -29088,7 +30942,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "SpaceToBatch") do desc = tf.NodeDescription("SpaceToBatch") @@ -29113,7 +30967,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing) + tf.add_node(res[1], node) + return res[1] end function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) if tf.eager_mode @@ -29131,7 +30988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageSize") do desc = tf.NodeDescription("StageSize") @@ -29170,7 +31027,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -29188,7 +31048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "EmptyTensorList") do desc = tf.NodeDescription("EmptyTensorList") @@ -29217,7 +31077,10 @@ begin desc["shape_type"] = Base.identity(shape_type) end desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -29235,7 +31098,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndRequantize") @@ -29301,7 +31164,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -29319,7 +31185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) local desc tf.with_op_name(name, "Lu") do desc = tf.NodeDescription("Lu") @@ -29344,7 +31210,10 @@ begin desc["output_idx_type"] = Base.identity(output_idx_type) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing) + tf.add_node(res[1], node) + return res end function lu(input_; name=nothing, output_idx_type=nothing) if tf.eager_mode @@ -29362,7 +31231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "DecodeCompressed") do desc = tf.NodeDescription("DecodeCompressed") @@ -29380,7 +31249,10 @@ begin if compression_type !== nothing desc["compression_type"] = Base.String(compression_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_compressed(bytes_; name=nothing, compression_type=nothing) if tf.eager_mode @@ -29398,7 +31270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "GetSessionTensor") do desc = tf.NodeDescription("GetSessionTensor") @@ -29416,7 +31288,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function get_session_tensor(handle_; name=nothing, dtype=nothing) if tf.eager_mode @@ 
-29434,7 +31309,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV3") do desc = tf.NodeDescription("TensorArrayGatherV3") @@ -29464,7 +31339,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.eager_mode @@ -29482,7 +31360,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") @@ -29527,7 +31405,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -29545,7 +31426,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) local desc tf.with_op_name(name, "DestroyResourceOp") do desc = tf.NodeDescription("DestroyResourceOp") @@ -29563,7 +31444,10 @@ begin if ignore_lookup_error !== nothing desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing) + tf.add_node(res[1], node) + return res[1] end function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) if tf.eager_mode @@ -29581,7 +31465,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, 
shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReader") do desc = tf.NodeDescription("TextLineReader") @@ -29608,7 +31492,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -29626,7 +31513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryDbWriter") do desc = tf.NodeDescription("CreateSummaryDbWriter") @@ -29650,7 +31537,10 @@ begin tf.add_input(desc, experiment_name_) tf.add_input(desc, run_name_) tf.add_input(desc, user_name_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) if tf.eager_mode @@ -29668,7 +31558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "TanhGrad") do desc = tf.NodeDescription("TanhGrad") @@ -29686,7 +31576,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tanh_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -29704,7 +31597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_base64_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) local desc tf.with_op_name(name, "DecodeBase64") do desc = tf.NodeDescription("DecodeBase64") @@ -29716,7 +31609,10 @@ begin function decode_base64_eager(input_; name=nothing) desc = tf.EagerOp("DecodeBase64") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_base64, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_base64(input_; name=nothing) if tf.eager_mode @@ -29734,7 +31630,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradV2") do desc = tf.NodeDescription("MaxPoolGradGradV2") @@ -29774,7 +31670,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -29792,7 +31691,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummaryV2") do desc = tf.NodeDescription("AudioSummaryV2") @@ -29816,7 +31715,10 @@ begin if max_outputs !== nothing desc["max_outputs"] = Base.Int(max_outputs) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing) + tf.add_node(res[1], node) + return res[1] end function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) if tf.eager_mode @@ -29834,7 +31736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "StatefulPartitionedCall") do desc = tf.NodeDescription("StatefulPartitionedCall") @@ -29882,7 +31784,10 @@ begin if executor_type !== nothing desc["executor_type"] = Base.String(executor_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + tf.add_node(res[1], node) + return res[1] end function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) if tf.eager_mode @@ -29900,7 +31805,7 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorConcat") do desc = tf.NodeDescription("_ScopedAllocatorConcat") @@ -29948,7 +31853,10 @@ begin 
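# The `generate_ops.jl:230` -> `:231` change on each op's `#= ... =#` marker is
# mechanical rather than semantic: the marker records which generator line emitted
# the `tf.@op` definition, and the generator itself presumably grew by one line
# above that point in this patch, so every docstring hunk is touched even where the
# op is otherwise unchanged.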
end desc["T"] = tf.data_type(backing_) desc["T"] = tf.data_type(inputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) if tf.eager_mode @@ -29966,7 +31874,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") @@ -30005,7 +31913,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ -30023,7 +31934,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "BatchSvd") do desc = tf.NodeDescription("BatchSvd") @@ -30054,7 +31965,10 @@ begin desc["full_matrices"] = Base.Bool(full_matrices) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing) + tf.add_node(res[1], node) + return res end function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) if tf.eager_mode @@ -30072,7 +31986,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapStage") do desc = tf.NodeDescription("MapStage") @@ -30126,7 +32040,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
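# Multi-output ops take a slightly different tail: `batch_svd` just above (and
# `fixed_unigram_candidate_sampler`, `sparse_concat`, `parse_example`, and
# `fused_batch_norm_grad` further on) end in `return res`, handing back every
# output handle, where single-output ops return `res[1]`. Both shapes still tape
# only `res[1]`. A sketch of the two return conventions (toy registry, hypothetical
# names):
toy_reg = IdDict{Any,Any}()
tape_single(res, node) = (toy_reg[res[1]] = node; res[1])  # e.g. Exp: one output
tape_multi(res, node)  = (toy_reg[res[1]] = node; res)     # e.g. Svd: (s, u, v)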
map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -30144,7 +32061,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrl") do desc = tf.NodeDescription("ResourceSparseApplyFtrl") @@ -30195,7 +32112,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -30213,7 +32133,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighbor") do desc = tf.NodeDescription("ResizeNearestNeighbor") @@ -30236,7 +32156,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -30254,7 +32177,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalCSVDataset") do desc = tf.NodeDescription("ExperimentalCSVDataset") @@ -30302,7 +32225,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, 
na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -30320,7 +32246,7 @@ end Returns x * y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMul") do desc = tf.NodeDescription("_MklMul") @@ -30349,7 +32275,10 @@ begin tf.add_input(desc, mkl_y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing) + tf.add_node(res[1], node) + return res end function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.eager_mode @@ -30367,7 +32296,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiag") do desc = tf.NodeDescription("BatchMatrixDiag") @@ -30381,7 +32310,10 @@ begin desc = tf.EagerOp("BatchMatrixDiag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_diag(diagonal_; name=nothing) if tf.eager_mode @@ -30399,7 +32331,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_inf_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsInf") do desc = tf.NodeDescription("IsInf") @@ -30413,7 +32345,10 @@ begin desc = tf.EagerOp("IsInf") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_inf, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function is_inf(x_; name=nothing) if tf.eager_mode @@ -30431,7 +32366,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FixedUnigramCandidateSampler") do desc = tf.NodeDescription("FixedUnigramCandidateSampler") @@ -30520,7 +32455,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, 
distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -30538,7 +32476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrlV2") do desc = tf.NodeDescription("SparseApplyFtrlV2") @@ -30596,7 +32534,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -30614,7 +32555,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) local desc tf.with_op_name(name, "UnravelIndex") do desc = tf.NodeDescription("UnravelIndex") @@ -30634,7 +32575,10 @@ begin tf.add_input(desc, dims_) desc["Tidx"] = tf.data_type(indices_) desc["Tidx"] = tf.data_type(dims_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unravel_index(indices_, dims_; name=nothing) if tf.eager_mode @@ -30652,7 +32596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Max") do desc = tf.NodeDescription("Max") @@ -30678,7 +32622,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -30696,7 +32643,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT2D") do 
desc = tf.NodeDescription("IFFT2D") @@ -30710,7 +32657,10 @@ begin desc = tf.EagerOp("IFFT2D") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ifft2d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ifft2d(input_; name=nothing) if tf.eager_mode @@ -30728,7 +32678,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) local desc tf.with_op_name(name, "SparseConcat") do desc = tf.NodeDescription("SparseConcat") @@ -30771,7 +32721,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) if tf.eager_mode @@ -30789,7 +32742,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) local desc tf.with_op_name(name, "HistogramSummary") do desc = tf.NodeDescription("HistogramSummary") @@ -30806,7 +32759,10 @@ begin tf.add_input(desc, tag_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function histogram_summary(tag_, values_; name=nothing) if tf.eager_mode @@ -30824,7 +32780,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentSum") do desc = tf.NodeDescription("SegmentSum") @@ -30844,7 +32800,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function segment_sum(data_, segment_ids_; name=nothing) if tf.eager_mode @@ -30862,7 +32821,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function exp_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) local desc tf.with_op_name(name, "Exp") do desc = tf.NodeDescription("Exp") @@ -30876,7 +32835,10 @@ begin desc = tf.EagerOp("Exp") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(exp, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function exp(x_; name=nothing) if tf.eager_mode @@ -30894,7 +32856,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) local desc tf.with_op_name(name, "ConfigureDistributedTPU") do desc = tf.NodeDescription("ConfigureDistributedTPU") @@ -30921,7 +32883,10 @@ begin if is_global_init !== nothing desc["is_global_init"] = Base.Bool(is_global_init) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + tf.add_node(res[1], node) + return res[1] end function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) if tf.eager_mode @@ -30939,7 +32904,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdSub") do desc = tf.NodeDescription("ResourceScatterNdSub") @@ -30968,7 +32933,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -30986,7 +32954,7 @@ end A placeholder op for multiple values that will be sent from TensorFlow to a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaSendFromHost") do desc = tf.NodeDescription("_XlaSendFromHost") @@ -31019,7 +32987,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) if tf.eager_mode @@ -31037,7 +33008,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandleV2") do desc = tf.NodeDescription("GetSessionHandleV2") @@ -31051,7 +33022,10 @@ begin desc = 
tf.EagerOp("GetSessionHandleV2") tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function get_session_handle_v2(value_; name=nothing) if tf.eager_mode @@ -31069,7 +33043,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "ReluGrad") do desc = tf.NodeDescription("ReluGrad") @@ -31087,7 +33061,10 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function relu_grad(gradients_, features_; name=nothing) if tf.eager_mode @@ -31105,7 +33082,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMin") do desc = tf.NodeDescription("UnsortedSegmentMin") @@ -31130,7 +33107,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -31148,7 +33128,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseExample") do desc = tf.NodeDescription("ParseExample") @@ -31207,7 +33187,10 @@ begin if dense_shapes !== nothing desc["dense_shapes"] = map(Base.identity, dense_shapes) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + tf.add_node(res[1], node) + return res end function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) if tf.eager_mode @@ -31225,7 +33208,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, 
timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueV2") do desc = tf.NodeDescription("QueueEnqueueV2") @@ -31252,7 +33235,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.eager_mode @@ -31270,7 +33256,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdAdd") do desc = tf.NodeDescription("ScatterNdAdd") @@ -31300,7 +33286,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -31318,7 +33307,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProducedV2") do desc = tf.NodeDescription("ReaderNumRecordsProducedV2") @@ -31330,7 +33319,10 @@ begin function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProducedV2") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_num_records_produced_v2(reader_handle_; name=nothing) if tf.eager_mode @@ -31348,7 +33340,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") @@ -31393,7 +33385,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, 
table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -31411,7 +33406,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignSub") do desc = tf.NodeDescription("AssignSub") @@ -31435,7 +33430,10 @@ begin end desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function assign_sub(ref_, value_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -31453,7 +33451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentSum") do desc = tf.NodeDescription("UnsortedSegmentSum") @@ -31478,7 +33476,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -31496,7 +33497,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGrad") do desc = tf.NodeDescription("FusedBatchNormGrad") @@ -31549,7 +33550,10 @@ begin desc["T"] = tf.data_type(scale_) desc["T"] = tf.data_type(reserve_space_1_) desc["T"] = tf.data_type(reserve_space_2_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.eager_mode @@ -31567,7 +33571,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradV2") do desc = tf.NodeDescription("MaxPoolGradV2") @@ -31607,7 +33611,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -31625,7 +33632,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") @@ -31688,7 +33695,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -31706,7 +33716,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateEnsemble") do desc = tf.NodeDescription("BoostedTreesCreateEnsemble") @@ -31724,7 +33734,10 @@ begin tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) if tf.eager_mode @@ -31742,7 +33755,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, 
shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapIncompleteSize") do desc = tf.NodeDescription("OrderedMapIncompleteSize") @@ -31781,7 +33794,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -31799,7 +33815,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) local desc tf.with_op_name(name, "Skipgram") do desc = tf.NodeDescription("Skipgram") @@ -31843,7 +33859,10 @@ begin if subsample !== nothing desc["subsample"] = Base.identity(subsample) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + tf.add_node(res[1], node) + return res end function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) if tf.eager_mode @@ -31861,7 +33880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMin") do desc = tf.NodeDescription("ArgMin") @@ -31887,7 +33906,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(dimension_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing) + tf.add_node(res[1], node) + return res[1] end function arg_min(input_, dimension_; name=nothing, output_type=nothing) if tf.eager_mode @@ -31905,7 +33927,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueMany") do desc = tf.NodeDescription("QueueDequeueMany") @@ -31932,7 +33954,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + 
return res[1] end function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -31950,7 +33975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") @@ -31967,7 +33992,10 @@ begin function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesSerializeEnsemble") tf.add_input(desc, tree_ensemble_handle_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) if tf.eager_mode @@ -31985,7 +34013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function minimum_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Minimum") do desc = tf.NodeDescription("Minimum") @@ -32003,7 +34031,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(minimum, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function minimum(x_, y_; name=nothing) if tf.eager_mode @@ -32021,7 +34052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "Substr") do desc = tf.NodeDescription("Substr") @@ -32048,7 +34079,10 @@ begin end desc["T"] = tf.data_type(pos_) desc["T"] = tf.data_type(len_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing) + tf.add_node(res[1], node) + return res[1] end function substr(input_, pos_, len_; name=nothing, unit=nothing) if tf.eager_mode @@ -32066,7 +34100,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSize") do desc = tf.NodeDescription("QueueSize") @@ -32078,7 +34112,10 @@ begin function queue_size_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSize") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_size, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_size(handle_; name=nothing) if tf.eager_mode @@ -32096,7 +34133,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrlV2") do desc = tf.NodeDescription("ApplyFtrlV2") @@ -32148,7 +34185,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -32166,7 +34206,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") @@ -32205,7 +34245,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -32223,7 +34266,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMean") do desc = tf.NodeDescription("SparseSegmentMean") @@ -32246,7 +34289,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) if tf.eager_mode @@ -32264,7 +34310,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceApplyProximalAdagrad") @@ -32302,7 +34348,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = 
tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -32320,7 +34369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV2") do desc = tf.NodeDescription("TensorArrayGatherV2") @@ -32350,7 +34399,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.eager_mode @@ -32368,7 +34420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function less_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Less") do desc = tf.NodeDescription("Less") @@ -32386,7 +34438,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(less, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function less(x_, y_; name=nothing) if tf.eager_mode @@ -32404,7 +34459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "HostConst") do desc = tf.NodeDescription("HostConst") @@ -32425,7 +34480,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function host_const(; name=nothing, value=nothing, dtype=nothing) if tf.eager_mode @@ -32443,7 +34501,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "UpperBound") do desc = tf.NodeDescription("UpperBound") @@ -32467,7 +34525,10 @@ begin end desc["T"] = tf.data_type(sorted_inputs_) desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + 
return res[1] end function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) if tf.eager_mode @@ -32485,7 +34546,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGetItem") do desc = tf.NodeDescription("TensorListGetItem") @@ -32509,7 +34570,10 @@ begin if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_, element_shape_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -32527,7 +34591,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVars") do desc = tf.NodeDescription("FakeQuantWithMinMaxVars") @@ -32557,7 +34621,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ -32575,7 +34642,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") @@ -32587,7 +34654,10 @@ begin function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") tf.add_input(desc, quantile_stream_resource_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) if tf.eager_mode @@ -32605,7 +34675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpToV2") do desc = tf.NodeDescription("ReaderReadUpToV2") @@ -32628,7 +34698,10 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing) + tf.add_node(res[1], node) + return res end function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) if tf.eager_mode @@ -32646,7 +34719,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function complex_graph(real_, imag_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) local desc tf.with_op_name(name, "Complex") do desc = tf.NodeDescription("Complex") @@ -32664,7 +34737,10 @@ begin tf.add_input(desc, imag_) desc["T"] = tf.data_type(real_) desc["T"] = tf.data_type(imag_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(complex, [real_, imag_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function complex(real_, imag_; name=nothing) if tf.eager_mode @@ -32682,7 +34758,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListReserve") do desc = tf.NodeDescription("TensorListReserve") @@ -32711,7 +34787,10 @@ begin desc["shape_type"] = Base.identity(shape_type) end desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -32729,7 +34808,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) local desc tf.with_op_name(name, "Bitcast") do desc = tf.NodeDescription("Bitcast") @@ -32749,7 +34828,10 @@ begin desc["type"] = Base.identity(type_) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing) + tf.add_node(res[1], node) + return res[1] end function bitcast(input_; name=nothing, type_=nothing) if tf.eager_mode @@ -32767,7 +34849,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_graph(; 
name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueue") do desc = tf.NodeDescription("PriorityQueue") @@ -32806,7 +34888,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -32824,7 +34909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") @@ -32907,7 +34992,10 @@ begin desc["Tinput"] = tf.data_type(v_) desc["Tinput"] = tf.data_type(beta_) desc["Tinput"] = tf.data_type(gamma_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + tf.add_node(res[1], node) + return res end function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if tf.eager_mode @@ -32925,7 +35013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cos_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cos") do desc = tf.NodeDescription("Cos") @@ -32939,7 +35027,10 @@ begin desc = tf.EagerOp("Cos") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cos, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function cos(x_; name=nothing) if tf.eager_mode @@ -32957,7 +35048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizeDownAndShrinkRange") do desc = 
tf.NodeDescription("QuantizeDownAndShrinkRange") @@ -32988,7 +35079,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["Tinput"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) if tf.eager_mode @@ -33006,7 +35100,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRandomDataset") do desc = tf.NodeDescription("ExperimentalRandomDataset") @@ -33033,7 +35127,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -33051,7 +35148,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "Rpc") do desc = tf.NodeDescription("Rpc") @@ -33087,7 +35184,10 @@ begin if timeout_in_ms !== nothing desc["timeout_in_ms"] = Base.Int(timeout_in_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) if tf.eager_mode @@ -33105,7 +35205,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") @@ -33187,7 +35287,10 @@ begin desc["Tfilter"] = 
tf.data_type(filter_) desc["Tbias"] = tf.data_type(bias_) desc["Tsummand"] = tf.data_type(summand_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -33205,7 +35308,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "TensorListLength") do desc = tf.NodeDescription("TensorListLength") @@ -33217,7 +35320,10 @@ begin function tensor_list_length_eager(input_handle_; name=nothing) desc = tf.EagerOp("TensorListLength") tf.add_input(desc, input_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_length(input_handle_; name=nothing) if tf.eager_mode @@ -33235,7 +35341,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapIncompleteSize") do desc = tf.NodeDescription("MapIncompleteSize") @@ -33274,7 +35380,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -33292,7 +35401,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "StatelessWhile") do desc = tf.NodeDescription("StatelessWhile") @@ -33322,7 +35431,10 @@ begin if body !== nothing desc["body"] = Base.identity(body) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing) + tf.add_node(res[1], node) + return res[1] end function 
stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) if tf.eager_mode @@ -33340,7 +35452,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "SparseConditionalAccumulator") do desc = tf.NodeDescription("SparseConditionalAccumulator") @@ -33379,7 +35491,10 @@ begin if reduction_type !== nothing desc["reduction_type"] = Base.String(reduction_type) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) if tf.eager_mode @@ -33397,7 +35512,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMin") do desc = tf.NodeDescription("SegmentMin") @@ -33417,7 +35532,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function segment_min(data_, segment_ids_; name=nothing) if tf.eager_mode @@ -33435,7 +35553,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) local desc tf.with_op_name(name, "WriteGraphSummary") do desc = tf.NodeDescription("WriteGraphSummary") @@ -33453,7 +35571,10 @@ begin tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function write_graph_summary(writer_, step_, tensor_; name=nothing) if tf.eager_mode @@ -33471,7 +35592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "CholeskyGrad") do desc = tf.NodeDescription("CholeskyGrad") @@ -33489,7 +35610,10 @@ begin tf.add_input(desc, grad_) desc["T"] = tf.data_type(l_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
cholesky_grad(l_, grad_; name=nothing) if tf.eager_mode @@ -33507,7 +35631,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LogUniformCandidateSampler") do desc = tf.NodeDescription("LogUniformCandidateSampler") @@ -33560,7 +35684,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -33578,7 +35705,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeSparse") do desc = tf.NodeDescription("SerializeSparse") @@ -33604,7 +35731,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["T"] = tf.data_type(sparse_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) if tf.eager_mode @@ -33622,7 +35752,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "ScatterNdNonAliasingAdd") do desc = tf.NodeDescription("ScatterNdNonAliasingAdd") @@ -33646,7 +35776,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) if tf.eager_mode @@ -33664,7 +35797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefMerge") do desc = tf.NodeDescription("RefMerge") @@ 
-33689,7 +35822,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(inputs_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function ref_merge(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -33707,7 +35843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorListConcat") do desc = tf.NodeDescription("TensorListConcat") @@ -33736,7 +35872,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res end function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) if tf.eager_mode @@ -33754,7 +35893,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNCanonicalToParams") do desc = tf.NodeDescription("CudnnRNNCanonicalToParams") @@ -33823,7 +35962,10 @@ begin end desc["T"] = tf.data_type(weights_) desc["T"] = tf.data_type(biases_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res[1] end function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -33841,7 +35983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdadelta") do desc = tf.NodeDescription("SparseApplyAdadelta") @@ -33891,7 +36033,10 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -33909,7 +36054,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayClose") do desc = tf.NodeDescription("TensorArrayClose") @@ -33921,7 +36066,10 @@ begin function tensor_array_close_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayClose") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_close(handle_; name=nothing) if tf.eager_mode @@ -33939,7 +36087,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "SeluGrad") do desc = tf.NodeDescription("SeluGrad") @@ -33957,7 +36105,10 @@ begin tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(outputs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function selu_grad(gradients_, outputs_; name=nothing) if tf.eager_mode @@ -33975,7 +36126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradImage") do desc = tf.NodeDescription("CropAndResizeGradImage") @@ -34002,7 +36153,10 @@ begin if method !== nothing desc["method"] = Base.String(method) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing) + tf.add_node(res[1], node) + return res[1] end function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) if tf.eager_mode @@ -34020,7 +36174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT") do desc = tf.NodeDescription("RFFT") @@ -34035,7 +36189,10 @@ begin desc = tf.EagerOp("RFFT") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rfft(input_, fft_length_; 
name=nothing) if tf.eager_mode @@ -34053,7 +36210,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSqlDataset") do desc = tf.NodeDescription("ExperimentalSqlDataset") @@ -34083,7 +36240,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -34101,7 +36261,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyPowerSign") do desc = tf.NodeDescription("ResourceApplyPowerSign") @@ -34143,7 +36303,10 @@ begin desc["T"] = tf.data_type(sign_decay_) desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -34161,7 +36324,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDeterminant") do desc = tf.NodeDescription("MatrixDeterminant") @@ -34175,7 +36338,10 @@ begin desc = tf.EagerOp("MatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_determinant, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_determinant(input_; name=nothing) if tf.eager_mode @@ -34193,7 +36359,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) local desc tf.with_op_name(name, "StaticRegexReplace") do desc = tf.NodeDescription("StaticRegexReplace") @@ -34223,7 +36389,10 @@ begin if replace_global !== nothing 
desc["replace_global"] = Base.Bool(replace_global) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + tf.add_node(res[1], node) + return res[1] end function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) if tf.eager_mode @@ -34241,7 +36410,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool") do desc = tf.NodeDescription("AvgPool") @@ -34279,7 +36448,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -34297,7 +36469,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseAdd") do desc = tf.NodeDescription("SparseDenseCwiseAdd") @@ -34321,7 +36493,10 @@ begin tf.add_input(desc, dense_) desc["T"] = tf.data_type(sp_values_) desc["T"] = tf.data_type(dense_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) if tf.eager_mode @@ -34339,7 +36514,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) local desc tf.with_op_name(name, "BiasAddV1") do desc = tf.NodeDescription("BiasAddV1") @@ -34357,7 +36532,10 @@ begin tf.add_input(desc, bias_) desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(bias_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bias_add_v1(value_, bias_; name=nothing) if tf.eager_mode @@ -34375,7 +36553,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function invert_permutation_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_permutation_graph(x_; name=nothing) local desc tf.with_op_name(name, "InvertPermutation") do desc = tf.NodeDescription("InvertPermutation") @@ -34389,7 +36567,10 @@ begin desc = tf.EagerOp("InvertPermutation") tf.add_input(desc, x_) desc["T"] = 
tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(invert_permutation, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function invert_permutation(x_; name=nothing) if tf.eager_mode @@ -34407,7 +36588,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTableV2") do desc = tf.NodeDescription("HashTableV2") @@ -34446,7 +36627,10 @@ begin if value_dtype !== nothing desc["value_dtype"] = Base.identity(value_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.eager_mode @@ -34464,7 +36648,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "SparseApplyMomentum") do desc = tf.NodeDescription("SparseApplyMomentum") @@ -34512,7 +36696,10 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -34530,7 +36717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueue") do desc = tf.NodeDescription("InfeedEnqueue") @@ -34568,7 +36755,10 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end desc["dtype"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) if tf.eager_mode @@ -34586,7 +36776,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniformInt") do desc = tf.NodeDescription("StatelessRandomUniformInt") @@ -34620,7 +36810,10 @@ begin desc["Tseed"] = tf.data_type(seed_) desc["dtype"] = tf.data_type(minval_) desc["dtype"] = tf.data_type(maxval_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) if tf.eager_mode @@ -34638,7 +36831,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -34683,7 +36876,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -34701,7 +36897,7 @@ end Sends the named tensor from send_device to recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Send") do desc = tf.NodeDescription("_Send") @@ -34745,7 +36941,10 @@ begin desc["client_terminated"] = Base.Bool(client_terminated) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + tf.add_node(res[1], node) + return res[1] end function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.eager_mode @@ -34763,7 +36962,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapPeek") do desc = tf.NodeDescription("MapPeek") @@ -34808,7 +37007,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -34826,7 +37028,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) local desc tf.with_op_name(name, "WriteScalarSummary") do desc = tf.NodeDescription("WriteScalarSummary") @@ -34849,7 +37051,10 @@ begin tf.add_input(desc, tag_) tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) if tf.eager_mode @@ -34867,7 +37072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, 
memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstageNoKey") do desc = tf.NodeDescription("OrderedMapUnstageNoKey") @@ -34914,7 +37119,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res end function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -34932,7 +37140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyCenteredRMSProp") do desc = tf.NodeDescription("SparseApplyCenteredRMSProp") @@ -34990,7 +37198,10 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -35008,7 +37219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatterV2") do desc = tf.NodeDescription("TensorListScatterV2") @@ -35045,7 +37256,10 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_scatter_v2, [tensor_, indices_, element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -35063,7 +37277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, 
dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInputV2") do desc = tf.NodeDescription("Conv3DBackpropInputV2") @@ -35110,7 +37324,10 @@ begin desc["Tshape"] = tf.data_type(input_sizes_) desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -35128,7 +37345,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") @@ -35166,7 +37383,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -35184,7 +37404,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomShuffle") do desc = tf.NodeDescription("RandomShuffle") @@ -35210,7 +37430,10 @@ begin desc["seed2"] = Base.Int(seed2) end desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res[1] end function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -35228,7 +37451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "UniformCandidateSampler") do desc = tf.NodeDescription("UniformCandidateSampler") @@ -35281,7 +37504,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = 
tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -35299,7 +37525,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV2") do desc = tf.NodeDescription("TensorArraySplitV2") @@ -35322,7 +37548,10 @@ begin tf.add_input(desc, lengths_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) if tf.eager_mode @@ -35340,7 +37569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTableV2") do desc = tf.NodeDescription("MutableDenseHashTableV2") @@ -35406,7 +37635,10 @@ begin end desc["key_dtype"] = tf.data_type(empty_key_) desc["key_dtype"] = tf.data_type(deleted_key_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) if tf.eager_mode @@ -35424,7 +37656,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) local desc tf.with_op_name(name, "DrawBoundingBoxes") do desc = tf.NodeDescription("DrawBoundingBoxes") @@ -35441,7 +37673,10 @@ begin tf.add_input(desc, images_) tf.add_input(desc, boxes_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(draw_bounding_boxes, 
[images_, boxes_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function draw_bounding_boxes(images_, boxes_; name=nothing) if tf.eager_mode @@ -35459,7 +37694,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalAdagrad") do desc = tf.NodeDescription("SparseApplyProximalAdagrad") @@ -35505,7 +37740,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -35523,7 +37761,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RangeDataset") do desc = tf.NodeDescription("RangeDataset") @@ -35553,7 +37791,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -35571,7 +37812,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreStateV2") do desc = tf.NodeDescription("ReaderRestoreStateV2") @@ -35586,7 +37827,10 @@ begin desc = tf.EagerOp("ReaderRestoreStateV2") tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_restore_state_v2(reader_handle_, state_; name=nothing) if tf.eager_mode @@ -35604,7 +37848,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopKV2") do desc = tf.NodeDescription("TopKV2") @@ -35632,7 +37876,10 @@ begin desc["sorted"] = Base.Bool(sorted) end desc["T"] = tf.data_type(input_) 
- tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing) + tf.add_node(res[1], node) + return res end function top_kv2(input_, k_; name=nothing, sorted=nothing) if tf.eager_mode @@ -35650,7 +37897,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function atanh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atanh") do desc = tf.NodeDescription("Atanh") @@ -35664,7 +37911,10 @@ begin desc = tf.EagerOp("Atanh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(atanh, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function atanh(x_; name=nothing) if tf.eager_mode @@ -35682,7 +37932,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientIdentity") do desc = tf.NodeDescription("DebugGradientIdentity") @@ -35696,7 +37946,10 @@ begin desc = tf.EagerOp("DebugGradientIdentity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function debug_gradient_identity(input_; name=nothing) if tf.eager_mode @@ -35714,7 +37967,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) local desc tf.with_op_name(name, "SparseAddGrad") do desc = tf.NodeDescription("SparseAddGrad") @@ -35742,7 +37995,10 @@ begin tf.add_input(desc, b_indices_) tf.add_input(desc, sum_indices_) desc["T"] = tf.data_type(backprop_val_grad_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) if tf.eager_mode @@ -35760,7 +38016,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterAdd") do desc = tf.NodeDescription("ResourceScatterAdd") @@ -35789,7 +38045,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ 
-35807,7 +38066,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ceil_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ceil_graph(x_; name=nothing) local desc tf.with_op_name(name, "Ceil") do desc = tf.NodeDescription("Ceil") @@ -35821,7 +38080,10 @@ begin desc = tf.EagerOp("Ceil") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ceil, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ceil(x_; name=nothing) if tf.eager_mode @@ -35839,7 +38101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "Save") do desc = tf.NodeDescription("Save") @@ -35863,7 +38125,10 @@ begin if T !== nothing desc["T"] = map(Base.identity, T) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing) + tf.add_node(res[1], node) + return res[1] end function save(filename_, tensor_names_, data_; name=nothing, T=nothing) if tf.eager_mode @@ -35881,7 +38146,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") @@ -35919,7 +38184,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -35937,7 +38205,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) local desc tf.with_op_name(name, "QuantizedConcat") do desc = tf.NodeDescription("QuantizedConcat") @@ -35971,7 +38239,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) if tf.eager_mode @@ -35989,7 +38260,7 
@@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zeros_like_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeros_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "ZerosLike") do desc = tf.NodeDescription("ZerosLike") @@ -36003,7 +38274,10 @@ begin desc = tf.EagerOp("ZerosLike") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(zeros_like, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function zeros_like(x_; name=nothing) if tf.eager_mode @@ -36021,7 +38295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalAvgPool") do desc = tf.NodeDescription("FractionalAvgPool") @@ -36076,7 +38350,10 @@ begin desc["seed2"] = Base.Int(seed2) end desc["T"] = tf.data_type(value_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -36094,7 +38371,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) local desc tf.with_op_name(name, "EditDistance") do desc = tf.NodeDescription("EditDistance") @@ -36130,7 +38407,10 @@ begin end desc["T"] = tf.data_type(hypothesis_values_) desc["T"] = tf.data_type(truth_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing) + tf.add_node(res[1], node) + return res[1] end function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) if tf.eager_mode @@ -36148,7 +38428,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueV2") do desc = tf.NodeDescription("UniqueV2") @@ -36178,7 +38458,10 @@ begin end desc["T"] = tf.data_type(x_) desc["Taxis"] = 
tf.data_type(axis_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing) + tf.add_node(res[1], node) + return res end function unique_v2(x_, axis_; name=nothing, out_idx=nothing) if tf.eager_mode @@ -36196,7 +38479,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV2") do desc = tf.NodeDescription("QuantizeAndDequantizeV2") @@ -36242,7 +38525,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(input_min_) desc["T"] = tf.data_type(input_max_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + tf.add_node(res[1], node) + return res[1] end function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) if tf.eager_mode @@ -36260,7 +38546,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantize") do desc = tf.NodeDescription("QuantizeAndDequantize") @@ -36304,7 +38590,10 @@ begin desc["input_max"] = Base.identity(input_max) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + tf.add_node(res[1], node) + return res[1] end function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) if tf.eager_mode @@ -36322,7 +38611,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPopBack") do desc = tf.NodeDescription("TensorListPopBack") @@ -36348,7 +38637,10 @@ begin if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_pop_back, [input_handle_, element_shape_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res end function tensor_list_pop_back(input_handle_, element_shape_; 
name=nothing, element_dtype=nothing) if tf.eager_mode @@ -36366,7 +38658,7 @@ end Debug NaN Value Counter Op """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNanCount") do desc = tf.NodeDescription("DebugNanCount") @@ -36404,7 +38696,10 @@ begin desc["gated_grpc"] = Base.Bool(gated_grpc) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + tf.add_node(res[1], node) + return res[1] end function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) if tf.eager_mode @@ -36422,7 +38717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdagradDA") do desc = tf.NodeDescription("ApplyAdagradDA") @@ -36469,7 +38764,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -36487,7 +38785,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNative") do desc = tf.NodeDescription("DepthwiseConv2dNative") @@ -36529,7 +38827,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -36547,7 +38848,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
serialize_iterator_graph(resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "SerializeIterator") do desc = tf.NodeDescription("SerializeIterator") @@ -36559,7 +38860,10 @@ begin function serialize_iterator_eager(resource_handle_; name=nothing) desc = tf.EagerOp("SerializeIterator") tf.add_input(desc, resource_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function serialize_iterator(resource_handle_; name=nothing) if tf.eager_mode @@ -36577,7 +38881,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "DatasetToGraph") do desc = tf.NodeDescription("DatasetToGraph") @@ -36589,7 +38893,10 @@ begin function dataset_to_graph_eager(input_dataset_; name=nothing) desc = tf.EagerOp("DatasetToGraph") tf.add_input(desc, input_dataset_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function dataset_to_graph(input_dataset_; name=nothing) if tf.eager_mode @@ -36607,7 +38914,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopK") do desc = tf.NodeDescription("TopK") @@ -36638,7 +38945,10 @@ begin desc["sorted"] = Base.Bool(sorted) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing) + tf.add_node(res[1], node) + return res end function top_k(input_; name=nothing, k=nothing, sorted=nothing) if tf.eager_mode @@ -36656,7 +38966,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrlV2") do desc = tf.NodeDescription("ResourceApplyFtrlV2") @@ -36705,7 +39015,10 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -36723,7 +39036,7 @@ end Replacement node for NcclBroadcast. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastRecv") do desc = tf.NodeDescription("_NcclBroadcastRecv") @@ -36747,7 +39060,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) if tf.eager_mode @@ -36765,7 +39081,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosed") do desc = tf.NodeDescription("QueueIsClosed") @@ -36777,7 +39093,10 @@ begin function queue_is_closed_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosed") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_is_closed, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_is_closed(handle_; name=nothing) if tf.eager_mode @@ -36795,7 +39114,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleDataset") do desc = tf.NodeDescription("ShuffleDataset") @@ -36834,7 +39153,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -36852,7 +39174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeSparse") do desc = tf.NodeDescription("DeserializeSparse") @@ -36877,7 +39199,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["Tserialized"] = tf.data_type(serialized_sparse_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(deserialize_sparse, 
[serialized_sparse_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res end function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) if tf.eager_mode @@ -36895,7 +39220,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueueV2") do desc = tf.NodeDescription("PriorityQueueV2") @@ -36934,7 +39259,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -36952,7 +39280,7 @@ end A graph node which represents an argument to a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceArg") do desc = tf.NodeDescription("_DeviceArg") @@ -36967,7 +39295,10 @@ begin if index !== nothing desc["index"] = Base.Int(index) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing) + tf.add_node(res[1], node) + return res[1] end function _device_arg(; name=nothing, index=nothing) if tf.eager_mode @@ -36985,7 +39316,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "TruncatedNormal") do desc = tf.NodeDescription("TruncatedNormal") @@ -37017,7 +39348,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["T"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.eager_mode @@ -37035,7 +39369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "TensorForestTreePredict") do desc = tf.NodeDescription("TensorForestTreePredict") @@ -37056,7 +39390,10 @@ begin if 
logits_dimension !== nothing desc["logits_dimension"] = Base.Int(logits_dimension) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) if tf.eager_mode @@ -37074,7 +39411,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "StackV2") do desc = tf.NodeDescription("StackV2") @@ -37098,7 +39435,10 @@ begin if stack_name !== nothing desc["stack_name"] = Base.String(stack_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) if tf.eager_mode @@ -37116,7 +39456,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) local desc tf.with_op_name(name, "AccumulatorNumAccumulated") do desc = tf.NodeDescription("AccumulatorNumAccumulated") @@ -37128,7 +39468,10 @@ begin function accumulator_num_accumulated_eager(handle_; name=nothing) desc = tf.EagerOp("AccumulatorNumAccumulated") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function accumulator_num_accumulated(handle_; name=nothing) if tf.eager_mode @@ -37146,7 +39489,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderResetV2") do desc = tf.NodeDescription("ReaderResetV2") @@ -37158,7 +39501,10 @@ begin function reader_reset_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderResetV2") tf.add_input(desc, reader_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_reset_v2(reader_handle_; name=nothing) if tf.eager_mode @@ -37176,7 +39522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAddSign") do desc = tf.NodeDescription("ApplyAddSign") @@ -37220,7 +39566,10 @@ begin desc["T"] = tf.data_type(sign_decay_) desc["T"] = 
tf.data_type(beta_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -37238,7 +39587,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -37276,7 +39625,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -37294,7 +39646,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rint_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rint_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rint") do desc = tf.NodeDescription("Rint") @@ -37308,7 +39660,10 @@ begin desc = tf.EagerOp("Rint") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rint, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rint(x_; name=nothing) if tf.eager_mode @@ -37326,7 +39681,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -37364,7 +39719,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -37382,7 +39740,7 @@ end """ 
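[Two return conventions are visible in the hunks above and below: single-output ops such as `apply_add_sign` return `res[1]`, while multi-output ops such as the `retrieve_tpu_embedding_*` family return the whole `res` vector; in both cases the tape node is attached to `res[1]` only. The `finish` helper below is hypothetical and just illustrates that split.]

# Hypothetical helper showing the two conventions; only res[1] keys the tape.
function finish(tape::IdDict, res::Vector, node; multi::Bool)
    tape[res[1]] = node            # tf.add_node(res[1], node) in the patch
    return multi ? res : res[1]
end

t = IdDict{Any,Any}()
finish(t, Any[42], :node; multi=false)        # single output  => 42
finish(t, Any[1, 2, 3], :node2; multi=true)   # multi output   => [1, 2, 3]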
begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) local desc tf.with_op_name(name, "ExtractGlimpse") do desc = tf.NodeDescription("ExtractGlimpse") @@ -37424,7 +39782,10 @@ begin if noise !== nothing desc["noise"] = Base.String(noise) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + tf.add_node(res[1], node) + return res[1] end function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) if tf.eager_mode @@ -37442,7 +39803,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) local desc tf.with_op_name(name, "StringToHashBucketStrong") do desc = tf.NodeDescription("StringToHashBucketStrong") @@ -37466,7 +39827,10 @@ begin if key !== nothing desc["key"] = map(Base.identity, key) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing) + tf.add_node(res[1], node) + return res[1] end function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) if tf.eager_mode @@ -37484,7 +39848,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OneShotIterator") do desc = tf.NodeDescription("OneShotIterator") @@ -37523,7 +39887,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -37541,7 +39908,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; 
name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyMomentum") do desc = tf.NodeDescription("ResourceSparseApplyMomentum") @@ -37587,7 +39954,10 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -37605,7 +39975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "SaveSlices") do desc = tf.NodeDescription("SaveSlices") @@ -37632,7 +40002,10 @@ begin if T !== nothing desc["T"] = map(Base.identity, T) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing) + tf.add_node(res[1], node) + return res[1] end function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) if tf.eager_mode @@ -37650,7 +40023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetCardinality") do desc = tf.NodeDescription("ExperimentalDatasetCardinality") @@ -37662,7 +40035,10 @@ begin function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetCardinality") tf.add_input(desc, input_dataset_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_dataset_cardinality(input_dataset_; name=nothing) if tf.eager_mode @@ -37680,7 +40056,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_finite_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_finite_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsFinite") do desc = tf.NodeDescription("IsFinite") @@ -37694,7 +40070,10 @@ begin desc = tf.EagerOp("IsFinite") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_finite, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function is_finite(x_; name=nothing) if tf.eager_mode @@ -37712,7 +40091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, 
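[Each op keeps the three-way split used throughout this file: a `*_graph` builder, a `*_eager` executor, and a thin public wrapper that branches on `tf.eager_mode`. A standalone sketch of that dispatch, with both backends mocked and `eager_mode` faked as a Ref:]

# Sketch of the dispatch wrapper; eager_mode is the module flag the generated
# wrappers test, faked here. Both backends are stand-ins.
const eager_mode = Ref(true)

tan_graph(x; name=nothing) = (:graph_node, x)   # would build a NodeDescription
tan_eager(x; name=nothing) = tan.(x)            # runs the kernel immediately

my_tan(x; name=nothing) =
    eager_mode[] ? tan_eager(x; name=name) : tan_graph(x; name=name)

my_tan([0.0, 1.0])            # eager: returns the computed values
eager_mode[] = false
my_tan([0.0, 1.0])            # graph: returns the symbolic node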
Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") @@ -37766,7 +40145,10 @@ begin if preserve_cardinality !== nothing desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.eager_mode @@ -37784,7 +40166,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) local desc tf.with_op_name(name, "AllToAll") do desc = tf.NodeDescription("AllToAll") @@ -37819,7 +40201,10 @@ begin desc["split_count"] = Base.Int(split_count) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + tf.add_node(res[1], node) + return res[1] end function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) if tf.eager_mode @@ -37837,7 +40222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TakeManySparseFromTensorsMap") do desc = tf.NodeDescription("TakeManySparseFromTensorsMap") @@ -37872,7 +40257,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res end function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -37890,7 +40278,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiagPart") do desc = tf.NodeDescription("BatchMatrixDiagPart") @@ -37904,7 +40292,10 @@ begin desc = tf.EagerOp("BatchMatrixDiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_diag_part(input_; name=nothing) if tf.eager_mode @@ -37922,7 +40313,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDataset") do desc = tf.NodeDescription("FixedLengthRecordDataset") @@ -37946,7 +40337,10 @@ begin tf.add_input(desc, record_bytes_) tf.add_input(desc, footer_bytes_) tf.add_input(desc, buffer_size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) if tf.eager_mode @@ -37964,7 +40358,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPush") do desc = tf.NodeDescription("StackPush") @@ -37987,7 +40381,10 @@ begin desc["swap_memory"] = Base.Bool(swap_memory) end desc["T"] = tf.data_type(elem_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) if tf.eager_mode @@ -38005,7 +40402,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderV2") do desc = tf.NodeDescription("PlaceholderV2") @@ -38026,7 +40423,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -38044,7 +40444,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorInit") do desc = tf.NodeDescription("MultiDeviceIteratorInit") @@ -38062,7 +40462,10 @@ begin tf.add_input(desc, dataset_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, max_buffer_size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) if tf.eager_mode @@ -38080,7 +40483,7 @@ end Re-configures the GCS block cache with the new configuration values. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureBlockCache") do desc = tf.NodeDescription("GcsConfigureBlockCache") @@ -38098,7 +40501,10 @@ begin tf.add_input(desc, max_cache_size_) tf.add_input(desc, block_size_) tf.add_input(desc, max_staleness_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) if tf.eager_mode @@ -38116,7 +40522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueV2") do desc = tf.NodeDescription("QueueDequeueV2") @@ -38140,7 +40546,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -38158,7 +40567,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") @@ -38196,7 +40605,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - 
tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -38214,7 +40626,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "Transpose") do desc = tf.NodeDescription("Transpose") @@ -38233,7 +40645,10 @@ begin tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) desc["Tperm"] = tf.data_type(perm_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(transpose, [x_, perm_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function transpose(x_, perm_; name=nothing) if tf.eager_mode @@ -38251,7 +40666,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ifft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT") do desc = tf.NodeDescription("IFFT") @@ -38265,7 +40680,10 @@ begin desc = tf.EagerOp("IFFT") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ifft, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ifft(input_; name=nothing) if tf.eager_mode @@ -38283,7 +40701,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") @@ -38311,7 +40729,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -38329,7 +40750,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosedV2") do desc = tf.NodeDescription("QueueIsClosedV2") @@ -38341,7 +40762,10 @@ begin function queue_is_closed_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosedV2") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing) + tf.add_node(res[1], node) + return 
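[The eager variants infer attribute dtypes from their runtime inputs, as in the `transpose` hunk just above: `desc["T"] = tf.data_type(x_)`, `desc["Tperm"] = tf.data_type(perm_)`, and so on. When several inputs share the `"T"` attribute the generator writes it once per input, which is redundant but harmless when the inputs agree. A small sketch, assuming `data_type` maps a Julia array to its element type:]

# Sketch of dtype-attribute inference under the names the kernels expect.
data_type(x::AbstractArray) = eltype(x)

desc = Dict{String,Any}()
x, perm = [1.0 2.0; 3.0 4.0], [2, 1]
desc["T"]     = data_type(x)       # Float64
desc["Tperm"] = data_type(perm)    # Int64
desc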
res[1] end function queue_is_closed_v2(handle_; name=nothing) if tf.eager_mode @@ -38359,7 +40783,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "ParameterizedTruncatedNormal") do desc = tf.NodeDescription("ParameterizedTruncatedNormal") @@ -38408,7 +40832,10 @@ begin desc["dtype"] = tf.data_type(stdevs_) desc["dtype"] = tf.data_type(minvals_) desc["dtype"] = tf.data_type(maxvals_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.eager_mode @@ -38426,7 +40853,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function diag_part_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "DiagPart") do desc = tf.NodeDescription("DiagPart") @@ -38440,7 +40867,10 @@ begin desc = tf.EagerOp("DiagPart") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(diag_part, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function diag_part(input_; name=nothing) if tf.eager_mode @@ -38458,7 +40888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) local desc tf.with_op_name(name, "KmeansPlusPlusInitialization") do desc = tf.NodeDescription("KmeansPlusPlusInitialization") @@ -38479,7 +40909,10 @@ begin tf.add_input(desc, num_to_sample_) tf.add_input(desc, seed_) tf.add_input(desc, num_retries_per_sample_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(kmeans_plus_plus_initialization, [points_, num_to_sample_, seed_, num_retries_per_sample_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) if tf.eager_mode @@ -38497,7 +40930,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) local desc tf.with_op_name(name, "RegexReplace") do desc = tf.NodeDescription("RegexReplace") @@ -38521,7 +40954,10 @@ begin if replace_global !== nothing 
desc["replace_global"] = Base.Bool(replace_global) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing) + tf.add_node(res[1], node) + return res[1] end function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) if tf.eager_mode @@ -38539,7 +40975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) local desc tf.with_op_name(name, "SparseTensorDenseMatMul") do desc = tf.NodeDescription("SparseTensorDenseMatMul") @@ -38578,7 +41014,10 @@ begin desc["Tindices"] = tf.data_type(a_indices_) desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) if tf.eager_mode @@ -38596,7 +41035,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) local desc tf.with_op_name(name, "MapDefun") do desc = tf.NodeDescription("MapDefun") @@ -38641,7 +41080,10 @@ begin if f !== nothing desc["f"] = Base.identity(f) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + tf.add_node(res[1], node) + return res[1] end function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) if tf.eager_mode @@ -38659,7 +41101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") @@ -38712,7 +41154,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, 
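[Note that the first argument to `tf.TapeNode` is the Julia function itself (`regex_replace`, `map_defun`, ...) rather than the op's string name. Presumably this lets a backward pass dispatch on the function to locate its gradient rule; the `grad` generic below is invented purely to illustrate that idea and is not part of this patch.]

# Speculative sketch: storing the function in the node enables dispatch-based
# gradient lookup. grad and its method are invented for illustration.
mysquare(x) = x .^ 2
grad(::typeof(mysquare), Δ, x) = 2 .* x .* Δ   # one method per recorded op

node_op = mysquare                      # what a TapeNode would carry
grad(node_op, [1.0, 1.0], [3.0, 4.0])   # => [6.0, 8.0]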
unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res end function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -38730,7 +41175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") @@ -38768,7 +41213,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -38786,7 +41234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "ParallelConcat") do desc = tf.NodeDescription("ParallelConcat") @@ -38812,7 +41260,10 @@ begin desc["shape"] = Base.identity(shape) end desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) if tf.eager_mode @@ -38830,7 +41281,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFindV2") do desc = tf.NodeDescription("LookupTableFindV2") @@ -38852,7 +41303,10 @@ begin tf.add_input(desc, default_value_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(default_value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) if tf.eager_mode @@ -38870,7 +41324,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeDeserialize") do desc = tf.NodeDescription("TensorForestTreeDeserialize") @@ -38885,7 +41339,10 @@ begin desc = tf.EagerOp("TensorForestTreeDeserialize") tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) if tf.eager_mode @@ -38903,7 +41360,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") @@ -38941,7 +41398,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -38959,7 +41419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") @@ -38995,7 +41455,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ -39013,7 +41476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyGradientDescent") do desc = tf.NodeDescription("ResourceApplyGradientDescent") @@ -39040,7 +41503,10 @@ begin end desc["T"] = tf.data_type(alpha_) desc["T"] = tf.data_type(delta_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
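[Taken together, the `add_node` entries form an implicit dataflow record keyed on results. A speculative sketch of how such a tape can be walked backwards; everything here is invented except the idea of keying nodes on their outputs:]

# Speculative backward walk over a result-keyed tape.
struct Node; op::Symbol; inputs::Vector{Any}; end
tape = IdDict{Any,Node}()

a = [1.0, 2.0]
b = a .^ 2;  tape[b] = Node(:square, Any[a])
c = sum(b);  tape[c] = Node(:sum, Any[b])

rules = Dict(:sum    => (Δ, xs) -> Any[fill(Δ, size(xs[1]))],
             :square => (Δ, xs) -> Any[2 .* xs[1] .* Δ])

function backwalk(y, Δ)
    haskey(tape, y) || return            # reached a leaf input
    n = tape[y]
    for (x, g) in zip(n.inputs, rules[n.op](Δ, n.inputs))
        backwalk(x, g)                   # propagate the gradient upstream
    end
end
backwalk(c, 1.0)                         # visits :sum, then :square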
tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -39058,7 +41524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") @@ -39091,7 +41557,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -39109,7 +41578,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) local desc tf.with_op_name(name, "DecodeRaw") do desc = tf.NodeDescription("DecodeRaw") @@ -39133,7 +41602,10 @@ begin if little_endian !== nothing desc["little_endian"] = Base.Bool(little_endian) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing) + tf.add_node(res[1], node) + return res[1] end function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) if tf.eager_mode @@ -39151,7 +41623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") @@ -39189,7 +41661,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res end function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ 
-39207,7 +41682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCountsV2") do desc = tf.NodeDescription("UniqueWithCountsV2") @@ -39237,7 +41712,10 @@ begin end desc["T"] = tf.data_type(x_) desc["Taxis"] = tf.data_type(axis_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing) + tf.add_node(res[1], node) + return res end function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) if tf.eager_mode @@ -39255,7 +41733,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSleepDataset") do desc = tf.NodeDescription("ExperimentalSleepDataset") @@ -39282,7 +41760,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -39300,7 +41781,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) local desc tf.with_op_name(name, "TPUReplicatedOutput") do desc = tf.NodeDescription("TPUReplicatedOutput") @@ -39325,7 +41806,10 @@ begin desc["num_replicas"] = Base.Int(num_replicas) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing) + tf.add_node(res[1], node) + return res end function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) if tf.eager_mode @@ -39343,7 +41827,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "LowerBound") do desc = tf.NodeDescription("LowerBound") @@ -39367,7 +41851,10 @@ begin end desc["T"] = tf.data_type(sorted_inputs_) desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res[1] end function lower_bound(sorted_inputs_, 
values_; name=nothing, out_type=nothing) if tf.eager_mode @@ -39385,7 +41872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tan_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tan") do desc = tf.NodeDescription("Tan") @@ -39399,7 +41886,10 @@ begin desc = tf.EagerOp("Tan") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tan, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tan(x_; name=nothing) if tf.eager_mode @@ -39417,7 +41907,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "Enter") do desc = tf.NodeDescription("Enter") @@ -39449,7 +41939,10 @@ begin desc["parallel_iterations"] = Base.Int(parallel_iterations) end desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + tf.add_node(res[1], node) + return res[1] end function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) if tf.eager_mode @@ -39467,7 +41960,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueueTuple") do desc = tf.NodeDescription("InfeedEnqueueTuple") @@ -39503,7 +41996,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) if tf.eager_mode @@ -39521,7 +42017,7 @@ end An op that informs a host of the global ids of all the of TPUs in the """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) local desc tf.with_op_name(name, "_SetGlobalTPUArray") do desc = tf.NodeDescription("_SetGlobalTPUArray") @@ -39533,7 +42029,10 @@ begin function _set_global_tpu_array_eager(topology_; name=nothing) desc = tf.EagerOp("_SetGlobalTPUArray") tf.add_input(desc, topology_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
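[Keyword attributes are coerced through explicit constructors before being stored: scalars via `Base.Int`/`Base.Bool`/`Base.String`, list-valued attributes via `map(Base.identity, ...)`, as in the `enter` and `infeed_enqueue_tuple` hunks nearby. In plain Julia terms, outside any TF context:]

# The coercion idioms used throughout these hunks.
desc = Dict{String,Any}()
desc["frame_name"]          = Base.String("while_loop")  # force a String
desc["is_constant"]         = Base.Bool(true)            # force a Bool
desc["parallel_iterations"] = Base.Int(10)               # force an Int
desc["shapes"]              = map(Base.identity, [(2, 3), (4,)])  # copy list
desc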
_set_global_tpu_array(topology_; name=nothing) if tf.eager_mode @@ -39551,7 +42050,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function square_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) local desc tf.with_op_name(name, "Square") do desc = tf.NodeDescription("Square") @@ -39565,7 +42064,10 @@ begin desc = tf.EagerOp("Square") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(square, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function square(x_; name=nothing) if tf.eager_mode @@ -39583,7 +42085,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientRefIdentity") do desc = tf.NodeDescription("DebugGradientRefIdentity") @@ -39597,7 +42099,10 @@ begin desc = tf.EagerOp("DebugGradientRefIdentity") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function debug_gradient_ref_identity(input_; name=nothing) if tf.eager_mode @@ -39615,7 +42120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdadelta") do desc = tf.NodeDescription("ApplyAdadelta") @@ -39659,7 +42164,10 @@ begin desc["T"] = tf.data_type(rho_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -39677,7 +42185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, 
output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") @@ -39746,7 +42254,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -39764,7 +42275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummary") do desc = tf.NodeDescription("AudioSummary") @@ -39791,7 +42302,10 @@ begin if max_outputs !== nothing desc["max_outputs"] = Base.Int(max_outputs) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing) + tf.add_node(res[1], node) + return res[1] end function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) if tf.eager_mode @@ -39809,7 +42323,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "SquaredDifference") do desc = tf.NodeDescription("SquaredDifference") @@ -39827,7 +42341,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(squared_difference, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function squared_difference(x_, y_; name=nothing) if tf.eager_mode @@ -39845,7 +42362,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalTakeWhileDataset") do desc = tf.NodeDescription("ExperimentalTakeWhileDataset") @@ -39884,7 +42401,10 @@ begin 
if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_take_while_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -39902,7 +42422,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdUpdate") do desc = tf.NodeDescription("ScatterNdUpdate") @@ -39932,7 +42452,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -39950,7 +42473,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "DynamicStitch") do desc = tf.NodeDescription("DynamicStitch") @@ -39973,7 +42496,10 @@ begin desc["N"] = Base.Int(N) end desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function dynamic_stitch(indices_, data_; name=nothing, N=nothing) if tf.eager_mode @@ -39991,7 +42517,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ones_like_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "OnesLike") do desc = tf.NodeDescription("OnesLike") @@ -40005,7 +42531,10 @@ begin desc = tf.EagerOp("OnesLike") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ones_like, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ones_like(x_; name=nothing) if tf.eager_mode @@ -40023,7 +42552,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalMaxPoolGrad") do desc = 
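[The `*_graph` halves all follow the control flow visible in `ones_like_graph` above: declare `local desc`, populate a `NodeDescription` inside a `with_op_name` scope, then materialize it. A standalone mock of that pattern; `NodeDescription`, `with_op_name`, and `add_input` are stand-ins here, not TensorFlow.jl's definitions:]

# Mock of the *_graph construction pattern, runnable on its own.
mutable struct NodeDescription
    op::String
    inputs::Vector{Any}
    attrs::Dict{String,Any}
end
NodeDescription(op) = NodeDescription(op, Any[], Dict{String,Any}())

with_op_name(f, name, default) = f()       # real version pushes a name scope
add_input(desc, x) = push!(desc.inputs, x)

function ones_like_graph(x; name=nothing)
    local desc                             # assigned inside the do-block
    with_op_name(name, "OnesLike") do
        desc = NodeDescription("OnesLike")
        add_input(desc, x)
        desc.attrs["T"] = eltype(x)        # stands in for tf.data_type
    end
    return desc    # the generated code wraps this in an Operation/Tensor
end

ones_like_graph([1.0, 2.0])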
tf.NodeDescription("FractionalMaxPoolGrad") @@ -40057,7 +42586,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing) + tf.add_node(res[1], node) + return res[1] end function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) if tf.eager_mode @@ -40075,7 +42607,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "RemoteCall") do desc = tf.NodeDescription("RemoteCall") @@ -40108,7 +42640,10 @@ begin if f !== nothing desc["f"] = Base.identity(f) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + tf.add_node(res[1], node) + return res[1] end function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if tf.eager_mode @@ -40126,7 +42661,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "Gather") do desc = tf.NodeDescription("Gather") @@ -40152,7 +42687,10 @@ begin end desc["Tparams"] = tf.data_type(params_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing) + tf.add_node(res[1], node) + return res[1] end function gather(params_, indices_; name=nothing, validate_indices=nothing) if tf.eager_mode @@ -40170,7 +42708,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "QuantizedMatMul") do desc = tf.NodeDescription("QuantizedMatMul") @@ -40218,7 +42756,10 @@ begin end desc["T1"] = tf.data_type(a_) desc["T2"] = tf.data_type(b_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing) + tf.add_node(res[1], node) + return res end function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) if tf.eager_mode @@ -40236,7 +42777,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, 
replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecodeWithOffsets") do desc = tf.NodeDescription("UnicodeDecodeWithOffsets") @@ -40277,7 +42818,10 @@ begin if replace_control_characters !== nothing desc["replace_control_characters"] = Base.Bool(replace_control_characters) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + tf.add_node(res[1], node) + return res end function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) if tf.eager_mode @@ -40295,7 +42839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") @@ -40340,7 +42884,10 @@ begin if table_ids !== nothing desc["table_ids"] = map(Base.identity, table_ids) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + tf.add_node(res[1], node) + return res[1] end function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) if tf.eager_mode @@ -40358,7 +42905,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorApplyGradient") do desc = tf.NodeDescription("AccumulatorApplyGradient") @@ -40384,7 +42931,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(gradient_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) if tf.eager_mode @@ -40402,7 +42952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) local desc tf.with_op_name(name, "WriteSummary") do desc = tf.NodeDescription("WriteSummary") @@ -40428,7 +42978,10 @@ begin tf.add_input(desc, tag_) tf.add_input(desc, summary_metadata_) desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) if tf.eager_mode @@ -40446,7 +42999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2D") do desc = tf.NodeDescription("QuantizedConv2D") @@ -40506,7 +43059,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -40524,7 +43080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyMomentum") do desc = tf.NodeDescription("ResourceApplyMomentum") @@ -40564,7 +43120,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) desc["T"] = tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -40582,7 +43141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log1p_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log1p") do desc = tf.NodeDescription("Log1p") @@ -40596,7 +43155,10 @@ begin desc = tf.EagerOp("Log1p") tf.add_input(desc, x_) desc["T"] = 
tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(log1p, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function log1p(x_; name=nothing) if tf.eager_mode @@ -40614,7 +43176,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapClear") do desc = tf.NodeDescription("OrderedMapClear") @@ -40653,7 +43215,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -40671,7 +43236,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterUpdate") do desc = tf.NodeDescription("ResourceScatterUpdate") @@ -40700,7 +43265,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -40718,7 +43286,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "BarrierTakeMany") do desc = tf.NodeDescription("BarrierTakeMany") @@ -40762,7 +43330,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res end function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) if tf.eager_mode @@ -40780,7 +43351,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyKerasMomentum") do desc = tf.NodeDescription("ResourceApplyKerasMomentum") @@ -40820,7 +43391,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) desc["T"] = tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -40838,7 +43412,7 @@ end Generates serialized partition messages suitable for batch reads. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") @@ -40889,7 +43463,10 @@ begin if test_end_point !== nothing desc["test_end_point"] = Base.String(test_end_point) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + tf.add_node(res[1], node) + return res[1] end function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) if tf.eager_mode @@ -40907,7 +43484,7 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaRecvAtHost") do desc = tf.NodeDescription("_XlaRecvAtHost") @@ -40937,7 +43514,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) if tf.eager_mode @@ 
-40955,7 +43535,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedAvgPool") do desc = tf.NodeDescription("QuantizedAvgPool") @@ -40998,7 +43578,10 @@ begin desc["padding"] = Base.String(padding) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res end function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -41016,7 +43599,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") @@ -41072,7 +43655,10 @@ begin desc["T"] = tf.data_type(beta2_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -41090,7 +43676,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) local desc tf.with_op_name(name, "TensorListResize") do desc = tf.NodeDescription("TensorListResize") @@ -41105,7 +43691,10 @@ begin desc = tf.EagerOp("TensorListResize") tf.add_input(desc, input_handle_) tf.add_input(desc, size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_resize, [input_handle_, size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_resize(input_handle_, size_; name=nothing) if tf.eager_mode @@ -41123,7 +43712,7 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostRecv") do desc = tf.NodeDescription("_HostRecv") @@ -41168,7 +43757,10 @@ begin if client_terminated !== nothing desc["client_terminated"] = Base.Bool(client_terminated) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + tf.add_node(res[1], node) + return res[1] end function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.eager_mode @@ -41186,7 +43778,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCenterBias") do desc = tf.NodeDescription("BoostedTreesCenterBias") @@ -41210,7 +43802,10 @@ begin tf.add_input(desc, mean_hessians_) tf.add_input(desc, l1_) tf.add_input(desc, l2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) if tf.eager_mode @@ -41228,7 +43823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSizeV2") do desc = tf.NodeDescription("LookupTableSizeV2") @@ -41240,7 +43835,10 @@ begin function lookup_table_size_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSizeV2") tf.add_input(desc, table_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_size_v2(table_handle_; name=nothing) if tf.eager_mode @@ -41258,7 +43856,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT") do desc = tf.NodeDescription("IRFFT") @@ -41273,7 +43871,10 @@ begin desc = tf.EagerOp("IRFFT") tf.add_input(desc, 
input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function irfft(input_, fft_length_; name=nothing) if tf.eager_mode @@ -41291,7 +43892,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceAdd") do desc = tf.NodeDescription("InplaceAdd") @@ -41312,7 +43913,10 @@ begin tf.add_input(desc, v_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(v_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function inplace_add(x_, i_, v_; name=nothing) if tf.eager_mode @@ -41330,7 +43934,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAdd") do desc = tf.NodeDescription("BiasAdd") @@ -41354,7 +43958,10 @@ begin end desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(bias_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function bias_add(value_, bias_; name=nothing, data_format=nothing) if tf.eager_mode @@ -41372,7 +43979,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") @@ -41417,7 +44024,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -41435,7 +44045,7 @@ end An op that disconnects the TPUs on a host from a running distributed """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
_disconnect_host_from_distributed_tpu_system_graph(; name=nothing) local desc tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do desc @@ -41445,7 +44055,10 @@ begin end function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing) + tf.add_node(res[1], node) + return res[1] end function _disconnect_host_from_distributed_tpu_system(; name=nothing) if tf.eager_mode @@ -41463,7 +44076,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) local desc tf.with_op_name(name, "RaggedRange") do desc = tf.NodeDescription("RaggedRange") @@ -41490,7 +44103,10 @@ begin desc["T"] = tf.data_type(starts_) desc["T"] = tf.data_type(limits_) desc["T"] = tf.data_type(deltas_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing) + tf.add_node(res[1], node) + return res end function ragged_range(starts_, limits_, deltas_; name=nothing) if tf.eager_mode @@ -41508,7 +44124,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "WindowDataset") do desc = tf.NodeDescription("WindowDataset") @@ -41544,7 +44160,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -41562,7 +44181,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "Diag") do desc = tf.NodeDescription("Diag") @@ -41576,7 +44195,10 @@ begin desc = tf.EagerOp("Diag") tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(diag, [diagonal_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function diag(diagonal_; name=nothing) if tf.eager_mode @@ -41594,7 +44216,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) local desc 
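# Graph-mode half of each generated pair: `desc` is declared up front, the
# NodeDescription is built inside `tf.with_op_name` so the op picks up the
# caller's name scope, and inputs/attrs are filled in before `tf.@op` wraps it.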
tf.with_op_name(name, "InfeedDequeue") do desc = tf.NodeDescription("InfeedDequeue") @@ -41615,7 +44237,10 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing) + tf.add_node(res[1], node) + return res[1] end function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) if tf.eager_mode @@ -41633,7 +44258,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") @@ -41660,7 +44285,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -41678,7 +44306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddSparseToTensorsMap") do desc = tf.NodeDescription("AddSparseToTensorsMap") @@ -41710,7 +44338,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(sparse_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -41728,7 +44359,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedGather") do desc = tf.NodeDescription("RaggedGather") @@ -41768,7 +44399,10 @@ begin end desc["Tvalues"] = tf.data_type(params_dense_values_) desc["Tindices"] = tf.data_type(indices_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, 
OUTPUT_RAGGED_RANK=nothing) + tf.add_node(res[1], node) + return res end function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) if tf.eager_mode @@ -41786,7 +44420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) local desc tf.with_op_name(name, "RGBToHSV") do desc = tf.NodeDescription("RGBToHSV") @@ -41800,7 +44434,10 @@ begin desc = tf.EagerOp("RGBToHSV") tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function rgb_to_hsv(images_; name=nothing) if tf.eager_mode @@ -41818,7 +44455,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") @@ -41830,7 +44467,10 @@ begin function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") tf.add_input(desc, multi_device_iterator_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) if tf.eager_mode @@ -41848,7 +44488,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) local desc tf.with_op_name(name, "For") do desc = tf.NodeDescription("For") @@ -41881,7 +44521,10 @@ begin if body !== nothing desc["body"] = Base.identity(body) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing) + tf.add_node(res[1], node) + return res[1] end function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) if tf.eager_mode @@ -41899,7 +44542,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMaxSparse") do desc = tf.NodeDescription("SparseReduceMaxSparse") @@ -41933,7 +44576,10 @@ begin desc["keep_dims"] = Base.Bool(keep_dims) end desc["T"] = tf.data_type(input_values_) - tf.execute(desc) + res = tf.execute(desc) + 
node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res end function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -41951,7 +44597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatOffset") do desc = tf.NodeDescription("ConcatOffset") @@ -41977,7 +44623,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res end function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) if tf.eager_mode @@ -41995,7 +44644,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Stage") do desc = tf.NodeDescription("Stage") @@ -42037,7 +44686,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -42055,7 +44707,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "Switch") do desc = tf.NodeDescription("Switch") @@ -42077,7 +44729,10 @@ begin tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(switch, [data_, pred_], name=nothing) + tf.add_node(res[1], node) + return res end function switch(data_, pred_; name=nothing) if tf.eager_mode @@ -42095,7 +44750,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueManyV2") do desc = tf.NodeDescription("QueueDequeueManyV2") @@ -42122,7 +44777,10 @@ begin if timeout_ms !== nothing desc["timeout_ms"] = Base.Int(timeout_ms) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.eager_mode @@ -42140,7 +44798,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentProd") do desc = tf.NodeDescription("SegmentProd") @@ -42160,7 +44818,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function segment_prod(data_, segment_ids_; name=nothing) if tf.eager_mode @@ -42178,7 +44839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) local desc tf.with_op_name(name, "ApproximateEqual") do desc = tf.NodeDescription("ApproximateEqual") @@ -42202,7 +44863,10 @@ begin end desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing) + tf.add_node(res[1], node) + return res[1] end function approximate_equal(x_, y_; name=nothing, tolerance=nothing) if tf.eager_mode @@ -42220,7 +44884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2D") do desc = tf.NodeDescription("Conv2D") @@ -42274,7 +44938,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -42292,7 +44959,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) local desc tf.with_op_name(name, "CrossReplicaSum") do desc = tf.NodeDescription("CrossReplicaSum") @@ -42309,7 +44976,10 @@ begin 
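# Eager half: the same inputs go onto the EagerOp, and the "T" attr is
# inferred from the runtime dtype of each handle via tf.data_type, since
# eager tensors carry their dtype at execution time rather than at
# graph-construction time.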
tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function cross_replica_sum(input_, group_assignment_; name=nothing) if tf.eager_mode @@ -42327,7 +44997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) local desc tf.with_op_name(name, "SparseMatMul") do desc = tf.NodeDescription("SparseMatMul") @@ -42370,7 +45040,10 @@ begin end desc["Ta"] = tf.data_type(a_) desc["Tb"] = tf.data_type(b_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) if tf.eager_mode @@ -42388,7 +45061,7 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorSplit") do desc = tf.NodeDescription("_ScopedAllocatorSplit") @@ -42435,7 +45108,10 @@ begin end desc["T"] = tf.data_type(concat_) desc["T"] = tf.data_type(split_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + tf.add_node(res[1], node) + return res end function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) if tf.eager_mode @@ -42453,7 +45129,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igammac_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igammac") do desc = tf.NodeDescription("Igammac") @@ -42471,7 +45147,10 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(igammac, [a_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function igammac(a_, x_; name=nothing) if tf.eager_mode @@ -42489,7 +45168,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) local desc 
tf.with_op_name(name, "BatchMatMul") do desc = tf.NodeDescription("BatchMatMul") @@ -42519,7 +45198,10 @@ begin end desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) if tf.eager_mode @@ -42537,7 +45219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") @@ -42576,7 +45258,10 @@ begin if combiners !== nothing desc["combiners"] = map(Base.identity, combiners) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + tf.add_node(res[1], node) + return res[1] end function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) if tf.eager_mode @@ -42594,7 +45279,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueCloseV2") do desc = tf.NodeDescription("QueueCloseV2") @@ -42612,7 +45297,10 @@ begin if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing) + tf.add_node(res[1], node) + return res[1] end function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) if tf.eager_mode @@ -42630,7 +45318,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayPack") do desc = tf.NodeDescription("TensorArrayPack") @@ -42657,7 +45345,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_pack(handle_, flow_in_; name=nothing, 
dtype=nothing, element_shape=nothing) if tf.eager_mode @@ -42675,7 +45366,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreState") do desc = tf.NodeDescription("ReaderRestoreState") @@ -42690,7 +45381,10 @@ begin desc = tf.EagerOp("ReaderRestoreState") tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reader_restore_state(reader_handle_, state_; name=nothing) if tf.eager_mode @@ -42708,7 +45402,7 @@ end *NOTE*: Do not invoke this operator directly in Python. Grappler is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) local desc tf.with_op_name(name, "_FusedConv2D") do desc = tf.NodeDescription("_FusedConv2D") @@ -42778,7 +45472,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(args_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + tf.add_node(res[1], node) + return res[1] end function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) if tf.eager_mode @@ -42796,7 +45493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) local desc tf.with_op_name(name, "_ReadVariablesOp") do desc = tf.NodeDescription("_ReadVariablesOp") @@ -42820,7 +45517,10 @@ begin if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing) + tf.add_node(res[1], node) + return res[1] end function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) if tf.eager_mode @@ -42838,7 +45538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensors") do desc = tf.NodeDescription("MutableHashTableOfTensors") @@ -42883,7 +45583,10 @@ begin if value_shape !== nothing desc["value_shape"] = Base.identity(value_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) if tf.eager_mode @@ -42901,7 +45604,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function read_file_graph(filename_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) local desc tf.with_op_name(name, "ReadFile") do desc = tf.NodeDescription("ReadFile") @@ -42913,7 +45616,10 @@ begin function read_file_eager(filename_; name=nothing) desc = tf.EagerOp("ReadFile") tf.add_input(desc, filename_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(read_file, [filename_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function read_file(filename_; name=nothing) if tf.eager_mode @@ -42931,7 +45637,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") @@ -42976,7 +45682,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -42994,7 +45703,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, 
@@ -42994,7 +45703,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
         local desc
         tf.with_op_name(name, "FractionalAvgPoolGrad") do
             desc = tf.NodeDescription("FractionalAvgPoolGrad")
@@ -43023,7 +45732,10 @@ begin
             desc["overlapping"] = Base.Bool(overlapping)
         end
         desc["T"] = tf.data_type(out_backprop_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
         if tf.eager_mode
@@ -43041,7 +45753,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do
             desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug")
@@ -43083,7 +45795,10 @@ begin
         if shard_id !== nothing
             desc["shard_id"] = Base.Int(shard_id)
         end
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         if tf.eager_mode
@@ -43101,7 +45816,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing)
         local desc
         tf.with_op_name(name, "StatefulStandardNormalV2") do
             desc = tf.NodeDescription("StatefulStandardNormalV2")
@@ -43133,7 +45848,10 @@ begin
             desc["shape_dtype"] = Base.identity(shape_dtype)
         end
         desc["shape_dtype"] = tf.data_type(shape_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(stateful_standard_normal_v2, [resource_, algorithm_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing)
         if tf.eager_mode
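As in the graph builders, optional attributes are written onto the eager op only when the caller supplied them, while dtype-valued attributes are always refreshed from the live inputs afterwards. A fragment-level sketch with a hypothetical `overlapping` attribute:

    if overlapping !== nothing
        desc["overlapping"] = Base.Bool(overlapping)  # set the attr only when requested
    end
    desc["T"] = tf.data_type(x_)                      # dtype attr comes from the input handle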
@@ -43151,7 +45869,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing)
         local desc
         tf.with_op_name(name, "Bincount") do
             desc = tf.NodeDescription("Bincount")
@@ -43171,7 +45889,10 @@ begin
         tf.add_input(desc, size_)
         tf.add_input(desc, weights_)
         desc["T"] = tf.data_type(weights_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function bincount(arr_, size_, weights_; name=nothing)
         if tf.eager_mode
@@ -43189,7 +45910,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inv_graph(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Inv") do
             desc = tf.NodeDescription("Inv")
@@ -43203,7 +45924,10 @@ begin
         desc = tf.EagerOp("Inv")
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(inv, [x_], name=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function inv(x_; name=nothing)
         if tf.eager_mode
@@ -43221,7 +45945,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ApplyProximalAdagrad") do
             desc = tf.NodeDescription("ApplyProximalAdagrad")
@@ -43261,7 +45985,10 @@ begin
         desc["T"] = tf.data_type(l1_)
        desc["T"] = tf.data_type(l2_)
         desc["T"] = tf.data_type(grad_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
         if tf.eager_mode
@@ -43279,7 +46006,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing)
         local desc
         tf.with_op_name(name, "GatherV2") do
             desc = tf.NodeDescription("GatherV2")
@@ -43304,7 +46031,10 @@ begin
         desc["Tparams"] = tf.data_type(params_)
         desc["Tindices"] = tf.data_type(indices_)
         desc["Taxis"] = tf.data_type(axis_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function gather_v2(params_, indices_, axis_; name=nothing)
         if tf.eager_mode
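Ops with several outputs (such as BoostedTreesGetEnsembleStates below) keep the whole result: the TapeNode is still attached to the first output handle, but `res` itself is returned so callers can destructure every output. A sketch with a hypothetical two-output kernel:

    function my_split_eager(x_; name=nothing)
        desc = tf.EagerOp("MySplit")  # hypothetical kernel with two outputs
        tf.add_input(desc, x_)
        res = tf.execute(desc)        # two TensorHandles
        node = tf.TapeNode(my_split, [x_], name=nothing)
        tf.add_node(res[1], node)     # the tape hook lives on the first output only
        return res                    # full vector, not res[1]
    end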
@@ -43322,7 +46052,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_file_graph(filename_, contents_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing)
         local desc
         tf.with_op_name(name, "WriteFile") do
             desc = tf.NodeDescription("WriteFile")
@@ -43337,7 +46067,10 @@ begin
         desc = tf.EagerOp("WriteFile")
         tf.add_input(desc, filename_)
         tf.add_input(desc, contents_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(write_file, [filename_, contents_], name=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function write_file(filename_, contents_; name=nothing)
         if tf.eager_mode
@@ -43355,7 +46088,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do
             desc = tf.NodeDescription("BoostedTreesGetEnsembleStates")
@@ -43372,7 +46105,10 @@ begin
     function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing)
         desc = tf.EagerOp("BoostedTreesGetEnsembleStates")
         tf.add_input(desc, tree_ensemble_handle_)
-        tf.execute(desc)
+        res = tf.execute(desc)
+        node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing)
+        tf.add_node(res[1], node)
+        return res
     end
     function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing)
         if tf.eager_mode
@@ -43390,7 +46126,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "ResourceGather") do
             desc = tf.NodeDescription("ResourceGather")
@@ -43420,7 +46156,10 @@ begin
             desc["dtype"] = Base.identity(dtype)
         end
         desc["Tindices"] = tf.data_type(indices_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing)
         if tf.eager_mode
@@ -43438,7 +46177,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do
             desc = tf.NodeDescription("ResourceApplyProximalGradientDescent")
@@ -43473,7 +46212,10 @@ begin
         desc["T"] = tf.data_type(l1_)
         desc["T"] = tf.data_type(l2_)
         desc["T"] = tf.data_type(delta_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
         if tf.eager_mode
@@ -43491,7 +46233,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "TruncateMod") do
             desc = tf.NodeDescription("TruncateMod")
@@ -43509,7 +46251,10 @@ begin
         tf.add_input(desc, y_)
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
-        (tf.execute(desc))[1]
+        res = 
tf.execute(desc) + node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function truncate_mod(x_, y_; name=nothing) if tf.eager_mode @@ -43527,7 +46272,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "LogMatrixDeterminant") do desc = tf.NodeDescription("LogMatrixDeterminant") @@ -43546,7 +46291,10 @@ begin desc = tf.EagerOp("LogMatrixDeterminant") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing) + tf.add_node(res[1], node) + return res end function log_matrix_determinant(input_; name=nothing) if tf.eager_mode @@ -43564,7 +46312,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT2D") do desc = tf.NodeDescription("IRFFT2D") @@ -43579,7 +46327,10 @@ begin desc = tf.EagerOp("IRFFT2D") tf.add_input(desc, input_) tf.add_input(desc, fft_length_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function irfft2d(input_, fft_length_; name=nothing) if tf.eager_mode @@ -43597,7 +46348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesTrainingPredict") do desc = tf.NodeDescription("BoostedTreesTrainingPredict") @@ -43635,7 +46386,10 @@ begin if logits_dimension !== nothing desc["logits_dimension"] = Base.Int(logits_dimension) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.eager_mode @@ -43653,7 +46407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) local desc tf.with_op_name(name, "NearestNeighbors") do desc = tf.NodeDescription("NearestNeighbors") @@ -43676,7 +46430,10 @@ begin tf.add_input(desc, points_) 
tf.add_input(desc, centers_) tf.add_input(desc, k_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(nearest_neighbors, [points_, centers_, k_], name=nothing) + tf.add_node(res[1], node) + return res end function nearest_neighbors(points_, centers_, k_; name=nothing) if tf.eager_mode @@ -43694,7 +46451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function floor_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) local desc tf.with_op_name(name, "Floor") do desc = tf.NodeDescription("Floor") @@ -43708,7 +46465,10 @@ begin desc = tf.EagerOp("Floor") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(floor, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function floor(x_; name=nothing) if tf.eager_mode @@ -43726,7 +46486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -43768,7 +46528,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -43786,7 +46549,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) local desc tf.with_op_name(name, "WriteImageSummary") do desc = tf.NodeDescription("WriteImageSummary") @@ -43818,7 +46581,10 @@ begin desc["max_images"] = Base.Int(max_images) end desc["T"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing) + tf.add_node(res[1], node) + return res[1] end function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) if tf.eager_mode @@ -43836,7 +46602,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tile_grad_graph(input_, multiples_; 
name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "TileGrad") do desc = tf.NodeDescription("TileGrad") @@ -43853,7 +46619,10 @@ begin tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tile_grad(input_, multiples_; name=nothing) if tf.eager_mode @@ -43871,7 +46640,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV3") do desc = tf.NodeDescription("TensorArrayGradV3") @@ -43897,7 +46666,10 @@ begin if source !== nothing desc["source"] = Base.String(source) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing) + tf.add_node(res[1], node) + return res end function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) if tf.eager_mode @@ -43915,7 +46687,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") @@ -43942,7 +46714,10 @@ begin if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing) + tf.add_node(res[1], node) + return res[1] end function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) if tf.eager_mode @@ -43960,7 +46735,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNorm") do desc = tf.NodeDescription("FusedBatchNorm") @@ -44013,7 +46788,10 @@ begin desc["T"] = tf.data_type(offset_) desc["T"] = tf.data_type(mean_) desc["T"] = tf.data_type(variance_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, 
is_training=nothing) if tf.eager_mode @@ -44031,7 +46809,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalAnd") do desc = tf.NodeDescription("LogicalAnd") @@ -44046,7 +46824,10 @@ begin desc = tf.EagerOp("LogicalAnd") tf.add_input(desc, x_) tf.add_input(desc, y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(logical_and, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function logical_and(x_, y_; name=nothing) if tf.eager_mode @@ -44064,7 +46845,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterUpdate") do desc = tf.NodeDescription("TensorScatterUpdate") @@ -44088,7 +46869,10 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) if tf.eager_mode @@ -44106,7 +46890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReaderV2") do desc = tf.NodeDescription("TextLineReaderV2") @@ -44133,7 +46917,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -44151,7 +46938,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorSliceDataset") do desc = tf.NodeDescription("TensorSliceDataset") @@ -44175,7 +46962,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, 
output_shapes=nothing) if tf.eager_mode @@ -44193,7 +46983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV3") do desc = tf.NodeDescription("TensorArrayScatterV3") @@ -44216,7 +47006,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -44234,7 +47027,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighborGrad") do desc = tf.NodeDescription("ResizeNearestNeighborGrad") @@ -44257,7 +47050,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(grads_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -44275,7 +47071,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyPowerSign") do desc = tf.NodeDescription("ApplyPowerSign") @@ -44319,7 +47115,10 @@ begin desc["T"] = tf.data_type(sign_decay_) desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -44337,7 +47136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRebatchDataset") do desc = tf.NodeDescription("ExperimentalRebatchDataset") @@ -44364,7 +47163,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = 
map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_rebatch_dataset, [input_dataset_, num_workers_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -44382,7 +47184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPad") do desc = tf.NodeDescription("MirrorPad") @@ -44407,7 +47209,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing) + tf.add_node(res[1], node) + return res[1] end function mirror_pad(input_, paddings_; name=nothing, mode=nothing) if tf.eager_mode @@ -44425,7 +47230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function logical_not_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing) local desc tf.with_op_name(name, "LogicalNot") do desc = tf.NodeDescription("LogicalNot") @@ -44437,7 +47242,10 @@ begin function logical_not_eager(x_; name=nothing) desc = tf.EagerOp("LogicalNot") tf.add_input(desc, x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(logical_not, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function logical_not(x_; name=nothing) if tf.eager_mode @@ -44455,7 +47263,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT") do desc = tf.NodeDescription("BatchIFFT") @@ -44467,7 +47275,10 @@ begin function batch_ifft_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_ifft(input_; name=nothing) if tf.eager_mode @@ -44485,7 +47296,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV2") do desc = tf.NodeDescription("TensorArrayConcatV2") @@ -44517,7 +47328,10 @@ begin if element_shape_except0 !== nothing desc["element_shape_except0"] = Base.identity(element_shape_except0) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing) + tf.add_node(res[1], node) + return res end function 
tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.eager_mode @@ -44535,7 +47349,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Sum") do desc = tf.NodeDescription("Sum") @@ -44561,7 +47375,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -44579,7 +47396,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesPredict") do desc = tf.NodeDescription("BoostedTreesPredict") @@ -44606,7 +47423,10 @@ begin if logits_dimension !== nothing desc["logits_dimension"] = Base.Int(logits_dimension) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.eager_mode @@ -44624,7 +47444,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize") @@ -44695,7 +47515,10 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) desc["Tbias"] = tf.data_type(bias_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_with_bias_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_with_bias_and_relu_and_requantize(input_, 
filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -44713,7 +47536,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyAdagrad") @@ -44755,7 +47578,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.eager_mode @@ -44773,7 +47599,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyReluGrad") do desc = tf.NodeDescription("LeakyReluGrad") @@ -44797,7 +47623,10 @@ begin end desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing) + tf.add_node(res[1], node) + return res[1] end function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) if tf.eager_mode @@ -44815,7 +47644,7 @@ end A graph node which represents a return value of a function. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceRetval") do desc = tf.NodeDescription("_DeviceRetval") @@ -44835,7 +47664,10 @@ begin desc["index"] = Base.Int(index) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing) + tf.add_node(res[1], node) + return res[1] end function _device_retval(input_; name=nothing, index=nothing) if tf.eager_mode @@ -44853,7 +47685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pad_graph(input_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) local desc tf.with_op_name(name, "Pad") do desc = tf.NodeDescription("Pad") @@ -44872,7 +47704,10 @@ begin tf.add_input(desc, paddings_) desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(pad, [input_, paddings_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function pad(input_, paddings_; name=nothing) if tf.eager_mode @@ -44890,7 +47725,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddManySparseToTensorsMap") do desc = tf.NodeDescription("AddManySparseToTensorsMap") @@ -44922,7 +47757,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(sparse_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -44940,7 +47778,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReorder") do desc = tf.NodeDescription("SparseReorder") @@ -44965,7 +47803,10 @@ begin tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) desc["T"] = tf.data_type(input_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) if tf.eager_mode @@ -44983,7 +47824,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseXor") do desc = tf.NodeDescription("BitwiseXor") @@ -45001,7 +47842,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bitwise_xor(x_, y_; name=nothing) if tf.eager_mode @@ -45019,7 +47863,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixSetDiag") do desc = tf.NodeDescription("BatchMatrixSetDiag") @@ -45037,7 +47881,10 @@ begin tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(diagonal_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_matrix_set_diag(input_, diagonal_; name=nothing) if tf.eager_mode @@ -45055,7 +47902,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsertV2") do desc = tf.NodeDescription("LookupTableInsertV2") @@ -45077,7 +47924,10 @@ begin tf.add_input(desc, values_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -45095,7 +47945,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") @@ -45125,7 +47975,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) if 
tf.eager_mode @@ -45143,7 +47996,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyRMSProp") @@ -45194,7 +48047,10 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -45212,7 +48068,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomCrop") do desc = tf.NodeDescription("RandomCrop") @@ -45241,7 +48097,10 @@ begin desc["seed2"] = Base.Int(seed2) end desc["T"] = tf.data_type(image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res[1] end function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -45259,7 +48118,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImportV2") do desc = tf.NodeDescription("LookupTableImportV2") @@ -45281,7 +48140,10 @@ begin tf.add_input(desc, values_) desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) if tf.eager_mode @@ -45299,7 +48161,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdUpdate") do desc = tf.NodeDescription("ResourceScatterNdUpdate") @@ -45328,7 +48190,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -45346,7 +48211,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) local desc tf.with_op_name(name, "StaticRegexFullMatch") do desc = tf.NodeDescription("StaticRegexFullMatch") @@ -45364,7 +48229,10 @@ begin if pattern !== nothing desc["pattern"] = Base.String(pattern) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing) + tf.add_node(res[1], node) + return res[1] end function static_regex_full_match(input_; name=nothing, pattern=nothing) if tf.eager_mode @@ -45382,7 +48250,7 @@ end Configures the credentials used by the GCS client of the local TF runtime. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureCredentials") do desc = tf.NodeDescription("GcsConfigureCredentials") @@ -45394,7 +48262,10 @@ begin function gcs_configure_credentials_eager(json_; name=nothing) desc = tf.EagerOp("GcsConfigureCredentials") tf.add_input(desc, json_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function gcs_configure_credentials(json_; name=nothing) if tf.eager_mode @@ -45412,7 +48283,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV3") do desc = tf.NodeDescription("TensorArraySizeV3") @@ -45427,7 +48298,10 @@ begin desc = tf.EagerOp("TensorArraySizeV3") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_size_v3(handle_, flow_in_; name=nothing) if tf.eager_mode @@ -45445,7 +48319,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") @@ -45473,7 +48347,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) desc["Tnumsegments"] = tf.data_type(num_segments_) - 
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -45491,7 +48368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") @@ -45575,7 +48452,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -45593,7 +48473,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropFilter") do desc = tf.NodeDescription("Conv2DBackpropFilter") @@ 
-45650,7 +48530,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -45668,7 +48551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGrad") do desc = tf.NodeDescription("MaxPoolGrad") @@ -45714,7 +48597,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -45732,7 +48618,7 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) local desc tf.with_op_name(name, "_InitializeHostForDistributedTPU") do desc = tf.NodeDescription("_InitializeHostForDistributedTPU") @@ -45744,7 +48630,10 @@ begin function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) desc = tf.EagerOp("_InitializeHostForDistributedTPU") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function _initialize_host_for_distributed_tpu(input_; name=nothing) if tf.eager_mode @@ -45762,7 +48651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StagePeek") do desc = tf.NodeDescription("StagePeek") @@ -45804,7 +48693,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stage_peek, [index_], 
name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -45822,7 +48714,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) local desc tf.with_op_name(name, "PadV2") do desc = tf.NodeDescription("PadV2") @@ -45845,7 +48737,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) desc["T"] = tf.data_type(constant_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function pad_v2(input_, paddings_, constant_values_; name=nothing) if tf.eager_mode @@ -45863,7 +48758,7 @@ end Creates an empty Tensor with shape `shape` and type `dtype`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) local desc tf.with_op_name(name, "_ParallelConcatStart") do desc = tf.NodeDescription("_ParallelConcatStart") @@ -45884,7 +48779,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) if tf.eager_mode @@ -45902,7 +48800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) local desc tf.with_op_name(name, "PrintV2") do desc = tf.NodeDescription("PrintV2") @@ -45920,7 +48818,10 @@ begin if output_stream !== nothing desc["output_stream"] = Base.String(output_stream) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing) + tf.add_node(res[1], node) + return res[1] end function print_v2(input_; name=nothing, output_stream=nothing) if tf.eager_mode @@ -45938,7 +48839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptionalGetValue") do desc = tf.NodeDescription("OptionalGetValue") @@ -45962,7 +48863,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(optional_get_value, 
[optional_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -45980,7 +48884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") @@ -46022,7 +48926,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -46040,7 +48947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) local desc tf.with_op_name(name, "SparseSlice") do desc = tf.NodeDescription("SparseSlice") @@ -46071,7 +48978,10 @@ begin tf.add_input(desc, start_) tf.add_input(desc, size_) desc["T"] = tf.data_type(values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) if tf.eager_mode @@ -46089,7 +48999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") @@ -46118,7 +49028,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res end function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) if tf.eager_mode @@ -46136,7 +49049,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_solve_graph(matrix_, rhs_; 
name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixSolve") do desc = tf.NodeDescription("MatrixSolve") @@ -46160,7 +49073,10 @@ begin end desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) if tf.eager_mode @@ -46178,7 +49094,7 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "_ConfigureDistributedTPU") do desc = tf.NodeDescription("_ConfigureDistributedTPU") @@ -46196,7 +49112,10 @@ begin if N !== nothing desc["N"] = Base.Int(N) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing) + tf.add_node(res[1], node) + return res[1] end function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) if tf.eager_mode @@ -46214,7 +49133,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) local desc tf.with_op_name(name, "AdjustContrastv2") do desc = tf.NodeDescription("AdjustContrastv2") @@ -46231,7 +49150,10 @@ begin tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function adjust_contrastv2(images_, contrast_factor_; name=nothing) if tf.eager_mode @@ -46249,7 +49171,7 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMaximum") do desc = tf.NodeDescription("_MklMaximum") @@ -46278,7 +49200,10 @@ begin tf.add_input(desc, mkl_y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing) + tf.add_node(res[1], node) + return res end function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.eager_mode @@ -46296,7 +49221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsSize") do desc = tf.NodeDescription("CudnnRNNParamsSize") @@ -46356,7 +49281,10 @@ begin if seed2 !== nothing desc["seed2"] = Base.Int(seed2) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + tf.add_node(res[1], node) + return res[1] end function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.eager_mode @@ -46374,7 +49302,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") @@ -46395,7 +49323,10 @@ begin if num_features !== nothing desc["num_features"] = Base.Int(num_features) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) if tf.eager_mode @@ -46413,7 +49344,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) local desc 
tf.with_op_name(name, "BatchIFFT3D") do desc = tf.NodeDescription("BatchIFFT3D") @@ -46425,7 +49356,10 @@ begin function batch_ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT3D") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft3d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_ifft3d(input_; name=nothing) if tf.eager_mode @@ -46443,7 +49377,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sigmoid_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sigmoid") do desc = tf.NodeDescription("Sigmoid") @@ -46457,7 +49391,10 @@ begin desc = tf.EagerOp("Sigmoid") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sigmoid, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sigmoid(x_; name=nothing) if tf.eager_mode @@ -46475,7 +49412,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMean") do desc = tf.NodeDescription("SegmentMean") @@ -46495,7 +49432,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function segment_mean(data_, segment_ids_; name=nothing) if tf.eager_mode @@ -46513,7 +49453,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") @@ -46525,7 +49465,10 @@ begin function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") tf.add_input(desc, tree_ensemble_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) if tf.eager_mode @@ -46543,7 +49486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV2") do desc = tf.NodeDescription("TensorArraySizeV2") @@ -46558,7 +49501,10 @@ begin desc = tf.EagerOp("TensorArraySizeV2") tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_size_v2(handle_, flow_in_; name=nothing) if tf.eager_mode @@ -46576,7 +49522,7 @@ end Returns x - y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSub") do desc = tf.NodeDescription("_MklSub") @@ -46605,7 +49551,10 @@ begin tf.add_input(desc, mkl_y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing) + tf.add_node(res[1], node) + return res end function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.eager_mode @@ -46623,7 +49572,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) local desc tf.with_op_name(name, "SendTPUEmbeddingGradients") do desc = tf.NodeDescription("SendTPUEmbeddingGradients") @@ -46656,7 +49605,10 @@ begin if config !== nothing desc["config"] = Base.String(config) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing) + tf.add_node(res[1], node) + return res[1] end function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) if tf.eager_mode @@ -46674,7 +49626,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3D") do desc = tf.NodeDescription("MaxPool3D") @@ -46712,7 +49664,10 @@ begin desc["data_format"] = Base.String(data_format) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -46730,7 +49685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Prod") do desc = tf.NodeDescription("Prod") @@ -46756,7 +49711,10 @@ begin end desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + tf.add_node(res[1], node) + return res[1] end function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.eager_mode @@ -46774,7 +49732,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") @@ -46786,7 +49744,10 @@ begin function experimental_identity_indexed_dataset_eager(size_; name=nothing) desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") tf.add_input(desc, size_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_identity_indexed_dataset(size_; name=nothing) if tf.eager_mode @@ -46804,7 +49765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBack") do desc = tf.NodeDescription("TensorListPushBack") @@ -46827,7 +49788,10 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end desc["element_dtype"] = tf.data_type(tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -46845,7 +49809,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) local desc tf.with_op_name(name, "BatchFunction") do desc = tf.NodeDescription("BatchFunction") @@ -46932,7 +49896,10 @@ begin if Tout !== nothing desc["Tout"] = map(Base.identity, Tout) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, 
Tout=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) if tf.eager_mode @@ -46950,7 +49917,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRows") do desc = tf.NodeDescription("SparseFillEmptyRows") @@ -46979,7 +49946,10 @@ begin tf.add_input(desc, default_value_) desc["T"] = tf.data_type(values_) desc["T"] = tf.data_type(default_value_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) if tf.eager_mode @@ -46997,7 +49967,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "SelfAdjointEigV2") do desc = tf.NodeDescription("SelfAdjointEigV2") @@ -47022,7 +49992,10 @@ begin desc["compute_v"] = Base.Bool(compute_v) end desc["T"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing) + tf.add_node(res[1], node) + return res end function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) if tf.eager_mode @@ -47040,7 +50013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") @@ -47078,7 +50051,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -47096,7 +50072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, 
global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") @@ -47146,7 +50122,10 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -47164,7 +50143,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) local desc tf.with_op_name(name, "TemporaryVariable") do desc = tf.NodeDescription("TemporaryVariable") @@ -47191,7 +50170,10 @@ begin if var_name !== nothing desc["var_name"] = Base.String(var_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + tf.add_node(res[1], node) + return res[1] end function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) if tf.eager_mode @@ -47209,7 +50191,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAddSign") do desc = tf.NodeDescription("ResourceApplyAddSign") @@ -47251,7 +50233,10 @@ begin desc["T"] = tf.data_type(sign_decay_) desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -47269,7 +50254,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) local desc tf.with_op_name(name, "Roll") do desc = tf.NodeDescription("Roll") @@ -47293,7 +50278,10 @@ begin desc["T"] = tf.data_type(input_) desc["Tshift"] = tf.data_type(shift_) desc["Taxis"] = tf.data_type(axis_) - (tf.execute(desc))[1] + res 
= tf.execute(desc) + node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function roll(input_, shift_, axis_; name=nothing) if tf.eager_mode @@ -47311,7 +50299,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function xdivy_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xdivy") do desc = tf.NodeDescription("Xdivy") @@ -47329,7 +50317,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(xdivy, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function xdivy(x_, y_; name=nothing) if tf.eager_mode @@ -47347,7 +50338,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGradGrad") do desc = tf.NodeDescription("MaxPool3DGradGrad") @@ -47393,7 +50384,10 @@ begin desc["T"] = tf.data_type(orig_input_) desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.eager_mode @@ -47411,7 +50405,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) local desc tf.with_op_name(name, "CropAndResize") do desc = tf.NodeDescription("CropAndResize") @@ -47446,7 +50440,10 @@ begin desc["extrapolation_value"] = Base.identity(extrapolation_value) end desc["T"] = tf.data_type(image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing) + tf.add_node(res[1], node) + return res[1] end function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) if tf.eager_mode @@ -47464,7 +50461,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) local desc 
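# Multi-output ops take a slightly different shape: QuantizedBiasAdd here,
# like SparseSlice, SelfAdjointEigV2, MapUnstageNoKey, and the _Mkl* ops
# above, returns the whole res vector instead of res[1]. The TapeNode is
# still attached to res[1] only, so just the first output is linked to the
# tape. Sketch under the same hypothetical-op assumptions as the one above:
function demo_multi_eager(x_; name=nothing)
    desc = tf.EagerOp("DemoMultiOp")
    tf.add_input(desc, x_)
    res = tf.execute(desc)          # several output handles
    node = tf.TapeNode(demo_multi, [x_], name=nothing)
    tf.add_node(res[1], node)       # recorded against the first output only
    return res                      # caller receives every output
end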
tf.with_op_name(name, "QuantizedBiasAdd") do desc = tf.NodeDescription("QuantizedBiasAdd") @@ -47506,7 +50503,10 @@ begin end desc["T1"] = tf.data_type(input_) desc["T2"] = tf.data_type(bias_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) if tf.eager_mode @@ -47524,7 +50524,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) local desc tf.with_op_name(name, "KMC2ChainInitialization") do desc = tf.NodeDescription("KMC2ChainInitialization") @@ -47539,7 +50539,10 @@ begin desc = tf.EagerOp("KMC2ChainInitialization") tf.add_input(desc, distances_) tf.add_input(desc, seed_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(kmc2chain_initialization, [distances_, seed_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function kmc2chain_initialization(distances_, seed_; name=nothing) if tf.eager_mode @@ -47557,7 +50560,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstageNoKey") do desc = tf.NodeDescription("MapUnstageNoKey") @@ -47604,7 +50607,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res end function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -47622,7 +50628,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdSub") do desc = tf.NodeDescription("ScatterNdSub") @@ -47652,7 +50658,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -47670,7 +50679,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinear") do desc = tf.NodeDescription("ResizeBilinear") @@ -47693,7 +50702,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -47711,7 +50723,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapPeek") do desc = tf.NodeDescription("OrderedMapPeek") @@ -47756,7 +50768,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -47774,7 +50789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArray") do desc = tf.NodeDescription("TensorArray") @@ -47816,7 +50831,10 @@ begin if element_shape !== nothing desc["element_shape"] = Base.identity(element_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) if tf.eager_mode @@ -47834,7 +50852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceSub") do desc = tf.NodeDescription("InplaceSub") @@ -47855,7 +50873,10 @@ begin tf.add_input(desc, v_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(v_) - 
(tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function inplace_sub(x_, i_, v_; name=nothing) if tf.eager_mode @@ -47873,7 +50894,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function pow_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Pow") do desc = tf.NodeDescription("Pow") @@ -47891,7 +50912,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(pow, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function pow(x_, y_; name=nothing) if tf.eager_mode @@ -47909,7 +50933,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormal") do desc = tf.NodeDescription("StatefulStandardNormal") @@ -47938,7 +50962,10 @@ begin desc["shape_dtype"] = Base.identity(shape_dtype) end desc["shape_dtype"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateful_standard_normal, [resource_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) if tf.eager_mode @@ -47956,7 +50983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefNextIteration") do desc = tf.NodeDescription("RefNextIteration") @@ -47970,7 +50997,10 @@ begin desc = tf.EagerOp("RefNextIteration") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(ref_next_iteration, [data_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function ref_next_iteration(data_; name=nothing) if tf.eager_mode @@ -47988,7 +51018,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) local desc tf.with_op_name(name, "ScalarSummary") do desc = tf.NodeDescription("ScalarSummary") @@ -48005,7 +51035,10 @@ begin tf.add_input(desc, tags_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function scalar_summary(tags_, values_; name=nothing) if tf.eager_mode @@ -48023,7 +51056,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, 
maxsplit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) local desc tf.with_op_name(name, "StringSplitV2") do desc = tf.NodeDescription("StringSplitV2") @@ -48049,7 +51082,10 @@ begin if maxsplit !== nothing desc["maxsplit"] = Base.Int(maxsplit) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing) + tf.add_node(res[1], node) + return res end function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) if tf.eager_mode @@ -48067,7 +51103,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bessel_i0e_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI0e") do desc = tf.NodeDescription("BesselI0e") @@ -48081,7 +51117,10 @@ begin desc = tf.EagerOp("BesselI0e") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bessel_i0e, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bessel_i0e(x_; name=nothing) if tf.eager_mode @@ -48099,7 +51138,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "Unique") do desc = tf.NodeDescription("Unique") @@ -48124,7 +51163,10 @@ begin desc["out_idx"] = Base.identity(out_idx) end desc["T"] = tf.data_type(x_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing) + tf.add_node(res[1], node) + return res end function unique(x_; name=nothing, out_idx=nothing) if tf.eager_mode @@ -48142,7 +51184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") @@ -48184,7 +51226,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res[1] end function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -48202,7 +51247,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReaderV2") do desc = tf.NodeDescription("WholeFileReaderV2") @@ -48223,7 +51268,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -48241,7 +51289,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "EagerPyFunc") do desc = tf.NodeDescription("EagerPyFunc") @@ -48271,7 +51319,10 @@ begin if Tout !== nothing desc["Tout"] = map(Base.identity, Tout) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + tf.add_node(res[1], node) + return res[1] end function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.eager_mode @@ -48289,7 +51340,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "NextIteration") do desc = tf.NodeDescription("NextIteration") @@ -48303,7 +51354,10 @@ begin desc = tf.EagerOp("NextIteration") tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(next_iteration, [data_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function next_iteration(data_; name=nothing) if tf.eager_mode @@ -48321,7 +51375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Case") do desc = tf.NodeDescription("Case") @@ -48360,7 +51414,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(case, [branch_index_, input_], name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) if tf.eager_mode @@ -48378,7 +51435,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) local 
desc tf.with_op_name(name, "TensorScatterSub") do desc = tf.NodeDescription("TensorScatterSub") @@ -48402,7 +51459,10 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) if tf.eager_mode @@ -48420,7 +51480,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMax") do desc = tf.NodeDescription("ScatterMax") @@ -48450,7 +51510,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -48468,7 +51531,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sqrt_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sqrt") do desc = tf.NodeDescription("Sqrt") @@ -48482,7 +51545,10 @@ begin desc = tf.EagerOp("Sqrt") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sqrt, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sqrt(x_; name=nothing) if tf.eager_mode @@ -48500,7 +51566,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorTakeGradient") do desc = tf.NodeDescription("AccumulatorTakeGradient") @@ -48521,7 +51587,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) if tf.eager_mode @@ -48539,7 +51608,7 @@ end Returns x + y element-wise. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklAdd") do desc = tf.NodeDescription("_MklAdd") @@ -48568,7 +51637,10 @@ begin tf.add_input(desc, mkl_y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing) + tf.add_node(res[1], node) + return res end function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.eager_mode @@ -48586,7 +51658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reciprocal_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) local desc tf.with_op_name(name, "Reciprocal") do desc = tf.NodeDescription("Reciprocal") @@ -48600,7 +51672,10 @@ begin desc = tf.EagerOp("Reciprocal") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reciprocal, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reciprocal(x_; name=nothing) if tf.eager_mode @@ -48618,7 +51693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "OutfeedEnqueueTuple") do desc = tf.NodeDescription("OutfeedEnqueueTuple") @@ -48636,7 +51711,10 @@ begin if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing) + tf.add_node(res[1], node) + return res[1] end function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) if tf.eager_mode @@ -48654,7 +51732,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_strip_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) local desc tf.with_op_name(name, "StringStrip") do desc = tf.NodeDescription("StringStrip") @@ -48666,7 +51744,10 @@ begin function string_strip_eager(input_; name=nothing) desc = tf.EagerOp("StringStrip") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_strip, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function string_strip(input_; name=nothing) if tf.eager_mode @@ -48684,7 +51765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") 
@@ -48714,7 +51795,10 @@ begin if narrow_range !== nothing desc["narrow_range"] = Base.Bool(narrow_range) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + tf.add_node(res[1], node) + return res[1] end function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.eager_mode @@ -48732,7 +51816,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierReadySize") do desc = tf.NodeDescription("BarrierReadySize") @@ -48744,7 +51828,10 @@ begin function barrier_ready_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierReadySize") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function barrier_ready_size(handle_; name=nothing) if tf.eager_mode @@ -48762,7 +51849,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucket") do desc = tf.NodeDescription("StringToHashBucket") @@ -48780,7 +51867,10 @@ begin if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing) + tf.add_node(res[1], node) + return res[1] end function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) if tf.eager_mode @@ -48798,7 +51888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcat") do desc = tf.NodeDescription("TensorArrayConcat") @@ -48830,7 +51920,10 @@ begin if element_shape_except0 !== nothing desc["element_shape_except0"] = Base.identity(element_shape_except0) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing) + tf.add_node(res[1], node) + return res end function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.eager_mode @@ -48848,7 +51941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilename") do 
desc = tf.NodeDescription("ShardedFilename") @@ -48866,7 +51959,10 @@ begin tf.add_input(desc, basename_) tf.add_input(desc, shard_) tf.add_input(desc, num_shards_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sharded_filename(basename_, shard_, num_shards_; name=nothing) if tf.eager_mode @@ -48884,7 +51980,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFunc") do desc = tf.NodeDescription("PyFunc") @@ -48914,7 +52010,10 @@ begin if Tout !== nothing desc["Tout"] = map(Base.identity, Tout) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + tf.add_node(res[1], node) + return res[1] end function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.eager_mode @@ -48932,7 +52031,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentProd") do desc = tf.NodeDescription("UnsortedSegmentProd") @@ -48957,7 +52056,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -48975,7 +52077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "CountUpTo") do desc = tf.NodeDescription("CountUpTo") @@ -48995,7 +52097,10 @@ begin desc["limit"] = Base.Int(limit) end desc["T"] = tf.data_type(ref_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing) + tf.add_node(res[1], node) + return res[1] end function count_up_to(ref_; name=nothing, limit=nothing) if tf.eager_mode @@ -49013,7 +52118,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) local desc tf.with_op_name(name, "RandomGamma") do desc = tf.NodeDescription("RandomGamma") @@ -49050,7 +52155,10 @@ begin end desc["S"] = tf.data_type(shape_) desc["T"] = tf.data_type(alpha_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing) + tf.add_node(res[1], node) + return res[1] end function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) if tf.eager_mode @@ -49068,7 +52176,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGrad") do desc = tf.NodeDescription("TensorArrayGrad") @@ -49089,7 +52197,10 @@ begin if source !== nothing desc["source"] = Base.String(source) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) if tf.eager_mode @@ -49107,7 +52218,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2D") do desc = tf.NodeDescription("Dilation2D") @@ -49143,7 +52254,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.eager_mode @@ -49161,7 +52275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unbatch") do desc = tf.NodeDescription("Unbatch") @@ -49199,7 +52313,10 @@ begin desc["shared_name"] = Base.String(shared_name) end desc["T"] = tf.data_type(batched_tensor_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -49217,7 +52334,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function get_session_handle_graph(value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandle") do desc = tf.NodeDescription("GetSessionHandle") @@ -49231,7 +52348,10 @@ begin desc = 
tf.EagerOp("GetSessionHandle") tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(get_session_handle, [value_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function get_session_handle(value_; name=nothing) if tf.eager_mode @@ -49249,7 +52369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") @@ -49287,7 +52407,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -49305,7 +52428,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensorsV2") do desc = tf.NodeDescription("MutableHashTableOfTensorsV2") @@ -49350,7 +52473,10 @@ begin if value_shape !== nothing desc["value_shape"] = Base.identity(value_shape) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + tf.add_node(res[1], node) + return res[1] end function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) if tf.eager_mode @@ -49368,7 +52494,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrl") do desc = tf.NodeDescription("SparseApplyFtrl") @@ -49422,7 +52548,10 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, 
linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -49440,7 +52569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDatasetV2") do desc = tf.NodeDescription("BatchDatasetV2") @@ -49470,7 +52599,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -49488,7 +52620,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMinimum") do desc = tf.NodeDescription("SparseSparseMinimum") @@ -49523,7 +52655,10 @@ begin tf.add_input(desc, b_shape_) desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) if tf.eager_mode @@ -49541,7 +52676,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) local desc tf.with_op_name(name, "ReverseV2") do desc = tf.NodeDescription("ReverseV2") @@ -49561,7 +52696,10 @@ begin tf.add_input(desc, axis_) desc["T"] = tf.data_type(tensor_) desc["Tidx"] = tf.data_type(axis_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function reverse_v2(tensor_, axis_; name=nothing) if tf.eager_mode @@ -49579,7 +52717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, 
strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSlice") do desc = tf.NodeDescription("StridedSlice") @@ -49675,7 +52813,10 @@ begin desc["Index"] = tf.data_type(begin_) desc["Index"] = tf.data_type(end_) desc["Index"] = tf.data_type(strides_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + tf.add_node(res[1], node) + return res[1] end function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.eager_mode @@ -49693,7 +52834,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matching_files_graph(pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) local desc tf.with_op_name(name, "MatchingFiles") do desc = tf.NodeDescription("MatchingFiles") @@ -49705,7 +52846,10 @@ begin function matching_files_eager(pattern_; name=nothing) desc = tf.EagerOp("MatchingFiles") tf.add_input(desc, pattern_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(matching_files, [pattern_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function matching_files(pattern_; name=nothing) if tf.eager_mode @@ -49723,7 +52867,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) local desc tf.with_op_name(name, "EncodeBase64") do desc = tf.NodeDescription("EncodeBase64") @@ -49741,7 +52885,10 @@ begin if pad !== nothing desc["pad"] = Base.Bool(pad) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing) + tf.add_node(res[1], node) + return res[1] end function encode_base64(input_; name=nothing, pad=nothing) if tf.eager_mode @@ -49759,7 +52906,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextAsOptional") do desc = tf.NodeDescription("IteratorGetNextAsOptional") @@ -49783,7 +52930,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -49801,7 +52951,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function 
padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueue") do desc = tf.NodeDescription("PaddingFIFOQueue") @@ -49840,7 +52990,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -49858,7 +53011,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "IteratorToStringHandle") do desc = tf.NodeDescription("IteratorToStringHandle") @@ -49870,7 +53023,10 @@ begin function iterator_to_string_handle_eager(resource_handle_; name=nothing) desc = tf.EagerOp("IteratorToStringHandle") tf.add_input(desc, resource_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_to_string_handle(resource_handle_; name=nothing) if tf.eager_mode @@ -49888,7 +53044,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") @@ -49929,7 +53085,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(grad_) desc["Targmax"] = tf.data_type(argmax_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + tf.add_node(res[1], node) + return res[1] end function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.eager_mode @@ -49947,7 +53106,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGather") do desc = tf.NodeDescription("TensorListGather") @@ -49971,7 +53130,10 @@ begin if element_dtype !== 
nothing desc["element_dtype"] = Base.identity(element_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_, element_shape_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -49989,7 +53151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "Multinomial") do desc = tf.NodeDescription("Multinomial") @@ -50024,7 +53186,10 @@ begin desc["output_dtype"] = Base.identity(output_dtype) end desc["T"] = tf.data_type(logits_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) if tf.eager_mode @@ -50042,7 +53207,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayRead") do desc = tf.NodeDescription("TensorArrayRead") @@ -50066,7 +53231,10 @@ begin if dtype !== nothing desc["dtype"] = Base.identity(dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.eager_mode @@ -50084,7 +53252,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") @@ -50111,7 +53279,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -50129,7 +53300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "TPUPartitionedCall") do desc = tf.NodeDescription("TPUPartitionedCall") @@ -50162,7 +53333,10 @@ begin if f !== nothing desc["f"] = Base.identity(f) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tpu_partitioned_call, [args_, device_ordinal_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + tf.add_node(res[1], node) + return res[1] end function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if tf.eager_mode @@ -50180,7 +53354,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") @@ -50246,7 +53420,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_and_relu_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -50264,7 +53441,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandleV2") do desc = tf.NodeDescription("IteratorFromStringHandleV2") @@ -50288,7 +53465,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -50306,7 +53486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
bitwise_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseOr") do desc = tf.NodeDescription("BitwiseOr") @@ -50324,7 +53504,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function bitwise_or(x_, y_; name=nothing) if tf.eager_mode @@ -50342,7 +53525,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMax") do desc = tf.NodeDescription("UnsortedSegmentMax") @@ -50367,7 +53550,10 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) if tf.eager_mode @@ -50385,7 +53571,7 @@ end Returns (x - y)(x - y) element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSquaredDifference") do desc = tf.NodeDescription("_MklSquaredDifference") @@ -50414,7 +53600,10 @@ begin tf.add_input(desc, mkl_y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing) + tf.add_node(res[1], node) + return res end function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.eager_mode @@ -50432,7 +53621,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilter") do desc = tf.NodeDescription("Conv3DBackpropFilter") @@ -50472,7 +53661,10 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -50490,7 +53682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, 
else_branch=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "If") do desc = tf.NodeDescription("If") @@ -50537,7 +53729,10 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end desc["Tcond"] = tf.data_type(cond_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) if tf.eager_mode @@ -50555,7 +53750,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FlatMapDataset") do desc = tf.NodeDescription("FlatMapDataset") @@ -50594,7 +53789,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -50612,7 +53810,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatter") do desc = tf.NodeDescription("TensorListScatter") @@ -50646,7 +53844,10 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.eager_mode @@ -50664,7 +53865,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftsignGrad") do desc = tf.NodeDescription("SoftsignGrad") @@ -50682,7 +53883,10 @@ begin tf.add_input(desc, features_) desc["T"] = 
tf.data_type(gradients_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function softsign_grad(gradients_, features_; name=nothing) if tf.eager_mode @@ -50700,7 +53904,7 @@ end Copy Host Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "CopyHost") do desc = tf.NodeDescription("CopyHost") @@ -50726,7 +53930,10 @@ begin desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + tf.add_node(res[1], node) + return res[1] end function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) if tf.eager_mode @@ -50744,7 +53951,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) local desc tf.with_op_name(name, "LinSpace") do desc = tf.NodeDescription("LinSpace") @@ -50768,7 +53975,10 @@ begin desc["T"] = tf.data_type(start_) desc["T"] = tf.data_type(stop_) desc["Tidx"] = tf.data_type(num_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function lin_space(start_, stop_, num_; name=nothing) if tf.eager_mode @@ -50786,7 +53996,7 @@ end Updates input `value` at `loc` with `update`. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) local desc tf.with_op_name(name, "_ParallelConcatUpdate") do desc = tf.NodeDescription("_ParallelConcatUpdate") @@ -50810,7 +54020,10 @@ begin end desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(update_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing) + tf.add_node(res[1], node) + return res[1] end function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) if tf.eager_mode @@ -50828,7 +54041,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "Stack") do desc = tf.NodeDescription("Stack") @@ -50849,7 +54062,10 @@ begin if stack_name !== nothing desc["stack_name"] = Base.String(stack_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing) + tf.add_node(res[1], node) + return res[1] end function stack(; name=nothing, elem_type=nothing, stack_name=nothing) if tf.eager_mode @@ -50867,7 +54083,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPushV2") do desc = tf.NodeDescription("StackPushV2") @@ -50890,7 +54106,10 @@ begin desc["swap_memory"] = Base.Bool(swap_memory) end desc["T"] = tf.data_type(elem_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing) + tf.add_node(res[1], node) + return res[1] end function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) if tf.eager_mode @@ -50908,7 +54127,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignVariableOp") do desc = tf.NodeDescription("AssignVariableOp") @@ -50931,7 +54150,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["dtype"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.eager_mode @@ -50949,7 +54171,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SparseSplit") do desc = tf.NodeDescription("SparseSplit") @@ -50984,7 +54206,10 @@ begin desc["num_split"] = Base.Int(num_split) end desc["T"] = tf.data_type(values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing) + tf.add_node(res[1], node) + return res end function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) if tf.eager_mode @@ -51002,7 +54227,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayUnpack") do desc = tf.NodeDescription("TensorArrayUnpack") @@ -51022,7 +54247,10 @@ begin tf.add_input(desc, value_) tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) if tf.eager_mode @@ -51040,7 +54268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) local desc tf.with_op_name(name, "TensorListStack") do desc = tf.NodeDescription("TensorListStack") @@ -51067,7 +54295,10 @@ begin if num_elements !== nothing desc["num_elements"] = Base.Int(num_elements) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_stack, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, num_elements=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) if tf.eager_mode @@ -51085,7 +54316,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierIncompleteSize") do desc = tf.NodeDescription("BarrierIncompleteSize") @@ -51097,7 +54328,10 @@ begin function barrier_incomplete_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierIncompleteSize") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function barrier_incomplete_size(handle_; name=nothing) if tf.eager_mode @@ -51115,7 +54349,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, 
preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "Restore") do desc = tf.NodeDescription("Restore") @@ -51142,7 +54376,10 @@ begin if preferred_shard !== nothing desc["preferred_shard"] = Base.Int(preferred_shard) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing) + tf.add_node(res[1], node) + return res[1] end function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) if tf.eager_mode @@ -51160,7 +54397,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV3") do desc = tf.NodeDescription("TensorArrayV3") @@ -51213,7 +54450,10 @@ begin if tensor_array_name !== nothing desc["tensor_array_name"] = Base.String(tensor_array_name) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + tf.add_node(res[1], node) + return res end function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) if tf.eager_mode @@ -51231,7 +54471,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalAssertNextDataset") do desc = tf.NodeDescription("ExperimentalAssertNextDataset") @@ -51258,7 +54498,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -51276,7 +54519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) local desc tf.with_op_name(name, "InTopK") do desc 
= tf.NodeDescription("InTopK") @@ -51299,7 +54542,10 @@ begin desc["k"] = Base.Int(k) end desc["T"] = tf.data_type(targets_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing) + tf.add_node(res[1], node) + return res[1] end function in_top_k(predictions_, targets_; name=nothing, k=nothing) if tf.eager_mode @@ -51317,7 +54563,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterSub") do desc = tf.NodeDescription("ScatterSub") @@ -51347,7 +54593,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -51365,7 +54614,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function acosh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acosh") do desc = tf.NodeDescription("Acosh") @@ -51379,7 +54628,10 @@ begin desc = tf.EagerOp("Acosh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(acosh, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function acosh(x_; name=nothing) if tf.eager_mode @@ -51397,7 +54649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") @@ -51442,7 +54694,10 @@ begin end desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res[1] end function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.eager_mode @@ -51460,7 +54715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, 
Truncate=nothing) local desc tf.with_op_name(name, "Cast") do desc = tf.NodeDescription("Cast") @@ -51492,7 +54747,10 @@ begin desc["Truncate"] = Base.Bool(Truncate) end desc["SrcT"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + tf.add_node(res[1], node) + return res[1] end function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) if tf.eager_mode @@ -51510,7 +54768,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeV2") do desc = tf.NodeDescription("QuantizeV2") @@ -51545,7 +54803,10 @@ begin if round_mode !== nothing desc["round_mode"] = Base.String(round_mode) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing) + tf.add_node(res[1], node) + return res end function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) if tf.eager_mode @@ -51563,7 +54824,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "GeneratorDataset") do desc = tf.NodeDescription("GeneratorDataset") @@ -51629,7 +54890,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -51647,7 +54911,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, 
"TensorForestTreeSerialize") do desc = tf.NodeDescription("TensorForestTreeSerialize") @@ -51659,7 +54923,10 @@ begin function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSerialize") tf.add_input(desc, tree_handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_forest_tree_serialize(tree_handle_; name=nothing) if tf.eager_mode @@ -51677,7 +54944,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) local desc tf.with_op_name(name, "NextAfter") do desc = tf.NodeDescription("NextAfter") @@ -51695,7 +54962,10 @@ begin tf.add_input(desc, x2_) desc["T"] = tf.data_type(x1_) desc["T"] = tf.data_type(x2_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(next_after, [x1_, x2_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function next_after(x1_, x2_; name=nothing) if tf.eager_mode @@ -51713,7 +54983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV2") do desc = tf.NodeDescription("TensorArrayCloseV2") @@ -51725,7 +54995,10 @@ begin function tensor_array_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV2") tf.add_input(desc, handle_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_array_close_v2(handle_; name=nothing) if tf.eager_mode @@ -51743,7 +55016,7 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "BigQueryReader") do desc = tf.NodeDescription("BigQueryReader") @@ -51800,7 +55073,10 @@ begin if test_end_point !== nothing desc["test_end_point"] = Base.String(test_end_point) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + tf.add_node(res[1], node) + return res[1] end function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) if tf.eager_mode @@ -51818,7 +55094,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReadV2") do desc = tf.NodeDescription("ReaderReadV2") @@ -51838,7 +55114,10 @@ begin desc = tf.EagerOp("ReaderReadV2") tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing) + tf.add_node(res[1], node) + return res end function reader_read_v2(reader_handle_, queue_handle_; name=nothing) if tf.eager_mode @@ -51856,7 +55135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mod") do desc = tf.NodeDescription("Mod") @@ -51874,7 +55153,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(mod, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function mod(x_, y_; name=nothing) if tf.eager_mode @@ -51892,7 +55174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function add_v2_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "AddV2") do desc = tf.NodeDescription("AddV2") @@ -51910,7 +55192,10 @@ begin tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(add_v2, [x_, y_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function add_v2(x_, y_; name=nothing) if tf.eager_mode @@ -51928,7 +55213,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomNormal") do desc = tf.NodeDescription("StatelessRandomNormal") @@ -51953,7 +55238,10 @@ begin end desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) if tf.eager_mode @@ -51971,7 +55259,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceAssign") do desc = tf.NodeDescription("StridedSliceAssign") @@ -52071,7 +55359,10 @@ begin desc["Index"] = tf.data_type(end_) desc["Index"] = tf.data_type(strides_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + tf.add_node(res[1], node) + return res[1] end function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.eager_mode @@ -52089,7 +55380,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMin") do desc = tf.NodeDescription("ScatterMin") @@ -52119,7 +55410,10 @@ begin desc["T"] = tf.data_type(ref_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -52137,7 +55431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "ResourceStridedSliceAssign") do desc = tf.NodeDescription("ResourceStridedSliceAssign") @@ -52236,7 +55530,10 @@ begin desc["Index"] = tf.data_type(end_) desc["Index"] = tf.data_type(strides_) desc["T"] = tf.data_type(value_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.eager_mode @@ -52254,7 +55551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) local desc tf.with_op_name(name, "RandomGammaGrad") do desc = tf.NodeDescription("RandomGammaGrad") @@ -52272,7 +55569,10 @@ begin tf.add_input(desc, sample_) desc["T"] = tf.data_type(alpha_) desc["T"] = tf.data_type(sample_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function random_gamma_grad(alpha_, sample_; name=nothing) if tf.eager_mode @@ -52290,7 +55590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") @@ -52336,7 +55636,10 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.eager_mode @@ -52354,7 +55657,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) local desc 
tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") @@ -52378,7 +55681,10 @@ begin if max_elements !== nothing desc["max_elements"] = Base.Int(max_elements) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing) + tf.add_node(res[1], node) + return res[1] end function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) if tf.eager_mode @@ -52396,7 +55702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu6") do desc = tf.NodeDescription("QuantizedRelu6") @@ -52427,7 +55733,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["Tinput"] = tf.data_type(features_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.eager_mode @@ -52445,7 +55754,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMaximum") do desc = tf.NodeDescription("SparseSparseMaximum") @@ -52480,7 +55789,10 @@ begin tf.add_input(desc, b_shape_) desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) if tf.eager_mode @@ -52498,7 +55810,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalization") do desc = tf.NodeDescription("BatchNormWithGlobalNormalization") @@ -52540,7 +55852,10 @@ begin desc["T"] = tf.data_type(v_) desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(gamma_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, 
scale_after_normalization=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if tf.eager_mode @@ -52558,7 +55873,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) local desc tf.with_op_name(name, "InTopKV2") do desc = tf.NodeDescription("InTopKV2") @@ -52579,7 +55894,10 @@ begin tf.add_input(desc, k_) desc["T"] = tf.data_type(targets_) desc["T"] = tf.data_type(k_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function in_top_kv2(predictions_, targets_, k_; name=nothing) if tf.eager_mode @@ -52597,7 +55915,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cholesky_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "Cholesky") do desc = tf.NodeDescription("Cholesky") @@ -52611,7 +55929,10 @@ begin desc = tf.EagerOp("Cholesky") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(cholesky, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function cholesky(input_; name=nothing) if tf.eager_mode @@ -52629,7 +55950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") @@ -52677,7 +55998,10 @@ begin desc["T"] = tf.data_type(momentum_) desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.eager_mode @@ -52695,7 +56019,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagrad") do desc = tf.NodeDescription("ResourceApplyAdagrad") @@ -52731,7 +56055,10 @@ begin end desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) - (tf.execute(desc))[1] + res = 
tf.execute(desc) + node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.eager_mode @@ -52749,7 +56076,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") @@ -52803,7 +56130,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + tf.add_node(res[1], node) + return res[1] end function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.eager_mode @@ -52821,7 +56151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubicGrad") do desc = tf.NodeDescription("ResizeBicubicGrad") @@ -52844,7 +56174,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(original_image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -52862,7 +56195,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEig") do desc = tf.NodeDescription("BatchSelfAdjointEig") @@ -52876,7 +56209,10 @@ begin desc = tf.EagerOp("BatchSelfAdjointEig") tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_self_adjoint_eig, 
[input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_self_adjoint_eig(input_; name=nothing) if tf.eager_mode @@ -52894,7 +56230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmax") do desc = tf.NodeDescription("SparseSoftmax") @@ -52914,7 +56250,10 @@ begin tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) desc["T"] = tf.data_type(sp_values_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) if tf.eager_mode @@ -52932,7 +56271,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function asinh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asinh") do desc = tf.NodeDescription("Asinh") @@ -52946,7 +56285,10 @@ begin desc = tf.EagerOp("Asinh") tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(asinh, [x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function asinh(x_; name=nothing) if tf.eager_mode @@ -52964,7 +56306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRelu") do desc = tf.NodeDescription("QuantizedConv2DAndRelu") @@ -53024,7 +56366,10 @@ begin end desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d_and_relu, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + tf.add_node(res[1], node) + return res end function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.eager_mode @@ -53042,7 +56387,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixInverse") do desc = tf.NodeDescription("MatrixInverse") @@ -53062,7 +56407,10 @@ begin desc["adjoint"] = Base.Bool(adjoint) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = 
tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing) + tf.add_node(res[1], node) + return res[1] end function matrix_inverse(input_; name=nothing, adjoint=nothing) if tf.eager_mode @@ -53080,7 +56428,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcatLists") do desc = tf.NodeDescription("TensorListConcatLists") @@ -53101,7 +56449,10 @@ begin if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) if tf.eager_mode @@ -53119,7 +56470,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Requantize") do desc = tf.NodeDescription("Requantize") @@ -53156,7 +56507,10 @@ begin desc["out_type"] = Base.identity(out_type) end desc["Tinput"] = tf.data_type(input_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing) + tf.add_node(res[1], node) + return res end function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) if tf.eager_mode @@ -53174,7 +56528,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function fft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT") do desc = tf.NodeDescription("FFT") @@ -53188,7 +56542,10 @@ begin desc = tf.EagerOp("FFT") tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(fft, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function fft(input_; name=nothing) if tf.eager_mode @@ -53206,7 +56563,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "ConjugateTranspose") do desc = tf.NodeDescription("ConjugateTranspose") @@ -53225,7 +56582,10 @@ begin tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) desc["Tperm"] = tf.data_type(perm_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function 
conjugate_transpose(x_, perm_; name=nothing) if tf.eager_mode @@ -53243,7 +56603,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unstage") do desc = tf.NodeDescription("Unstage") @@ -53282,7 +56642,10 @@ begin if shared_name !== nothing desc["shared_name"] = Base.String(shared_name) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + tf.add_node(res[1], node) + return res[1] end function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.eager_mode @@ -53300,7 +56663,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "Relu6Grad") do desc = tf.NodeDescription("Relu6Grad") @@ -53318,7 +56681,10 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function relu6grad(gradients_, features_; name=nothing) if tf.eager_mode @@ -53336,7 +56702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslateGrad") do desc = tf.NodeDescription("ScaleAndTranslateGrad") @@ -53366,7 +56732,10 @@ begin end desc["T"] = tf.data_type(grads_) desc["T"] = tf.data_type(original_image_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(scale_and_translate_grad, [grads_, original_image_, scale_, translation_], name=nothing, kernel_type=nothing) + tf.add_node(res[1], node) + return res[1] end function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) if tf.eager_mode @@ -53384,7 +56753,7 @@ end Converts an array of tensors to a list of tensors. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) local desc tf.with_op_name(name, "_ArrayToList") do desc = tf.NodeDescription("_ArrayToList") @@ -53410,7 +56779,10 @@ begin desc["out_types"] = map(Base.identity, out_types) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing) + tf.add_node(res[1], node) + return res[1] end function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) if tf.eager_mode @@ -53428,7 +56800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV3") do desc = tf.NodeDescription("CudnnRNNV3") @@ -53504,7 +56876,10 @@ begin desc["T"] = tf.data_type(input_h_) desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnnv3, [input_, input_h_, input_c_, params_, sequence_lengths_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + tf.add_node(res[1], node) + return res end function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) if tf.eager_mode @@ -53522,7 +56897,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) local desc tf.with_op_name(name, "ExpandDims") do desc = tf.NodeDescription("ExpandDims") @@ -53542,7 +56917,10 @@ begin tf.add_input(desc, dim_) desc["T"] = tf.data_type(input_) desc["Tdim"] = tf.data_type(dim_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function expand_dims(input_, dim_; name=nothing) if tf.eager_mode @@ -53560,7 +56938,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "InvGrad") do desc = tf.NodeDescription("InvGrad") @@ -53578,7 +56956,10 @@ begin tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing) 
+ tf.add_node(res[1], node) + return res[1] end function inv_grad(y_, dy_; name=nothing) if tf.eager_mode @@ -53596,7 +56977,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) local desc tf.with_op_name(name, "NonMaxSuppression") do desc = tf.NodeDescription("NonMaxSuppression") @@ -53620,7 +57001,10 @@ begin if iou_threshold !== nothing desc["iou_threshold"] = Base.identity(iou_threshold) end - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing) + tf.add_node(res[1], node) + return res[1] end function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) if tf.eager_mode @@ -53638,7 +57022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function l2loss_graph(t_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) local desc tf.with_op_name(name, "L2Loss") do desc = tf.NodeDescription("L2Loss") @@ -53652,7 +57036,10 @@ begin desc = tf.EagerOp("L2Loss") tf.add_input(desc, t_) desc["T"] = tf.data_type(t_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(l2loss, [t_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function l2loss(t_; name=nothing) if tf.eager_mode @@ -53670,7 +57057,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeArea") do desc = tf.NodeDescription("ResizeArea") @@ -53693,7 +57080,10 @@ begin desc["align_corners"] = Base.Bool(align_corners) end desc["T"] = tf.data_type(images_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing) + tf.add_node(res[1], node) + return res[1] end function resize_area(images_, size_; name=nothing, align_corners=nothing) if tf.eager_mode @@ -53711,7 +57101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) local desc tf.with_op_name(name, "SparseCross") do desc = tf.NodeDescription("SparseCross") @@ -53785,7 +57175,10 @@ begin if internal_type !== nothing desc["internal_type"] = Base.identity(internal_type) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, 
dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + tf.add_node(res[1], node) + return res end function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) if tf.eager_mode @@ -53803,7 +57196,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function batch_fft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT3D") do desc = tf.NodeDescription("BatchFFT3D") @@ -53815,7 +57208,10 @@ begin function batch_fft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT3D") tf.add_input(desc, input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(batch_fft3d, [input_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function batch_fft3d(input_; name=nothing) if tf.eager_mode @@ -53833,7 +57229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomStandardNormal") do desc = tf.NodeDescription("RandomStandardNormal") @@ -53865,7 +57261,10 @@ begin desc["dtype"] = Base.identity(dtype) end desc["T"] = tf.data_type(shape_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.eager_mode @@ -53883,7 +57282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMul") do desc = tf.NodeDescription("ResourceScatterMul") @@ -53912,7 +57311,10 @@ begin end desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing) + tf.add_node(res[1], node) + return res[1] end function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.eager_mode @@ -53930,7 +57332,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, 
num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizer") do desc = tf.NodeDescription("SdcaOptimizer") @@ -54028,7 +57430,10 @@ begin if num_inner_iterations !== nothing desc["num_inner_iterations"] = Base.Int(num_inner_iterations) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + tf.add_node(res[1], node) + return res end function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) if tf.eager_mode @@ -54046,7 +57451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function zeta_graph(x_, q_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) local desc tf.with_op_name(name, "Zeta") do desc = tf.NodeDescription("Zeta") @@ -54064,7 +57469,10 @@ begin tf.add_input(desc, q_) desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(q_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(zeta, [x_, q_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function zeta(x_, q_; name=nothing) if tf.eager_mode @@ -54082,7 +57490,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBox") do desc = tf.NodeDescription("SampleDistortedBoundingBox") @@ -54146,7 +57554,10 @@ begin desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) end desc["T"] = tf.data_type(image_size_) - 
tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + tf.add_node(res[1], node) + return res end function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) if tf.eager_mode @@ -54164,7 +57575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "IgammaGradA") do desc = tf.NodeDescription("IgammaGradA") @@ -54182,7 +57593,10 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function igamma_grad_a(a_, x_; name=nothing) if tf.eager_mode @@ -54200,7 +57614,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMax") do desc = tf.NodeDescription("SegmentMax") @@ -54220,7 +57634,10 @@ begin tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function segment_max(data_, segment_ids_; name=nothing) if tf.eager_mode @@ -54238,7 +57655,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) local desc tf.with_op_name(name, "Range") do desc = tf.NodeDescription("Range") @@ -54260,7 +57677,10 @@ begin desc["Tidx"] = tf.data_type(start_) desc["Tidx"] = tf.data_type(limit_) desc["Tidx"] = tf.data_type(delta_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function range(start_, limit_, delta_; name=nothing) if tf.eager_mode @@ -54278,7 +57698,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do desc = 
tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") @@ -54316,7 +57736,10 @@ begin if shard_id !== nothing desc["shard_id"] = Base.Int(shard_id) end - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + tf.add_node(res[1], node) + return res end function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.eager_mode @@ -54334,7 +57757,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "FlushSummaryWriter") do desc = tf.NodeDescription("FlushSummaryWriter") @@ -54346,7 +57769,10 @@ begin function flush_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("FlushSummaryWriter") tf.add_input(desc, writer_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing) + tf.add_node(res[1], node) + return res[1] end function flush_summary_writer(writer_; name=nothing) if tf.eager_mode @@ -54364,7 +57790,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "Dequantize") do desc = tf.NodeDescription("Dequantize") @@ -54390,7 +57816,10 @@ begin desc["mode"] = Base.String(mode) end desc["T"] = tf.data_type(input_) - (tf.execute(desc))[1] + res = tf.execute(desc) + node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing) + tf.add_node(res[1], node) + return res[1] end function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) if tf.eager_mode @@ -54408,7 +57837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRowsGrad") do desc = tf.NodeDescription("SparseFillEmptyRowsGrad") @@ -54430,7 +57859,10 @@ begin tf.add_input(desc, reverse_index_map_) tf.add_input(desc, grad_values_) desc["T"] = tf.data_type(grad_values_) - tf.execute(desc) + res = tf.execute(desc) + node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing) + tf.add_node(res[1], node) + return res end function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) if tf.eager_mode @@ -54448,7 +57880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, 
output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "IteratorGetNext") do
             desc = tf.NodeDescription("IteratorGetNext")
@@ -54472,7 +57904,10 @@ begin
         if output_shapes !== nothing
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
         if tf.eager_mode
@@ -54490,7 +57925,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseTensorDenseAdd") do
             desc = tf.NodeDescription("SparseTensorDenseAdd")
@@ -54519,7 +57954,10 @@ begin
         desc["T"] = tf.data_type(a_values_)
         desc["Tindices"] = tf.data_type(a_shape_)
         desc["T"] = tf.data_type(b_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing)
         if tf.eager_mode
@@ -54537,7 +57975,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing)
         local desc
         tf.with_op_name(name, "PreventGradient") do
             desc = tf.NodeDescription("PreventGradient")
@@ -54557,7 +57995,10 @@ begin
             desc["message"] = Base.String(message)
         end
         desc["T"] = tf.data_type(input_)
-        (tf.execute(desc))[1]
+        res = tf.execute(desc)
+        node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing)
+        tf.add_node(res[1], node)
+        return res[1]
     end
     function prevent_gradient(input_; name=nothing, message=nothing)
         if tf.eager_mode
@@ -54575,7 +58016,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:230 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "LookupTableExport") do
             desc = tf.NodeDescription("LookupTableExport")
@@ -54592,7 +58033,10 @@ begin
     function lookup_table_export_eager(table_handle_; name=nothing)
         desc = tf.EagerOp("LookupTableExport")
         tf.add_input(desc, table_handle_)
-        tf.execute(desc)
+        res = tf.execute(desc)
+        node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing)
+        tf.add_node(res[1], node)
+        return res
     end
     function lookup_table_export(table_handle_; name=nothing)
         if tf.eager_mode
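Every generated hunk above makes the same mechanical change: instead of returning (tf.execute(desc))[1] directly, each eager wrapper now keeps the full vector of output handles, records a tf.TapeNode naming the op and its inputs, and registers that node with tf.add_node so the gradient tape in tape.jl below can trace the call. A representative sketch of the rewritten wrapper shape, using Add purely for illustration (it is not one of the hunks shown here, but follows the same template):

    function add_eager(x_, y_; name=nothing)
        desc = tf.EagerOp("Add")
        tf.add_input(desc, x_)
        tf.add_input(desc, y_)
        desc["T"] = tf.data_type(x_)
        res = tf.execute(desc)                 # vector of output TensorHandles
        node = tf.TapeNode(add, [x_, y_], name=nothing)
        tf.add_node(res[1], node)              # tape entry is keyed on the first output
        return res[1]                          # multi-output ops return the whole res instead
    end

Since tf.add_node is always called on res[1], gradients can at this stage only be traced back through an op's first output, even for ops that return several tensors.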
= TapeNode(op, args, kwargs) mutable struct Tape - nodes::Dict{Tensor, TapeNode} + nodes::Dict{TensorHandle, TapeNode} end -Tape() = Tape(Dict{Tensor, TapeNode}()) +Tape() = Tape(Dict{TensorHandle, TapeNode}()) tape = nothing @@ -28,74 +27,114 @@ function set_tape(new_tape=nothing) end function add_node(t, node) + tape === nothing && return tape.nodes[t] = node end -function *(t1::Tensor, t2::Tensor) - t3 = Tensor(t1.x*t2.x) - node = TapeNode(*, [t1, t2]) - add_node(t3, node) - return t3 -end - -function *(t1::Tensor, t2::AbstractFloat) - return Tensor(t1.x*t2) -end - grad_fns = Dict() macro back_for(target, fn) def = splitdef(fn) + if def[:name] == :f + def[:name] = Symbol(string(target, "_", "backwards")) + end quote - $(esc(fn)) + $(esc(combinedef(def))) grad_fns[$target] = $(def[:name]) end end - -@back_for(*, function mul_backwards(args) - return [args[2], args[1]] +@back_for(Ops.add, function f(x, y; kwargs...) + return [constant(1.0), constant(1.0)] end) +@back_for(Ops.sub, function f(x, y; kwargs...) + return [constant(1.0), constant(-1.0)] +end) -@back_for(log, function log_backwards(args) - return [Tensor(1/args[1].x)] +@back_for(Ops.neg, function f(x; kwargs...) + return constant(-1.0) end) -function Base.sin(t::Tensor) +function with_no_grad(f) + old_tape = tape + global tape = nothing + res = f() + global tape = old_tape + return res end -@back_for(sin, function sin_backwards(args) - return [Tensor(cos(args[1].x)) +@back_for(Ops.exp, function f(x; kwargs...) + Ops.exp(x) end) -function log(t::Tensor) - res = Tensor(log(t.x)) - node = TapeNode(log, [t]) - add_node(res, node) - return res -end +@back_for(Ops.mean, function f(x, reduction_indices; keep_dims=nothing, kwargs...) + # assume reduction_indices is everything for now + n_elem = float(num_elements(x)) + [Ops.fill(size(x), 1/constant(n_elem)), nothing] +end) + +@back_for(Ops.sum, function f(x, reduction_indices; keep_dims=nothing, kwargs...) + # assume reduction_indices is everything for now + [Ops.fill(size(x), constant(1.0)), nothing] +end) + + +@back_for(Ops.mul, function f(x, y; kwargs...) + return [y, x] end) -function grad(tape::Tape, tensor, out_grad, grads) +@back_for(Ops.cast, function f(x; kwargs...) + return constant(1.0) +end) + + +@back_for(Ops.log, function f(x; kwargs...) + return 1/x +end) + +@back_for(Ops.sin, function f(x; kwargs...) + return cos(x) +end) + +@back_for(Ops.cos, function f(x; kwargs...) + return -sin(x) +end) + +@back_for(Ops.relu, function f(x; kwargs...) + (x > 0) .* x +end) + + +ensure_vector(x::AbstractArray) = x +ensure_vector(x) = [x] + +function _grad(tape::Tape, tensor, out_grad, grads) if !haskey(tape.nodes, tensor) return end node = tape.nodes[tensor] back_op = grad_fns[node.op] - arg_grads = back_op(node.args) - - + arg_grads = with_no_grad() do + back_op(node.args...; node.kwargs...)
+ end + arg_grads = ensure_vector(arg_grads) for (i, arg) in enumerate(node.args) - grads[arg] = arg_grads[i]*out_grad - grad(tape, arg, grads[arg].x, grads) + arg_grads[i] === nothing && continue + grads[arg] = arg_grads[i].*out_grad + _grad(tape, arg, grads[arg], grads) end return end -function grad(tape, tensor, out_grad=1.0) +function grad(tape, tensor, in_tensors::AbstractArray, out_grad=1.0) grads = Dict() - grad(tape, tensor, out_grad, grads) - return grads + _grad(tape, tensor, out_grad, grads) + return [grads[tensor] for tensor in in_tensors] +end + +function grad(tape, tensor, in_tensor, out_grad=1.0) + grad(tape, tensor, [in_tensor], out_grad)[1] end From 21bddb09e29ffbfc2d0a93eea788e488c6ca83ee Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 22 Feb 2019 14:51:46 -0500 Subject: [PATCH 15/49] Misc. Fix transpose. More grads. --- src/eager.jl | 2 + src/generate_ops.jl | 6 + src/ops/imported_ops.jl | 2811 +++++++++++++++++++++++++++++++++++- src/ops/op_names.txt | 1148 +++++++++++++++ src/ops/transformations.jl | 3 +- src/tape.jl | 70 +- 6 files changed, 3941 insertions(+), 99 deletions(-) create mode 100644 src/ops/op_names.txt diff --git a/src/eager.jl b/src/eager.jl index 5228070a..de3400a9 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -301,3 +301,5 @@ function set_xla_compilation(op::EagerOp, enable) return enable end +Base.convert(::Type{TensorHandle}, h::TensorHandle) = h +Base.convert(::Type{TensorHandle}, h) = constant(h) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 9065251d..493ebeb9 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -240,11 +240,17 @@ function to_function(op::tensorflow.OpDef) end end + eager_convert_block = [] + for input in inputs[2:end] + c = :($input = convert(tf.TensorHandle, $input)) + push!(eager_convert_block, c) + end eager_expr = quote function $eager_name($(inputs...)) desc = tf.EagerOp($(op.name)) # $convert_block + $(eager_convert_block...) $eager_input_block $attr_block $(t_block...) 
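With the pieces above in place, the reverse-mode loop is usable end to end: each generated *_eager method converts its inputs to TensorHandles, runs the kernel, records a TapeNode, and grad later walks the recorded nodes backwards through grad_fns. A minimal usage sketch follows; it is illustrative only and not part of the patch, and it assumes eager mode is enabled, that constant produces a TensorHandle (as implied by the Base.convert fallback above), and that set_tape() returns the tape it installs:

using TensorFlow
const tf = TensorFlow

tape = tf.set_tape()        # install (and keep a handle to) a fresh global tape
a = tf.constant(2.0)
b = tf.constant(5.0)
c = tf.Ops.mul(a, b)        # kernel runs eagerly; TapeNode(mul, [a, b]) is recorded
d = tf.Ops.log(c)           # TapeNode(log, [c]) is recorded
# Walking the tape from d applies the chain rule node by node:
# d(log(a*b))/da = (1/(a*b)) * b = 1/a = 0.5
da = tf.grad(tape, d, a)
# Backward rules run under with_no_grad, so the ops they execute
# (e.g. the 1/x in the log rule) are not themselves recorded:
tf.with_no_grad() do
    tf.Ops.mul(a, b)        # executes, but leaves the tape untouched
end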
diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index b21d80a0..84f113ca 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-02-22T12:12:16.859 +# Autogenerated on 2019-02-22T14:43:13.001 module Ops import TensorFlow @@ -28,6 +28,8 @@ begin end function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) desc = tf.EagerOp("ReduceJoin") + inputs_ = convert(tf.TensorHandle, inputs_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, inputs_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -90,6 +92,9 @@ begin end function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) desc = tf.EagerOp("ReduceDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + initial_state_ = convert(tf.TensorHandle, initial_state_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) tf.add_input(desc, other_arguments_) @@ -153,6 +158,8 @@ begin end function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListFromTensor") + tensor_ = convert(tf.TensorHandle, tensor_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, tensor_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -198,6 +205,7 @@ begin end function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) desc = tf.EagerOp("ExtractJpegShape") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) if output_type !== nothing desc["output_type"] = Base.identity(output_type) @@ -246,6 +254,7 @@ begin end function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("Svd") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if compute_uv !== nothing desc["compute_uv"] = Base.Bool(compute_uv) @@ -292,6 +301,7 @@ begin end function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextSync") + iterator_ = convert(tf.TensorHandle, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -341,6 +351,7 @@ begin end function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("RefEnter") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) if frame_name !== nothing desc["frame_name"] = Base.String(frame_name) @@ -385,6 +396,7 @@ begin end function erf_eager(x_; name=nothing) desc = tf.EagerOp("Erf") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -424,6 +436,7 @@ begin end function lookup_table_export_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExportV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing) @@ -458,6 +471,7 @@ begin end function round_eager(x_; name=nothing) desc = tf.EagerOp("Round") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) 
desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -540,6 +554,7 @@ begin end function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeIsInitializedOp") + tree_handle_ = convert(tf.TensorHandle, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing) @@ -582,6 +597,7 @@ begin end function merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("Merge") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -627,6 +643,9 @@ begin end function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) desc = tf.EagerOp("HistogramFixedWidth") + values_ = convert(tf.TensorHandle, values_) + value_range_ = convert(tf.TensorHandle, value_range_) + nbins_ = convert(tf.TensorHandle, nbins_) tf.add_input(desc, values_) tf.add_input(desc, value_range_) tf.add_input(desc, nbins_) @@ -668,6 +687,7 @@ begin end function asin_eager(x_; name=nothing) desc = tf.EagerOp("Asin") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -709,6 +729,8 @@ begin end function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Any") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -750,6 +772,8 @@ begin end function rsqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("RsqrtGrad") + y_ = convert(tf.TensorHandle, y_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -793,6 +817,10 @@ begin end function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatter") + handle_ = convert(tf.TensorHandle, handle_) + indices_ = convert(tf.TensorHandle, indices_) + value_ = convert(tf.TensorHandle, value_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, value_) @@ -841,6 +869,8 @@ begin end function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) desc = tf.EagerOp("DynamicPartition") + data_ = convert(tf.TensorHandle, data_) + partitions_ = convert(tf.TensorHandle, partitions_) tf.add_input(desc, data_) tf.add_input(desc, partitions_) if num_partitions !== nothing @@ -887,6 +917,8 @@ begin end function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + num_threads_ = convert(tf.TensorHandle, num_threads_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_threads_) if output_types !== nothing @@ -927,6 +959,7 @@ begin end function reader_serialize_state_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeState") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing) @@ -963,6 +996,8 @@ begin end function right_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("RightShift") + x_ = convert(tf.TensorHandle, x_) + y_ = 
convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -983,7 +1018,7 @@ end """ - avg_pool3d(input; data_format=) + avg_pool3d(input; data_format=NDHWC) """ @@ -1012,6 +1047,7 @@ begin end function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -1062,6 +1098,7 @@ begin end function encode_png_eager(image_; name=nothing, compression=nothing) desc = tf.EagerOp("EncodePng") + image_ = convert(tf.TensorHandle, image_) tf.add_input(desc, image_) if compression !== nothing desc["compression"] = Base.Int(compression) @@ -1112,6 +1149,7 @@ begin end function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugIdentity") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -1159,6 +1197,7 @@ begin end function imag_eager(input_; name=nothing) desc = tf.EagerOp("Imag") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -1217,6 +1256,16 @@ begin end function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrlV2") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -1330,6 +1379,7 @@ begin end function sign_eager(x_; name=nothing) desc = tf.EagerOp("Sign") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1365,6 +1415,7 @@ begin end function population_count_eager(x_; name=nothing) desc = tf.EagerOp("PopulationCount") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1400,6 +1451,7 @@ begin end function neg_eager(x_; name=nothing) desc = tf.EagerOp("Neg") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1486,6 +1538,10 @@ begin end function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSum") + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_values_ = convert(tf.TensorHandle, input_values_) + input_shape_ = convert(tf.TensorHandle, input_shape_) + reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -1540,6 +1596,8 @@ begin end function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterDataset") + 
input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if predicate !== nothing @@ -1570,7 +1628,7 @@ end """ - string_length(input; unit=) + string_length(input; unit=BYTE) """ @@ -1589,6 +1647,7 @@ begin end function string_length_eager(input_; name=nothing, unit=nothing) desc = tf.EagerOp("StringLength") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if unit !== nothing desc["unit"] = Base.String(unit) @@ -1609,7 +1668,7 @@ end """ - conv3d(input, filter; data_format=, dilations=[1, 1, 1, 1, 1]) + conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) """ @@ -1640,6 +1699,8 @@ begin end function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3D") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -1747,6 +1808,7 @@ begin end function optional_has_value_eager(optional_; name=nothing) desc = tf.EagerOp("OptionalHasValue") + optional_ = convert(tf.TensorHandle, optional_) tf.add_input(desc, optional_) res = tf.execute(desc) node = tf.TapeNode(optional_has_value, [optional_], name=nothing) @@ -1805,6 +1867,16 @@ begin end function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyAdam") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + beta1_power_ = convert(tf.TensorHandle, beta1_power_) + beta2_power_ = convert(tf.TensorHandle, beta2_power_) + lr_ = convert(tf.TensorHandle, lr_) + beta1_ = convert(tf.TensorHandle, beta1_) + beta2_ = convert(tf.TensorHandle, beta2_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -1847,7 +1919,7 @@ end """ - cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -1896,6 +1968,10 @@ begin end function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsToCanonical") + num_layers_ = convert(tf.TensorHandle, num_layers_) + num_units_ = convert(tf.TensorHandle, num_units_) + input_size_ = convert(tf.TensorHandle, input_size_) + params_ = convert(tf.TensorHandle, params_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -1956,6 +2032,8 @@ begin end function irfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT3D") + input_ = convert(tf.TensorHandle, input_) + fft_length_ = convert(tf.TensorHandle, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -1991,6 +2069,7 @@ begin end function angle_eager(input_; name=nothing) desc = tf.EagerOp("Angle") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, 
input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -2090,6 +2169,7 @@ begin end function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LearnedUnigramCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -2178,6 +2258,7 @@ begin end function matrix_square_root_eager(input_; name=nothing) desc = tf.EagerOp("MatrixSquareRoot") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -2219,6 +2300,10 @@ begin end function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseMul") + sp_indices_ = convert(tf.TensorHandle, sp_indices_) + sp_values_ = convert(tf.TensorHandle, sp_values_) + sp_shape_ = convert(tf.TensorHandle, sp_shape_) + dense_ = convert(tf.TensorHandle, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -2270,6 +2355,8 @@ begin end function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV3") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -2310,6 +2397,7 @@ begin end function unicode_script_eager(input_; name=nothing) desc = tf.EagerOp("UnicodeScript") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(unicode_script, [input_], name=nothing) @@ -2346,6 +2434,8 @@ begin end function batch_cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("BatchCholeskyGrad") + l_ = convert(tf.TensorHandle, l_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, l_) tf.add_input(desc, grad_) desc["T"] = tf.data_type(l_) @@ -2390,6 +2480,8 @@ begin end function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Mean") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -2429,6 +2521,7 @@ begin end function batch_fft_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft, [input_], name=nothing) @@ -2463,6 +2556,7 @@ begin end function sin_eager(x_; name=nothing) desc = tf.EagerOp("Sin") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -2558,6 +2652,9 @@ begin end function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedMaxPool") + input_ = convert(tf.TensorHandle, input_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) tf.add_input(desc, input_) tf.add_input(desc, min_input_) tf.add_input(desc, max_input_) @@ -2625,6 +2722,9 @@ begin end function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, 
container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapStage") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, key_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -2696,6 +2796,7 @@ begin end function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("PartitionedCall") + args_ = convert(tf.TensorHandle, args_) tf.add_input(desc, args_) if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) @@ -2764,6 +2865,11 @@ begin end function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("SparseApplyAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -2796,7 +2902,7 @@ end """ - decode_proto_v2(bytes; descriptor_source=, message_format=, sanitize=false) + decode_proto_v2(bytes; descriptor_source=local://, message_format=binary, sanitize=false) """ @@ -2835,6 +2941,7 @@ begin end function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) desc = tf.EagerOp("DecodeProtoV2") + bytes_ = convert(tf.TensorHandle, bytes_) tf.add_input(desc, bytes_) if message_type !== nothing desc["message_type"] = Base.String(message_type) @@ -2891,6 +2998,9 @@ begin end function betainc_eager(a_, b_, x_; name=nothing) desc = tf.EagerOp("Betainc") + a_ = convert(tf.TensorHandle, a_) + b_ = convert(tf.TensorHandle, b_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, a_) tf.add_input(desc, b_) tf.add_input(desc, x_) @@ -2930,6 +3040,7 @@ begin end function guarantee_const_eager(input_; name=nothing) desc = tf.EagerOp("GuaranteeConst") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -2967,6 +3078,7 @@ begin end function decode_bmp_eager(contents_; name=nothing, channels=nothing) desc = tf.EagerOp("DecodeBmp") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -3013,6 +3125,8 @@ begin end function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesBucketize") + float_values_ = convert(tf.TensorHandle, float_values_) + bucket_boundaries_ = convert(tf.TensorHandle, bucket_boundaries_) tf.add_input(desc, float_values_) tf.add_input(desc, bucket_boundaries_) if num_features !== nothing @@ -3081,6 +3195,7 @@ begin end function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") + iterator_ = convert(tf.TensorHandle, iterator_) tf.add_input(desc, iterator_) res = tf.execute(desc) node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing) @@ -3146,6 +3261,7 @@ begin end function matrix_exponential_eager(input_; name=nothing) desc = tf.EagerOp("MatrixExponential") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = 
tf.execute(desc) @@ -3184,6 +3300,7 @@ begin end function size_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Size") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -3225,6 +3342,7 @@ begin end function add_n_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("AddN") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -3269,6 +3387,9 @@ begin end function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSum") + data_ = convert(tf.TensorHandle, data_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -3314,6 +3435,8 @@ begin end function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + batch_size_ = convert(tf.TensorHandle, batch_size_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) if output_types !== nothing @@ -3434,6 +3557,8 @@ begin end function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpToV2") + handle_ = convert(tf.TensorHandle, handle_) + n_ = convert(tf.TensorHandle, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -3551,6 +3676,10 @@ begin end function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -3600,6 +3729,7 @@ begin end function serialize_tensor_eager(tensor_; name=nothing) desc = tf.EagerOp("SerializeTensor") + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tensor_) desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) @@ -3637,6 +3767,8 @@ begin end function mul_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mul") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -3681,6 +3813,8 @@ begin end function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") + features_ = convert(tf.TensorHandle, features_) + labels_ = convert(tf.TensorHandle, labels_) tf.add_input(desc, features_) tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) @@ -3727,6 +3861,9 @@ begin end function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterDiv") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -3777,6 +3914,12 @@ begin end function 
fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDatasetV2") + filenames_ = convert(tf.TensorHandle, filenames_) + header_bytes_ = convert(tf.TensorHandle, header_bytes_) + record_bytes_ = convert(tf.TensorHandle, record_bytes_) + footer_bytes_ = convert(tf.TensorHandle, footer_bytes_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) + compression_type_ = convert(tf.TensorHandle, compression_type_) tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) tf.add_input(desc, record_bytes_) @@ -3823,6 +3966,8 @@ begin end function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("SkipDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + count_ = convert(tf.TensorHandle, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) if output_types !== nothing @@ -3864,6 +4009,7 @@ begin end function cosh_eager(x_; name=nothing) desc = tf.EagerOp("Cosh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -3882,7 +4028,7 @@ end """ - fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) + fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) """ @@ -3925,6 +4071,11 @@ begin end function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormV2") + x_ = convert(tf.TensorHandle, x_) + scale_ = convert(tf.TensorHandle, scale_) + offset_ = convert(tf.TensorHandle, offset_) + mean_ = convert(tf.TensorHandle, mean_) + variance_ = convert(tf.TensorHandle, variance_) tf.add_input(desc, x_) tf.add_input(desc, scale_) tf.add_input(desc, offset_) @@ -3986,6 +4137,10 @@ begin end function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplit") + handle_ = convert(tf.TensorHandle, handle_) + value_ = convert(tf.TensorHandle, value_) + lengths_ = convert(tf.TensorHandle, lengths_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, lengths_) @@ -4043,6 +4198,10 @@ begin end function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) desc = tf.EagerOp("CTCLoss") + inputs_ = convert(tf.TensorHandle, inputs_) + labels_indices_ = convert(tf.TensorHandle, labels_indices_) + labels_values_ = convert(tf.TensorHandle, labels_values_) + sequence_length_ = convert(tf.TensorHandle, sequence_length_) tf.add_input(desc, inputs_) tf.add_input(desc, labels_indices_) tf.add_input(desc, labels_values_) @@ -4101,6 +4260,10 @@ begin end function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("QuantizedReshape") + tensor_ = convert(tf.TensorHandle, tensor_) + shape_ = convert(tf.TensorHandle, shape_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) tf.add_input(desc, tensor_) tf.add_input(desc, shape_) tf.add_input(desc, input_min_) @@ -4142,6 +4305,8 @@ begin end function floor_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorDiv") + x_ = 
convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -4193,6 +4358,7 @@ begin end function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV2") + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, size_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -4244,6 +4410,7 @@ begin end function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("BarrierClose") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) @@ -4283,6 +4450,7 @@ begin end function read_variable_op_eager(resource_; name=nothing, dtype=nothing) desc = tf.EagerOp("ReadVariableOp") + resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -4336,6 +4504,12 @@ begin end function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedMul") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + min_x_ = convert(tf.TensorHandle, min_x_) + max_x_ = convert(tf.TensorHandle, max_x_) + min_y_ = convert(tf.TensorHandle, min_y_) + max_y_ = convert(tf.TensorHandle, max_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, min_x_) @@ -4377,6 +4551,7 @@ begin end function selu_eager(features_; name=nothing) desc = tf.EagerOp("Selu") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -4395,7 +4570,7 @@ end """ - cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -4459,6 +4634,19 @@ begin end function cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV3") + input_ = convert(tf.TensorHandle, input_) + input_h_ = convert(tf.TensorHandle, input_h_) + input_c_ = convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) + sequence_lengths_ = convert(tf.TensorHandle, sequence_lengths_) + output_ = convert(tf.TensorHandle, output_) + output_h_ = convert(tf.TensorHandle, output_h_) + output_c_ = convert(tf.TensorHandle, output_c_) + output_backprop_ = convert(tf.TensorHandle, output_backprop_) + output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) + output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) + reserve_space_ = convert(tf.TensorHandle, reserve_space_) + host_reserved_ = convert(tf.TensorHandle, host_reserved_) tf.add_input(desc, input_) 
tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -4539,6 +4727,9 @@ begin end function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsert") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -4577,6 +4768,7 @@ begin end function complex_abs_eager(x_; name=nothing) desc = tf.EagerOp("ComplexAbs") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -4614,6 +4806,8 @@ begin end function tridiagonal_solve_eager(diagonals_, rhs_; name=nothing) desc = tf.EagerOp("TridiagonalSolve") + diagonals_ = convert(tf.TensorHandle, diagonals_) + rhs_ = convert(tf.TensorHandle, rhs_) tf.add_input(desc, diagonals_) tf.add_input(desc, rhs_) desc["T"] = tf.data_type(diagonals_) @@ -4656,6 +4850,9 @@ begin end function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImport") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -4694,6 +4891,7 @@ begin end function abs_eager(x_; name=nothing) desc = tf.EagerOp("Abs") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -4753,6 +4951,16 @@ begin end function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyAdam") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + beta1_power_ = convert(tf.TensorHandle, beta1_power_) + beta2_power_ = convert(tf.TensorHandle, beta2_power_) + lr_ = convert(tf.TensorHandle, lr_) + beta1_ = convert(tf.TensorHandle, beta1_) + beta2_ = convert(tf.TensorHandle, beta2_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -4815,6 +5023,10 @@ begin end function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing) desc = tf.EagerOp("WriteHistogramSummary") + writer_ = convert(tf.TensorHandle, writer_) + step_ = convert(tf.TensorHandle, step_) + tag_ = convert(tf.TensorHandle, tag_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -4854,6 +5066,8 @@ begin end function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(tf.TensorHandle, dataset_) + materialized_ = convert(tf.TensorHandle, materialized_) tf.add_input(desc, dataset_) tf.add_input(desc, materialized_) res = tf.execute(desc) @@ -4904,6 +5118,7 @@ begin end function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_HostSend") + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tensor_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -4956,6 +5171,8 @@ begin end function 
greater_eager(x_, y_; name=nothing) desc = tf.EagerOp("Greater") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -4996,6 +5213,7 @@ begin end function nccl_broadcast_eager(input_; name=nothing, shape=nothing) desc = tf.EagerOp("NcclBroadcast") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if shape !== nothing desc["shape"] = Base.identity(shape) @@ -5039,6 +5257,8 @@ begin end function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBackBatch") + input_handles_ = convert(tf.TensorHandle, input_handles_) + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, input_handles_) tf.add_input(desc, tensor_) if element_dtype !== nothing @@ -5087,6 +5307,9 @@ begin end function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMin") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -5137,6 +5360,9 @@ begin end function slice_eager(input_, begin_, size_; name=nothing, Index=nothing) desc = tf.EagerOp("Slice") + input_ = convert(tf.TensorHandle, input_) + begin_ = convert(tf.TensorHandle, begin_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, input_) tf.add_input(desc, begin_) tf.add_input(desc, size_) @@ -5162,7 +5388,7 @@ end """ - unicode_decode(input; errors=, replacement_char=65533, replace_control_characters=false) + unicode_decode(input; errors=replace, replacement_char=65533, replace_control_characters=false) """ @@ -5195,6 +5421,7 @@ begin end function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecode") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -5248,6 +5475,8 @@ begin end function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TakeDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + count_ = convert(tf.TensorHandle, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) if output_types !== nothing @@ -5303,6 +5532,10 @@ begin end function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeStatsSummary") + node_ids_ = convert(tf.TensorHandle, node_ids_) + gradients_ = convert(tf.TensorHandle, gradients_) + hessians_ = convert(tf.TensorHandle, hessians_) + bucketized_features_list_ = convert(tf.TensorHandle, bucketized_features_list_) tf.add_input(desc, node_ids_) tf.add_input(desc, gradients_) tf.add_input(desc, hessians_) @@ -5368,6 +5601,7 @@ begin end function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("AllCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -5400,7 
+5634,7 @@ end """ - conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1]) + conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) """ @@ -5439,6 +5673,9 @@ begin end function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropInput") + input_sizes_ = convert(tf.TensorHandle, input_sizes_) + filter_ = convert(tf.TensorHandle, filter_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -5500,6 +5737,7 @@ begin end function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("DatasetToSingleElement") + dataset_ = convert(tf.TensorHandle, dataset_) tf.add_input(desc, dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -5547,6 +5785,8 @@ begin end function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("CacheDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + filename_ = convert(tf.TensorHandle, filename_) tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) if output_types !== nothing @@ -5604,6 +5844,10 @@ begin end function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") + gradients_ = convert(tf.TensorHandle, gradients_) + inputs_ = convert(tf.TensorHandle, inputs_) + min_ = convert(tf.TensorHandle, min_) + max_ = convert(tf.TensorHandle, max_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -5665,6 +5909,10 @@ begin end function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("FusedResizeAndPadConv2D") + input_ = convert(tf.TensorHandle, input_) + size_ = convert(tf.TensorHandle, size_) + paddings_ = convert(tf.TensorHandle, paddings_) + filter_ = convert(tf.TensorHandle, filter_) tf.add_input(desc, input_) tf.add_input(desc, size_) tf.add_input(desc, paddings_) @@ -5750,6 +5998,7 @@ begin end function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) desc = tf.EagerOp("Batch") + in_tensors_ = convert(tf.TensorHandle, in_tensors_) tf.add_input(desc, in_tensors_) if num_batch_threads !== nothing desc["num_batch_threads"] = Base.Int(num_batch_threads) @@ -5874,6 +6123,9 @@ begin end function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing) desc = tf.EagerOp("BatchToSpaceND") + input_ = convert(tf.TensorHandle, input_) + block_shape_ = convert(tf.TensorHandle, block_shape_) + crops_ = convert(tf.TensorHandle, crops_) tf.add_input(desc, input_) tf.add_input(desc, block_shape_) tf.add_input(desc, crops_) @@ -5912,6 +6164,7 @@ begin end function 
loop_cond_eager(input_; name=nothing) desc = tf.EagerOp("LoopCond") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(loop_cond, [input_], name=nothing) @@ -5929,7 +6182,7 @@ end """ - depth_to_space(input; data_format=) + depth_to_space(input; data_format=NHWC) """ @@ -5952,6 +6205,7 @@ begin end function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("DepthToSpace") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if block_size !== nothing desc["block_size"] = Base.Int(block_size) @@ -5996,6 +6250,7 @@ begin end function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing) desc = tf.EagerOp("DestroyTemporaryVariable") + ref_ = convert(tf.TensorHandle, ref_) tf.add_input(desc, ref_) if var_name !== nothing desc["var_name"] = Base.String(var_name) @@ -6017,7 +6272,7 @@ end """ - cudnn_rnn(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnn(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) """ @@ -6066,6 +6321,10 @@ begin end function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNN") + input_ = convert(tf.TensorHandle, input_) + input_h_ = convert(tf.TensorHandle, input_h_) + input_c_ = convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -6128,6 +6387,7 @@ begin end function ref_identity_eager(input_; name=nothing) desc = tf.EagerOp("RefIdentity") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -6146,7 +6406,7 @@ end """ - max_pool3d_grad(orig_input, orig_output, grad; data_format=) + max_pool3d_grad(orig_input, orig_output, grad; data_format=NDHWC) """ @@ -6180,6 +6440,9 @@ begin end function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGrad") + orig_input_ = convert(tf.TensorHandle, orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -6246,6 +6509,9 @@ begin end function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + momenta_ = convert(tf.TensorHandle, momenta_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, gradient_accumulators_) @@ -6367,6 +6633,9 @@ begin end function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInput") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + out_backprop_ = 
convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -6415,6 +6684,7 @@ begin end function ref_exit_eager(data_; name=nothing) desc = tf.EagerOp("RefExit") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -6511,6 +6781,8 @@ begin end function encode_wav_eager(audio_, sample_rate_; name=nothing) desc = tf.EagerOp("EncodeWav") + audio_ = convert(tf.TensorHandle, audio_) + sample_rate_ = convert(tf.TensorHandle, sample_rate_) tf.add_input(desc, audio_) tf.add_input(desc, sample_rate_) res = tf.execute(desc) @@ -6550,6 +6822,9 @@ begin end function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing) desc = tf.EagerOp("TensorSummaryV2") + tag_ = convert(tf.TensorHandle, tag_) + tensor_ = convert(tf.TensorHandle, tensor_) + serialized_summary_metadata_ = convert(tf.TensorHandle, serialized_summary_metadata_) tf.add_input(desc, tag_) tf.add_input(desc, tensor_) tf.add_input(desc, serialized_summary_metadata_) @@ -6594,6 +6869,8 @@ begin end function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpTo") + handle_ = convert(tf.TensorHandle, handle_) + n_ = convert(tf.TensorHandle, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -6640,6 +6917,9 @@ begin end function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("MatrixBandPart") + input_ = convert(tf.TensorHandle, input_) + num_lower_ = convert(tf.TensorHandle, num_lower_) + num_upper_ = convert(tf.TensorHandle, num_upper_) tf.add_input(desc, input_) tf.add_input(desc, num_lower_) tf.add_input(desc, num_upper_) @@ -6685,6 +6965,7 @@ begin end function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("Copy") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -6737,6 +7018,7 @@ begin end function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing) desc = tf.EagerOp("ShapeN") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if N !== nothing desc["N"] = Base.Int(N) @@ -6805,6 +7087,9 @@ begin end function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ExperimentalParseExampleDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) + dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_parallel_calls_) tf.add_input(desc, dense_defaults_) @@ -6870,6 +7155,8 @@ begin end function concat_eager(concat_dim_, values_; name=nothing, N=nothing) desc = tf.EagerOp("Concat") + concat_dim_ = convert(tf.TensorHandle, concat_dim_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, concat_dim_) tf.add_input(desc, values_) if N !== nothing @@ -6892,7 +7179,7 @@ end """ - data_format_dim_map(x; src_format=, dst_format=) + data_format_dim_map(x; src_format=NHWC, dst_format=NCHW) """ @@ -6915,6 +7202,7 @@ begin end function data_format_dim_map_eager(x_; 
name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatDimMap") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if src_format !== nothing desc["src_format"] = Base.String(src_format) @@ -6998,6 +7286,7 @@ begin end function softplus_eager(features_; name=nothing) desc = tf.EagerOp("Softplus") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -7050,6 +7339,13 @@ begin end function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -7146,6 +7442,14 @@ begin end function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSingleSequenceExample") + serialized_ = convert(tf.TensorHandle, serialized_) + feature_list_dense_missing_assumed_empty_ = convert(tf.TensorHandle, feature_list_dense_missing_assumed_empty_) + context_sparse_keys_ = convert(tf.TensorHandle, context_sparse_keys_) + context_dense_keys_ = convert(tf.TensorHandle, context_dense_keys_) + feature_list_sparse_keys_ = convert(tf.TensorHandle, feature_list_sparse_keys_) + feature_list_dense_keys_ = convert(tf.TensorHandle, feature_list_dense_keys_) + context_dense_defaults_ = convert(tf.TensorHandle, context_dense_defaults_) + debug_name_ = convert(tf.TensorHandle, debug_name_) tf.add_input(desc, serialized_) tf.add_input(desc, feature_list_dense_missing_assumed_empty_) tf.add_input(desc, context_sparse_keys_) @@ -7217,6 +7521,7 @@ begin end function matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("MatrixDiag") + diagonal_ = convert(tf.TensorHandle, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) @@ -7292,6 +7597,9 @@ begin end function shard_dataset_eager(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShardDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + num_shards_ = convert(tf.TensorHandle, num_shards_) + index_ = convert(tf.TensorHandle, index_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_shards_) tf.add_input(desc, index_) @@ -7317,7 +7625,7 @@ end """ - max_pool_grad_grad(orig_input, orig_output, grad; data_format=) + max_pool_grad_grad(orig_input, orig_output, grad; data_format=NHWC) """ @@ -7350,6 +7658,9 @@ begin end function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradGrad") + orig_input_ = convert(tf.TensorHandle, 
orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -7406,6 +7717,8 @@ begin end function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinearGrad") + grads_ = convert(tf.TensorHandle, grads_) + original_image_ = convert(tf.TensorHandle, original_image_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) if align_corners !== nothing @@ -7452,6 +7765,8 @@ begin end function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) desc = tf.EagerOp("BatchToSpace") + input_ = convert(tf.TensorHandle, input_) + crops_ = convert(tf.TensorHandle, crops_) tf.add_input(desc, input_) tf.add_input(desc, crops_) if block_size !== nothing @@ -7494,6 +7809,7 @@ begin end function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) desc = tf.EagerOp("OptionalFromValue") + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -7533,6 +7849,8 @@ begin end function xlogy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xlogy") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -7572,6 +7890,8 @@ begin end function cross_eager(a_, b_; name=nothing) desc = tf.EagerOp("Cross") + a_ = convert(tf.TensorHandle, a_) + b_ = convert(tf.TensorHandle, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) desc["T"] = tf.data_type(a_) @@ -7611,6 +7931,8 @@ begin end function bitwise_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseAnd") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -7651,6 +7973,8 @@ begin end function broadcast_to_eager(input_, shape_; name=nothing) desc = tf.EagerOp("BroadcastTo") + input_ = convert(tf.TensorHandle, input_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, input_) tf.add_input(desc, shape_) desc["T"] = tf.data_type(input_) @@ -7690,6 +8014,8 @@ begin end function elu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("EluGrad") + gradients_ = convert(tf.TensorHandle, gradients_) + outputs_ = convert(tf.TensorHandle, outputs_) tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) @@ -7710,7 +8036,7 @@ end """ - cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -7770,6 +8096,17 @@ begin end function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackprop") + input_ = convert(tf.TensorHandle, input_) + input_h_ = convert(tf.TensorHandle, input_h_) + input_c_ = 
convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) + output_ = convert(tf.TensorHandle, output_) + output_h_ = convert(tf.TensorHandle, output_h_) + output_c_ = convert(tf.TensorHandle, output_c_) + output_backprop_ = convert(tf.TensorHandle, output_backprop_) + output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) + output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) + reserve_space_ = convert(tf.TensorHandle, reserve_space_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -7845,6 +8182,7 @@ begin end function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucketFast") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -7942,6 +8280,7 @@ begin end function relu_eager(features_; name=nothing) desc = tf.EagerOp("Relu") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -7982,6 +8321,8 @@ begin end function nth_element_eager(input_, n_; name=nothing, reverse=nothing) desc = tf.EagerOp("NthElement") + input_ = convert(tf.TensorHandle, input_) + n_ = convert(tf.TensorHandle, n_) tf.add_input(desc, input_) tf.add_input(desc, n_) if reverse !== nothing @@ -8021,6 +8362,7 @@ begin end function softsign_eager(features_; name=nothing) desc = tf.EagerOp("Softsign") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -8080,6 +8422,7 @@ begin end function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTable") + empty_key_ = convert(tf.TensorHandle, empty_key_) tf.add_input(desc, empty_key_) if container !== nothing desc["container"] = Base.String(container) @@ -8172,6 +8515,8 @@ begin end function polygamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Polygamma") + a_ = convert(tf.TensorHandle, a_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -8215,6 +8560,7 @@ begin end function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) desc = tf.EagerOp("NcclReduce") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -8263,6 +8609,8 @@ begin end function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMax") + input_ = convert(tf.TensorHandle, input_) + dimension_ = convert(tf.TensorHandle, dimension_) tf.add_input(desc, input_) tf.add_input(desc, dimension_) if output_type !== nothing @@ -8305,6 +8653,8 @@ begin end function matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("MatrixSetDiag") + input_ = convert(tf.TensorHandle, input_) + diagonal_ = convert(tf.TensorHandle, diagonal_) tf.add_input(desc, input_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(input_) @@ -8348,6 +8698,9 @@ begin end function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) desc = tf.EagerOp("SpaceToBatchND") + input_ = convert(tf.TensorHandle, input_) + block_shape_ = convert(tf.TensorHandle, 
block_shape_) + paddings_ = convert(tf.TensorHandle, paddings_) tf.add_input(desc, input_) tf.add_input(desc, block_shape_) tf.add_input(desc, paddings_) @@ -8395,6 +8748,9 @@ begin end function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) desc = tf.EagerOp("SparseReshape") + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_shape_ = convert(tf.TensorHandle, input_shape_) + new_shape_ = convert(tf.TensorHandle, new_shape_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_shape_) tf.add_input(desc, new_shape_) @@ -8438,6 +8794,8 @@ begin end function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptimizeDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + optimizations_ = convert(tf.TensorHandle, optimizations_) tf.add_input(desc, input_dataset_) tf.add_input(desc, optimizations_) if output_types !== nothing @@ -8486,6 +8844,8 @@ begin end function concat_v2_eager(values_, axis_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatV2") + values_ = convert(tf.TensorHandle, values_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, values_) tf.add_input(desc, axis_) if N !== nothing @@ -8545,6 +8905,14 @@ begin end function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdadelta") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + accum_update_ = convert(tf.TensorHandle, accum_update_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -8597,6 +8965,8 @@ begin end function tile_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("Tile") + input_ = convert(tf.TensorHandle, input_) + multiples_ = convert(tf.TensorHandle, multiples_) tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) @@ -8683,6 +9053,9 @@ begin end function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeManySparse") + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_values_ = convert(tf.TensorHandle, sparse_values_) + sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -8730,6 +9103,8 @@ begin end function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) desc = tf.EagerOp("TPUEmbeddingActivations") + embedding_variable_ = convert(tf.TensorHandle, embedding_variable_) + sliced_activations_ = convert(tf.TensorHandle, sliced_activations_) tf.add_input(desc, embedding_variable_) tf.add_input(desc, sliced_activations_) if table_id !== nothing @@ -8778,6 +9153,9 @@ begin end function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("BatchMatrixSolveLs") + matrix_ = convert(tf.TensorHandle, matrix_) + rhs_ = convert(tf.TensorHandle, rhs_) + l2_regularizer_ = convert(tf.TensorHandle, l2_regularizer_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) 
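
Every generated `*_eager` wrapper in this patch follows the same shape: build a `tf.EagerOp`, convert each positional argument to a `tf.TensorHandle`, forward the inputs in the order the op definition declares them, fill in any attributes, then run the op immediately. A minimal hand-written sketch of that shape (the op name and the simplified return are illustrative; only the `tf.` calls already used throughout this diff are assumed):

    function example_add_eager(x_, y_)
        desc = tf.EagerOp("Add")
        # accept plain Julia values as well as existing handles
        x_ = convert(tf.TensorHandle, x_)
        y_ = convert(tf.TensorHandle, y_)
        # inputs are positional: the order must match the op definition
        tf.add_input(desc, x_)
        tf.add_input(desc, y_)
        # infer the "T" attribute from the converted first input
        desc["T"] = tf.data_type(x_)
        # execute eagerly and return the resulting handle(s)
        return tf.execute(desc)
    end
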
tf.add_input(desc, l2_regularizer_) @@ -8821,6 +9199,8 @@ begin end function not_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("NotEqual") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -8858,6 +9238,7 @@ begin end function lgamma_eager(x_; name=nothing) desc = tf.EagerOp("Lgamma") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -8876,7 +9257,7 @@ end """ - tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=) + tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) """ @@ -9037,6 +9418,7 @@ begin end function self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("SelfAdjointEig") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -9079,6 +9461,7 @@ begin end function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) tf.add_input(desc, quantile_stream_resource_handle_) if num_features !== nothing desc["num_features"] = Base.Int(num_features) @@ -9122,6 +9505,10 @@ begin end function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseDiv") + sp_indices_ = convert(tf.TensorHandle, sp_indices_) + sp_values_ = convert(tf.TensorHandle, sp_values_) + sp_shape_ = convert(tf.TensorHandle, sp_shape_) + dense_ = convert(tf.TensorHandle, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -9161,6 +9548,7 @@ begin end function acos_eager(x_; name=nothing) desc = tf.EagerOp("Acos") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -9202,6 +9590,8 @@ begin end function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("All") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -9243,6 +9633,8 @@ begin end function compare_and_bitpack_eager(input_, threshold_; name=nothing) desc = tf.EagerOp("CompareAndBitpack") + input_ = convert(tf.TensorHandle, input_) + threshold_ = convert(tf.TensorHandle, threshold_) tf.add_input(desc, input_) tf.add_input(desc, threshold_) desc["T"] = tf.data_type(input_) @@ -9339,6 +9731,7 @@ begin end function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUniqueDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -9411,6 +9804,14 @@ begin end function quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, 
max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndRelu") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + summand_ = convert(tf.TensorHandle, summand_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -9476,6 +9877,8 @@ begin end function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) desc = tf.EagerOp("ListDiff") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if out_idx !== nothing @@ -9523,6 +9926,11 @@ begin end function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) desc = tf.EagerOp("CreateSummaryFileWriter") + writer_ = convert(tf.TensorHandle, writer_) + logdir_ = convert(tf.TensorHandle, logdir_) + max_queue_ = convert(tf.TensorHandle, max_queue_) + flush_millis_ = convert(tf.TensorHandle, flush_millis_) + filename_suffix_ = convert(tf.TensorHandle, filename_suffix_) tf.add_input(desc, writer_) tf.add_input(desc, logdir_) tf.add_input(desc, max_queue_) @@ -9576,6 +9984,8 @@ begin end function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) desc = tf.EagerOp("GenerateVocabRemapping") + new_vocab_file_ = convert(tf.TensorHandle, new_vocab_file_) + old_vocab_file_ = convert(tf.TensorHandle, old_vocab_file_) tf.add_input(desc, new_vocab_file_) tf.add_input(desc, old_vocab_file_) if new_vocab_offset !== nothing @@ -9623,6 +10033,7 @@ begin end function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixInverse") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if adjoint !== nothing desc["adjoint"] = Base.Bool(adjoint) @@ -9723,6 +10134,7 @@ begin end function stop_gradient_eager(input_; name=nothing) desc = tf.EagerOp("StopGradient") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -9769,6 +10181,8 @@ begin end function split_eager(split_dim_, value_; name=nothing, num_split=nothing) desc = tf.EagerOp("Split") + split_dim_ = convert(tf.TensorHandle, split_dim_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, split_dim_) tf.add_input(desc, value_) if num_split !== nothing @@ -9822,6 +10236,7 @@ begin end function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) desc = tf.EagerOp("Unpack") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) if num !== nothing desc["num"] = Base.Int(num) @@ -9875,6 +10290,9 @@ begin end function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMax") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -9922,6 +10340,10 @@ begin end function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) desc = 
tf.EagerOp("TensorArrayWrite") + handle_ = convert(tf.TensorHandle, handle_) + index_ = convert(tf.TensorHandle, index_) + value_ = convert(tf.TensorHandle, value_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, value_) @@ -9966,6 +10388,8 @@ begin end function fill_eager(dims_, value_; name=nothing, index_type=nothing) desc = tf.EagerOp("Fill") + dims_ = convert(tf.TensorHandle, dims_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, dims_) tf.add_input(desc, value_) if index_type !== nothing @@ -10041,6 +10465,15 @@ begin end function quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRequantize") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) + max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -10098,6 +10531,7 @@ begin end function softmax_eager(logits_; name=nothing) desc = tf.EagerOp("Softmax") + logits_ = convert(tf.TensorHandle, logits_) tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) res = tf.execute(desc) @@ -10138,6 +10572,8 @@ begin end function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubic") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -10262,7 +10698,7 @@ end """ - decode_csv(records, record_defaults; field_delim=, use_quote_delim=true, na_value=, select_cols=Int64[]) + decode_csv(records, record_defaults; field_delim=,, use_quote_delim=true, na_value=, select_cols=Int64[]) """ @@ -10295,6 +10731,8 @@ begin end function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) desc = tf.EagerOp("DecodeCSV") + records_ = convert(tf.TensorHandle, records_) + record_defaults_ = convert(tf.TensorHandle, record_defaults_) tf.add_input(desc, records_) tf.add_input(desc, record_defaults_) if OUT_TYPE !== nothing @@ -10350,6 +10788,9 @@ begin end function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFind") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + default_value_ = convert(tf.TensorHandle, default_value_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, default_value_) @@ -10401,6 +10842,11 @@ begin end function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleAndRepeatDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) + seed_ = convert(tf.TensorHandle, 
seed_) + seed2_ = convert(tf.TensorHandle, seed2_) + count_ = convert(tf.TensorHandle, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) tf.add_input(desc, seed_) @@ -10457,6 +10903,9 @@ begin end function requantization_range_per_channel_eager(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) desc = tf.EagerOp("RequantizationRangePerChannel") + input_ = convert(tf.TensorHandle, input_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -10502,6 +10951,7 @@ begin end function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUnbatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -10525,7 +10975,7 @@ end """ - avg_pool3d_grad(orig_input_shape, grad; data_format=) + avg_pool3d_grad(orig_input_shape, grad; data_format=NDHWC) """ @@ -10556,6 +11006,8 @@ begin end function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3DGrad") + orig_input_shape_ = convert(tf.TensorHandle, orig_input_shape_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) if ksize !== nothing @@ -10610,6 +11062,7 @@ begin end function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("PlaceholderWithDefault") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -10656,6 +11109,9 @@ begin end function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTableV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -10701,6 +11157,9 @@ begin end function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SetSize") + set_indices_ = convert(tf.TensorHandle, set_indices_) + set_values_ = convert(tf.TensorHandle, set_values_) + set_shape_ = convert(tf.TensorHandle, set_shape_) tf.add_input(desc, set_indices_) tf.add_input(desc, set_values_) tf.add_input(desc, set_shape_) @@ -10748,6 +11207,8 @@ begin end function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) desc = tf.EagerOp("Assert") + condition_ = convert(tf.TensorHandle, condition_) + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, condition_) tf.add_input(desc, data_) if T !== nothing @@ -10805,6 +11266,11 @@ begin end function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) desc = tf.EagerOp("NonMaxSuppressionV4") + boxes_ = convert(tf.TensorHandle, boxes_) + scores_ = convert(tf.TensorHandle, scores_) + max_output_size_ = convert(tf.TensorHandle, max_output_size_) + iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) + score_threshold_ = convert(tf.TensorHandle, score_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) 
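
Because each wrapper converts its arguments before adding them as inputs, callers can pass raw Julia arrays or scalars instead of pre-built handles. A hypothetical call against one of the wrappers defined later in this diff (this assumes a `convert(tf.TensorHandle, ::Array)` method exists, which all of these wrappers rely on):

    # runs immediately, with no graph or Session; the Float64 arrays are
    # wrapped into TensorHandles by the convert calls at the top of the wrapper
    z = floor_mod_eager([5.0, 7.0, 9.0], [2.0, 2.0, 2.0])
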
tf.add_input(desc, max_output_size_) @@ -10875,6 +11341,9 @@ begin end function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBoxV2") + image_size_ = convert(tf.TensorHandle, image_size_) + bounding_boxes_ = convert(tf.TensorHandle, bounding_boxes_) + min_object_covered_ = convert(tf.TensorHandle, min_object_covered_) tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) tf.add_input(desc, min_object_covered_) @@ -10913,7 +11382,7 @@ end """ - initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter=) + initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter= ) """ @@ -10943,6 +11412,8 @@ begin end function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFile") + table_handle_ = convert(tf.TensorHandle, table_handle_) + filename_ = convert(tf.TensorHandle, filename_) tf.add_input(desc, table_handle_) tf.add_input(desc, filename_) if key_index !== nothing @@ -10989,6 +11460,7 @@ begin end function lookup_table_size_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSize") + table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing) @@ -11044,6 +11516,15 @@ begin end function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdagradDA") + var_ = convert(tf.TensorHandle, var_) + gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + global_step_ = convert(tf.TensorHandle, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -11104,6 +11585,8 @@ begin end function broadcast_gradient_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastGradientArgs") + s0_ = convert(tf.TensorHandle, s0_) + s1_ = convert(tf.TensorHandle, s1_) tf.add_input(desc, s0_) tf.add_input(desc, s1_) desc["T"] = tf.data_type(s0_) @@ -11238,6 +11721,7 @@ begin end function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("_While") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -11286,6 +11770,9 @@ begin end function initialize_table_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTable") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -11345,6 +11832,7 @@ begin end function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, 
debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNumericSummary") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -11460,6 +11948,7 @@ begin end function tanh_eager(x_; name=nothing) desc = tf.EagerOp("Tanh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -11503,6 +11992,7 @@ begin end function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("SymbolicGradient") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) @@ -11567,6 +12057,15 @@ begin end function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesUpdateEnsemble") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + feature_ids_ = convert(tf.TensorHandle, feature_ids_) + node_ids_ = convert(tf.TensorHandle, node_ids_) + gains_ = convert(tf.TensorHandle, gains_) + thresholds_ = convert(tf.TensorHandle, thresholds_) + left_node_contribs_ = convert(tf.TensorHandle, left_node_contribs_) + right_node_contribs_ = convert(tf.TensorHandle, right_node_contribs_) + max_depth_ = convert(tf.TensorHandle, max_depth_) + learning_rate_ = convert(tf.TensorHandle, learning_rate_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, feature_ids_) tf.add_input(desc, node_ids_) @@ -11629,6 +12128,11 @@ begin end function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -11684,6 +12188,8 @@ begin end function reader_read_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderRead") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + queue_handle_ = convert(tf.TensorHandle, queue_handle_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) @@ -11724,6 +12230,7 @@ begin end function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) desc = tf.EagerOp("_WaitForDistributedTPU") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if startup_timeout_sec !== nothing desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) @@ -11763,6 +12270,7 @@ begin end function mutex_lock_eager(mutex_; name=nothing) desc = tf.EagerOp("MutexLock") + mutex_ = convert(tf.TensorHandle, mutex_) tf.add_input(desc, mutex_) res = tf.execute(desc) node = tf.TapeNode(mutex_lock, [mutex_], name=nothing) @@ -11798,6 +12306,8 @@ begin end function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) desc = tf.EagerOp("AccumulatorSetGlobalStep") + handle_ = convert(tf.TensorHandle, handle_) + new_global_step_ = convert(tf.TensorHandle, new_global_step_) tf.add_input(desc, handle_) tf.add_input(desc, new_global_step_) res = 
tf.execute(desc) @@ -11849,6 +12359,12 @@ begin end function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedAdd") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + min_x_ = convert(tf.TensorHandle, min_x_) + max_x_ = convert(tf.TensorHandle, max_x_) + min_y_ = convert(tf.TensorHandle, min_y_) + max_y_ = convert(tf.TensorHandle, max_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, min_x_) @@ -11893,6 +12409,7 @@ begin end function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) desc = tf.EagerOp("Squeeze") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if squeeze_dims !== nothing desc["squeeze_dims"] = map(Base.identity, squeeze_dims) @@ -11930,6 +12447,7 @@ begin end function experimental_matching_files_dataset_eager(patterns_; name=nothing) desc = tf.EagerOp("ExperimentalMatchingFilesDataset") + patterns_ = convert(tf.TensorHandle, patterns_) tf.add_input(desc, patterns_) res = tf.execute(desc) node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing) @@ -11967,6 +12485,9 @@ begin end function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetToTFRecord") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + filename_ = convert(tf.TensorHandle, filename_) + compression_type_ = convert(tf.TensorHandle, compression_type_) tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) tf.add_input(desc, compression_type_) @@ -12014,6 +12535,7 @@ begin end function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(tf.TensorHandle, parameters_) tf.add_input(desc, parameters_) if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -12099,6 +12621,7 @@ begin end function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ZipDataset") + input_datasets_ = convert(tf.TensorHandle, input_datasets_) tf.add_input(desc, input_datasets_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -12235,6 +12758,7 @@ begin end function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("NcclAllReduce") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -12282,6 +12806,9 @@ begin end function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TextLineDataset") + filenames_ = convert(tf.TensorHandle, filenames_) + compression_type_ = convert(tf.TensorHandle, compression_type_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -12326,6 +12853,7 @@ begin end function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) desc = tf.EagerOp("SdcaShrinkL1") + weights_ = convert(tf.TensorHandle, weights_) tf.add_input(desc, weights_) if num_features !== nothing desc["num_features"] = Base.Int(num_features) @@ -12422,6 +12950,7 @@ begin end function 
multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(tf.TensorHandle, string_handle_) tf.add_input(desc, string_handle_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -12478,6 +13007,11 @@ begin end function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDatasetV2") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + padded_shapes_ = convert(tf.TensorHandle, padded_shapes_) + padding_values_ = convert(tf.TensorHandle, padding_values_) + drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, padded_shapes_) @@ -12538,6 +13072,8 @@ begin end function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) if table_id !== nothing @@ -12586,6 +13122,8 @@ begin end function tensor_array_size_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySize") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -12687,6 +13225,8 @@ begin end function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniform") + shape_ = convert(tf.TensorHandle, shape_) + seed_ = convert(tf.TensorHandle, seed_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) if dtype !== nothing @@ -12748,6 +13288,12 @@ begin end function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToSparseSetOperation") + set1_indices_ = convert(tf.TensorHandle, set1_indices_) + set1_values_ = convert(tf.TensorHandle, set1_values_) + set1_shape_ = convert(tf.TensorHandle, set1_shape_) + set2_indices_ = convert(tf.TensorHandle, set2_indices_) + set2_values_ = convert(tf.TensorHandle, set2_values_) + set2_shape_ = convert(tf.TensorHandle, set2_shape_) tf.add_input(desc, set1_indices_) tf.add_input(desc, set1_values_) tf.add_input(desc, set1_shape_) @@ -12804,6 +13350,7 @@ begin end function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) desc = tf.EagerOp("TensorSummary") + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tensor_) if description !== nothing desc["description"] = Base.String(description) @@ -12856,6 +13403,7 @@ begin end function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) desc = tf.EagerOp("RemoteFusedGraphExecute") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if Tinputs !== nothing desc["Tinputs"] = map(Base.identity, Tinputs) @@ 
-12905,6 +13453,10 @@ begin end function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) desc = tf.EagerOp("SparseSliceGrad") + backprop_val_grad_ = convert(tf.TensorHandle, backprop_val_grad_) + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_start_ = convert(tf.TensorHandle, input_start_) + output_indices_ = convert(tf.TensorHandle, output_indices_) tf.add_input(desc, backprop_val_grad_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_start_) @@ -12953,6 +13505,8 @@ begin end function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumsum") + x_ = convert(tf.TensorHandle, x_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if exclusive !== nothing @@ -13015,6 +13569,11 @@ begin end function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") + t_ = convert(tf.TensorHandle, t_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + gamma_ = convert(tf.TensorHandle, gamma_) + backprop_ = convert(tf.TensorHandle, backprop_) tf.add_input(desc, t_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -13047,7 +13606,7 @@ end """ - avg_pool_grad(orig_input_shape, grad; data_format=) + avg_pool_grad(orig_input_shape, grad; data_format=NHWC) """ @@ -13078,6 +13637,8 @@ begin end function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPoolGrad") + orig_input_shape_ = convert(tf.TensorHandle, orig_input_shape_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) if ksize !== nothing @@ -13132,6 +13693,9 @@ begin end function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) desc = tf.EagerOp("RestoreV2") + prefix_ = convert(tf.TensorHandle, prefix_) + tensor_names_ = convert(tf.TensorHandle, tensor_names_) + shape_and_slices_ = convert(tf.TensorHandle, shape_and_slices_) tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shape_and_slices_) @@ -13171,6 +13735,7 @@ begin end function relu6_eager(features_; name=nothing) desc = tf.EagerOp("Relu6") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -13227,6 +13792,15 @@ begin end function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyRMSProp") + var_ = convert(tf.TensorHandle, var_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -13330,7 +13904,7 @@ end """ - max_pool(input; data_format=) + max_pool(input; data_format=NHWC) """ @@ -13359,6 +13933,7 @@ begin end function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool") + input_ = 
convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -13406,6 +13981,7 @@ begin end function invert_eager(x_; name=nothing) desc = tf.EagerOp("Invert") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -13444,6 +14020,7 @@ begin end function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) desc = tf.EagerOp("_UnaryOpsComposition") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if op_names !== nothing desc["op_names"] = map(Base.identity, op_names) @@ -13501,6 +14078,8 @@ begin end function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if f !== nothing @@ -13569,6 +14148,9 @@ begin end function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + momenta_ = convert(tf.TensorHandle, momenta_) + velocities_ = convert(tf.TensorHandle, velocities_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, velocities_) @@ -13619,6 +14201,7 @@ begin end function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) desc = tf.EagerOp("ParseTensor") + serialized_ = convert(tf.TensorHandle, serialized_) tf.add_input(desc, serialized_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -13719,6 +14302,9 @@ begin end function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") + multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) + shard_num_ = convert(tf.TensorHandle, shard_num_) + incarnation_id_ = convert(tf.TensorHandle, incarnation_id_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, shard_num_) tf.add_input(desc, incarnation_id_) @@ -13772,6 +14358,9 @@ begin end function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomUniformInt") + shape_ = convert(tf.TensorHandle, shape_) + minval_ = convert(tf.TensorHandle, minval_) + maxval_ = convert(tf.TensorHandle, maxval_) tf.add_input(desc, shape_) tf.add_input(desc, minval_) tf.add_input(desc, maxval_) @@ -13825,6 +14414,8 @@ begin end function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") + features_ = convert(tf.TensorHandle, features_) + labels_ = convert(tf.TensorHandle, labels_) tf.add_input(desc, features_) tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) @@ -13868,6 +14459,9 @@ begin end function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV2") + handle_ = convert(tf.TensorHandle, handle_) + index_ = convert(tf.TensorHandle, index_) + flow_in_ = 
convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, flow_in_) @@ -13915,6 +14509,9 @@ begin end function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpTo") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + queue_handle_ = convert(tf.TensorHandle, queue_handle_) + num_records_ = convert(tf.TensorHandle, num_records_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) @@ -13934,7 +14531,7 @@ end """ - encode_proto(sizes, values; descriptor_source=) + encode_proto(sizes, values; descriptor_source=local://) """ @@ -13964,6 +14561,8 @@ begin end function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) desc = tf.EagerOp("EncodeProto") + sizes_ = convert(tf.TensorHandle, sizes_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, sizes_) tf.add_input(desc, values_) if field_names !== nothing @@ -14056,6 +14655,11 @@ begin end function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceGrad") + shape_ = convert(tf.TensorHandle, shape_) + begin_ = convert(tf.TensorHandle, begin_) + end_ = convert(tf.TensorHandle, end_) + strides_ = convert(tf.TensorHandle, strides_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, shape_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -14141,6 +14745,7 @@ begin end function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceSend") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -14199,6 +14804,10 @@ begin end function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + padded_shapes_ = convert(tf.TensorHandle, padded_shapes_) + padding_values_ = convert(tf.TensorHandle, padding_values_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, padded_shapes_) @@ -14228,7 +14837,7 @@ end """ - data_format_vec_permute(x; src_format=, dst_format=) + data_format_vec_permute(x; src_format=NHWC, dst_format=NCHW) """ @@ -14251,6 +14860,7 @@ begin end function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatVecPermute") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if src_format !== nothing desc["src_format"] = Base.String(src_format) @@ -14275,7 +14885,7 @@ end """ - string_format(inputs; template=, placeholder=, summarize=3) + string_format(inputs; template=%s, placeholder=%s, summarize=3) """ @@ -14303,6 +14913,7 @@ begin end function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) desc = tf.EagerOp("StringFormat") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -14364,6 +14975,7 @@ begin end function 
as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) desc = tf.EagerOp("AsString") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if precision !== nothing desc["precision"] = Base.Int(precision) @@ -14421,6 +15033,8 @@ begin end function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueMany") + handle_ = convert(tf.TensorHandle, handle_) + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -14516,6 +15130,10 @@ begin end function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ApplyAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -14562,6 +15180,7 @@ begin end function experimental_iterator_get_device_eager(resource_; name=nothing) desc = tf.EagerOp("ExperimentalIteratorGetDevice") + resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing) @@ -14602,6 +15221,10 @@ begin end function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) desc = tf.EagerOp("AdjustContrast") + images_ = convert(tf.TensorHandle, images_) + contrast_factor_ = convert(tf.TensorHandle, contrast_factor_) + min_value_ = convert(tf.TensorHandle, min_value_) + max_value_ = convert(tf.TensorHandle, max_value_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) tf.add_input(desc, min_value_) @@ -14652,6 +15275,7 @@ begin end function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("ExtractImagePatches") + images_ = convert(tf.TensorHandle, images_) tf.add_input(desc, images_) if ksizes !== nothing desc["ksizes"] = map(Base.identity, ksizes) @@ -14682,7 +15306,7 @@ end """ - scale_and_translate(images, size, scale, translation; kernel_type=) + scale_and_translate(images, size, scale, translation; kernel_type=lanczos3) """ @@ -14708,6 +15332,10 @@ begin end function scale_and_translate_eager(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslate") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) + scale_ = convert(tf.TensorHandle, scale_) + translation_ = convert(tf.TensorHandle, translation_) tf.add_input(desc, images_) tf.add_input(desc, size_) tf.add_input(desc, scale_) @@ -14834,6 +15462,7 @@ begin end function elu_eager(features_; name=nothing) desc = tf.EagerOp("Elu") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -14878,6 +15507,9 @@ begin end function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterUpdate") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -14922,6 +15554,8 @@ begin end function 
floor_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorMod") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -14964,6 +15598,7 @@ begin end function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -15015,6 +15650,10 @@ begin end function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + stats_aggregator_ = convert(tf.TensorHandle, stats_aggregator_) + tag_ = convert(tf.TensorHandle, tag_) + counter_prefix_ = convert(tf.TensorHandle, counter_prefix_) tf.add_input(desc, input_dataset_) tf.add_input(desc, stats_aggregator_) tf.add_input(desc, tag_) @@ -15073,6 +15712,8 @@ begin end function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ComputeAccidentalHits") + true_classes_ = convert(tf.TensorHandle, true_classes_) + sampled_candidates_ = convert(tf.TensorHandle, sampled_candidates_) tf.add_input(desc, true_classes_) tf.add_input(desc, sampled_candidates_) if num_true !== nothing @@ -15119,6 +15760,7 @@ begin end function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) desc = tf.EagerOp("StringToNumber") + string_tensor_ = convert(tf.TensorHandle, string_tensor_) tf.add_input(desc, string_tensor_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -15156,6 +15798,7 @@ begin end function snapshot_eager(input_; name=nothing) desc = tf.EagerOp("Snapshot") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -15192,6 +15835,8 @@ begin end function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) desc = tf.EagerOp("DeserializeIterator") + resource_handle_ = convert(tf.TensorHandle, resource_handle_) + serialized_ = convert(tf.TensorHandle, serialized_) tf.add_input(desc, resource_handle_) tf.add_input(desc, serialized_) res = tf.execute(desc) @@ -15227,6 +15872,7 @@ begin end function atan_eager(x_; name=nothing) desc = tf.EagerOp("Atan") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -15270,6 +15916,8 @@ begin end function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("MatMul") + a_ = convert(tf.TensorHandle, a_) + b_ = convert(tf.TensorHandle, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) if transpose_a !== nothing @@ -15313,6 +15961,7 @@ begin end function erfc_eager(x_; name=nothing) desc = tf.EagerOp("Erfc") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -15350,6 +15999,8 @@ begin end function sigmoid_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SigmoidGrad") + y_ = convert(tf.TensorHandle, y_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -15467,6 
+16118,11 @@ begin end function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV3") + boxes_ = convert(tf.TensorHandle, boxes_) + scores_ = convert(tf.TensorHandle, scores_) + max_output_size_ = convert(tf.TensorHandle, max_output_size_) + iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) + score_threshold_ = convert(tf.TensorHandle, score_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -15520,6 +16176,9 @@ begin end function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropInput") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -15569,6 +16228,8 @@ begin end function logical_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalOr") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) res = tf.execute(desc) @@ -15619,6 +16280,13 @@ begin end function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdadelta") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + accum_update_ = convert(tf.TensorHandle, accum_update_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -15683,6 +16351,10 @@ begin end function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("DenseToSparseSetOperation") + set1_ = convert(tf.TensorHandle, set1_) + set2_indices_ = convert(tf.TensorHandle, set2_indices_) + set2_values_ = convert(tf.TensorHandle, set2_values_) + set2_shape_ = convert(tf.TensorHandle, set2_shape_) tf.add_input(desc, set1_) tf.add_input(desc, set2_indices_) tf.add_input(desc, set2_values_) @@ -15727,6 +16399,7 @@ begin end function reader_num_records_produced_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProduced") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing) @@ -15763,6 +16436,8 @@ begin end function adjust_hue_eager(images_, delta_; name=nothing) desc = tf.EagerOp("AdjustHue") + images_ = convert(tf.TensorHandle, images_) + delta_ = convert(tf.TensorHandle, delta_) tf.add_input(desc, images_) tf.add_input(desc, delta_) desc["T"] = tf.data_type(images_) @@ -15803,6 +16478,8 @@ begin end function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") + quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) + num_buckets_ = convert(tf.TensorHandle, num_buckets_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, num_buckets_) if 
generate_quantiles !== nothing @@ -15863,6 +16540,11 @@ begin end function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapAndBatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) + drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, batch_size_) @@ -15918,6 +16600,8 @@ begin end function real_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("RealDiv") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -15964,6 +16648,9 @@ begin end function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("RestoreSlice") + file_pattern_ = convert(tf.TensorHandle, file_pattern_) + tensor_name_ = convert(tf.TensorHandle, tensor_name_) + shape_and_slice_ = convert(tf.TensorHandle, shape_and_slice_) tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) tf.add_input(desc, shape_and_slice_) @@ -16008,6 +16695,7 @@ begin end function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPopV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -16047,6 +16735,8 @@ begin end function reverse_eager(tensor_, dims_; name=nothing) desc = tf.EagerOp("Reverse") + tensor_ = convert(tf.TensorHandle, tensor_) + dims_ = convert(tf.TensorHandle, dims_) tf.add_input(desc, tensor_) tf.add_input(desc, dims_) desc["T"] = tf.data_type(tensor_) @@ -16088,6 +16778,7 @@ begin end function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) desc = tf.EagerOp("DecodePng") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -16134,6 +16825,10 @@ begin end function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV2") + boxes_ = convert(tf.TensorHandle, boxes_) + scores_ = convert(tf.TensorHandle, scores_) + max_output_size_ = convert(tf.TensorHandle, max_output_size_) + iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -16175,6 +16870,8 @@ begin end function igamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igamma") + a_ = convert(tf.TensorHandle, a_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -16212,6 +16909,7 @@ begin end function digamma_eager(x_; name=nothing) desc = tf.EagerOp("Digamma") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -16266,6 +16964,15 @@ begin end function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdaMax") + 
var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + beta1_power_ = convert(tf.TensorHandle, beta1_power_) + lr_ = convert(tf.TensorHandle, lr_) + beta1_ = convert(tf.TensorHandle, beta1_) + beta2_ = convert(tf.TensorHandle, beta2_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -16300,7 +17007,7 @@ end """ - space_to_depth(input; data_format=) + space_to_depth(input; data_format=NHWC) """ @@ -16323,6 +17030,7 @@ begin end function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("SpaceToDepth") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if block_size !== nothing desc["block_size"] = Base.Int(block_size) @@ -16366,6 +17074,8 @@ begin end function sqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SqrtGrad") + y_ = convert(tf.TensorHandle, y_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -16419,6 +17129,8 @@ begin end function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstage") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -16477,6 +17189,7 @@ begin end function qr_eager(input_; name=nothing, full_matrices=nothing) desc = tf.EagerOp("Qr") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if full_matrices !== nothing desc["full_matrices"] = Base.Bool(full_matrices) @@ -16535,6 +17248,12 @@ begin end function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") + node_id_range_ = convert(tf.TensorHandle, node_id_range_) + stats_summary_list_ = convert(tf.TensorHandle, stats_summary_list_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + tree_complexity_ = convert(tf.TensorHandle, tree_complexity_) + min_node_weight_ = convert(tf.TensorHandle, min_node_weight_) tf.add_input(desc, node_id_range_) tf.add_input(desc, stats_summary_list_) tf.add_input(desc, l1_) @@ -16592,6 +17311,10 @@ begin end function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("UnbatchGrad") + original_input_ = convert(tf.TensorHandle, original_input_) + batch_index_ = convert(tf.TensorHandle, batch_index_) + grad_ = convert(tf.TensorHandle, grad_) + id_ = convert(tf.TensorHandle, id_) tf.add_input(desc, original_input_) tf.add_input(desc, batch_index_) tf.add_input(desc, grad_) @@ -16637,6 +17360,7 @@ begin end function log_softmax_eager(logits_; name=nothing) desc = tf.EagerOp("LogSoftmax") + logits_ = convert(tf.TensorHandle, logits_) tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) res = tf.execute(desc) @@ -16674,6 +17398,7 @@ begin end function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) desc = tf.EagerOp("ResourceCountUpTo") + resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) if limit !== nothing desc["limit"] = Base.Int(limit) @@ -16717,6 +17442,7 
@@ begin end function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("AccumulateNV2") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -16782,6 +17508,9 @@ begin end function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ParallelMapDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) + num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, num_parallel_calls_) @@ -16848,6 +17577,7 @@ begin end function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomUniform") + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, shape_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -16875,7 +17605,7 @@ end """ - unicode_transcode(input; errors=, replacement_char=65533, replace_control_characters=false) + unicode_transcode(input; errors=replace, replacement_char=65533, replace_control_characters=false) """ @@ -16906,6 +17636,7 @@ begin end function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeTranscode") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -16954,6 +17685,7 @@ begin end function reader_reset_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderReset") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing) @@ -16994,6 +17726,7 @@ begin end function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastSend") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if num_devices !== nothing desc["num_devices"] = Base.Int(num_devices) @@ -17035,6 +17768,7 @@ begin end function batch_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDeterminant") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -17072,6 +17806,8 @@ begin end function less_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("LessEqual") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -17116,6 +17852,9 @@ begin end function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyGradientDescent") + var_ = convert(tf.TensorHandle, var_) + alpha_ = convert(tf.TensorHandle, alpha_) + delta_ = convert(tf.TensorHandle, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, delta_) @@ -17164,6 +17903,9 @@ begin end function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtN") + data_ = convert(tf.TensorHandle, data_) + indices_ = 
convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -17202,6 +17944,7 @@ begin end function matrix_logarithm_eager(input_; name=nothing) desc = tf.EagerOp("MatrixLogarithm") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -17246,6 +17989,9 @@ begin end function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMul") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -17305,6 +18051,7 @@ begin end function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeJpeg") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -17442,6 +18189,8 @@ begin end function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueManyV2") + handle_ = convert(tf.TensorHandle, handle_) + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -17506,6 +18255,16 @@ begin end function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") + var_ = convert(tf.TensorHandle, var_) + mg_ = convert(tf.TensorHandle, mg_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -17575,6 +18334,10 @@ begin end function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("InterleaveDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) + cycle_length_ = convert(tf.TensorHandle, cycle_length_) + block_length_ = convert(tf.TensorHandle, block_length_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, cycle_length_) @@ -17626,6 +18389,7 @@ begin end function stack_pop_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPop") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -17646,7 +18410,7 @@ end """ - max_pool_v2(input, ksize, strides; data_format=) + max_pool_v2(input, ksize, strides; data_format=NHWC) """ @@ -17673,6 +18437,9 @@ begin end function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolV2") + input_ = convert(tf.TensorHandle, 
input_) + ksize_ = convert(tf.TensorHandle, ksize_) + strides_ = convert(tf.TensorHandle, strides_) tf.add_input(desc, input_) tf.add_input(desc, ksize_) tf.add_input(desc, strides_) @@ -17719,6 +18486,9 @@ begin end function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + stamp_token_ = convert(tf.TensorHandle, stamp_token_) + tree_ensemble_serialized_ = convert(tf.TensorHandle, tree_ensemble_serialized_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) @@ -17771,6 +18541,11 @@ begin end function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) desc = tf.EagerOp("LoadAndRemapMatrix") + ckpt_path_ = convert(tf.TensorHandle, ckpt_path_) + old_tensor_name_ = convert(tf.TensorHandle, old_tensor_name_) + row_remapping_ = convert(tf.TensorHandle, row_remapping_) + col_remapping_ = convert(tf.TensorHandle, col_remapping_) + initializing_values_ = convert(tf.TensorHandle, initializing_values_) tf.add_input(desc, ckpt_path_) tf.add_input(desc, old_tensor_name_) tf.add_input(desc, row_remapping_) @@ -17833,6 +18608,12 @@ begin end function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalGradientDescent") + var_ = convert(tf.TensorHandle, var_) + alpha_ = convert(tf.TensorHandle, alpha_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, l1_) @@ -17889,6 +18670,7 @@ begin end function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFuncStateless") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -17932,6 +18714,7 @@ begin end function where_eager(input_; name=nothing) desc = tf.EagerOp("Where") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -17980,6 +18763,8 @@ begin end function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) desc = tf.EagerOp("Mfcc") + spectrogram_ = convert(tf.TensorHandle, spectrogram_) + sample_rate_ = convert(tf.TensorHandle, sample_rate_) tf.add_input(desc, spectrogram_) tf.add_input(desc, sample_rate_) if upper_frequency_limit !== nothing @@ -18030,6 +18815,7 @@ begin end function check_numerics_eager(tensor_; name=nothing, message=nothing) desc = tf.EagerOp("CheckNumerics") + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tensor_) if message !== nothing desc["message"] = Base.String(message) @@ -18161,6 +18947,10 @@ begin end function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanGrad") + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + 
output_dim0_ = convert(tf.TensorHandle, output_dim0_) tf.add_input(desc, grad_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -18217,6 +19007,9 @@ begin end function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("TryRpc") + address_ = convert(tf.TensorHandle, address_) + method_ = convert(tf.TensorHandle, method_) + request_ = convert(tf.TensorHandle, request_) tf.add_input(desc, address_) tf.add_input(desc, method_) tf.add_input(desc, request_) @@ -18270,6 +19063,8 @@ begin end function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixTriangularSolve") + matrix_ = convert(tf.TensorHandle, matrix_) + rhs_ = convert(tf.TensorHandle, rhs_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) if lower !== nothing @@ -18316,6 +19111,7 @@ begin end function _retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_Retval") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if index !== nothing desc["index"] = Base.Int(index) @@ -18362,6 +19158,7 @@ begin end function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCounts") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if out_idx !== nothing desc["out_idx"] = Base.identity(out_idx) @@ -18402,6 +19199,8 @@ begin end function add_eager(x_, y_; name=nothing) desc = tf.EagerOp("Add") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -18460,6 +19259,9 @@ begin end function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalScanDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + initial_state_ = convert(tf.TensorHandle, initial_state_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) tf.add_input(desc, other_arguments_) @@ -18519,6 +19321,8 @@ begin end function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignAddVariableOp") + resource_ = convert(tf.TensorHandle, resource_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -18572,6 +19376,9 @@ begin end function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) desc = tf.EagerOp("SplitV") + value_ = convert(tf.TensorHandle, value_) + size_splits_ = convert(tf.TensorHandle, size_splits_) + split_dim_ = convert(tf.TensorHandle, split_dim_) tf.add_input(desc, value_) tf.add_input(desc, size_splits_) tf.add_input(desc, split_dim_) @@ -18621,6 +19428,8 @@ begin end function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) desc = tf.EagerOp("Assign") + ref_ = convert(tf.TensorHandle, ref_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if validate_shape !== nothing @@ -18678,6 +19487,7 @@ begin end function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolWithArgmax") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, 
input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -18736,6 +19546,10 @@ begin end function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedReluX") + features_ = convert(tf.TensorHandle, features_) + max_value_ = convert(tf.TensorHandle, max_value_) + min_features_ = convert(tf.TensorHandle, min_features_) + max_features_ = convert(tf.TensorHandle, max_features_) tf.add_input(desc, features_) tf.add_input(desc, max_value_) tf.add_input(desc, min_features_) @@ -18855,6 +19669,7 @@ begin end function fft2d_eager(input_; name=nothing) desc = tf.EagerOp("FFT2D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -18897,6 +19712,8 @@ begin end function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalThreadPoolDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + thread_pool_ = convert(tf.TensorHandle, thread_pool_) tf.add_input(desc, input_dataset_) tf.add_input(desc, thread_pool_) if output_types !== nothing @@ -18948,6 +19765,8 @@ begin end function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") + selector_input_dataset_ = convert(tf.TensorHandle, selector_input_dataset_) + data_input_datasets_ = convert(tf.TensorHandle, data_input_datasets_) tf.add_input(desc, selector_input_dataset_) tf.add_input(desc, data_input_datasets_) if output_types !== nothing @@ -19000,6 +19819,10 @@ begin end function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNGrad") + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + output_dim0_ = convert(tf.TensorHandle, output_dim0_) tf.add_input(desc, grad_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -19039,6 +19862,7 @@ begin end function real_eager(input_; name=nothing) desc = tf.EagerOp("Real") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -19090,6 +19914,8 @@ begin end function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstage") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -19141,6 +19967,8 @@ begin end function rfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT2D") + input_ = convert(tf.TensorHandle, input_) + fft_length_ = convert(tf.TensorHandle, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -19175,6 +20003,7 @@ begin end function var_is_initialized_op_eager(resource_; name=nothing) desc = tf.EagerOp("VarIsInitializedOp") + resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing) @@ -19253,6 +20082,8 @@ begin end function atan2_eager(y_, x_; 
name=nothing) desc = tf.EagerOp("Atan2") + y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, y_) tf.add_input(desc, x_) desc["T"] = tf.data_type(y_) @@ -19305,6 +20136,8 @@ begin end function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoisson") + shape_ = convert(tf.TensorHandle, shape_) + rate_ = convert(tf.TensorHandle, rate_) tf.add_input(desc, shape_) tf.add_input(desc, rate_) if seed !== nothing @@ -19363,6 +20196,8 @@ begin end function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) desc = tf.EagerOp("ReverseSequence") + input_ = convert(tf.TensorHandle, input_) + seq_lengths_ = convert(tf.TensorHandle, seq_lengths_) tf.add_input(desc, input_) tf.add_input(desc, seq_lengths_) if seq_dim !== nothing @@ -19409,6 +20244,7 @@ begin end function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) desc = tf.EagerOp("OutfeedEnqueue") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -19449,6 +20285,8 @@ begin end function sub_eager(x_, y_; name=nothing) desc = tf.EagerOp("Sub") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -19495,6 +20333,8 @@ begin end function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) desc = tf.EagerOp("StringSplit") + input_ = convert(tf.TensorHandle, input_) + delimiter_ = convert(tf.TensorHandle, delimiter_) tf.add_input(desc, input_) tf.add_input(desc, delimiter_) if skip_empty !== nothing @@ -19543,6 +20383,8 @@ begin end function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumprod") + x_ = convert(tf.TensorHandle, x_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if exclusive !== nothing @@ -19600,6 +20442,10 @@ begin end function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) desc = tf.EagerOp("QuantizedResizeBilinear") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) + min_ = convert(tf.TensorHandle, min_) + max_ = convert(tf.TensorHandle, max_) tf.add_input(desc, images_) tf.add_input(desc, size_) tf.add_input(desc, min_) @@ -19665,6 +20511,8 @@ begin end function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseSingleExample") + serialized_ = convert(tf.TensorHandle, serialized_) + dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, dense_defaults_) if num_sparse !== nothing @@ -19721,6 +20569,7 @@ begin end function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) desc = tf.EagerOp("IsVariableInitialized") + ref_ = convert(tf.TensorHandle, ref_) tf.add_input(desc, ref_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -19816,6 +20665,9 @@ begin end function tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListConcatV2") + input_handle_ = convert(tf.TensorHandle, input_handle_) + element_shape_ = convert(tf.TensorHandle, element_shape_) 
+ leading_dims_ = convert(tf.TensorHandle, leading_dims_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) tf.add_input(desc, leading_dims_) @@ -19842,7 +20694,7 @@ end """ - cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) """ @@ -19891,6 +20743,10 @@ begin end function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV2") + input_ = convert(tf.TensorHandle, input_) + input_h_ = convert(tf.TensorHandle, input_h_) + input_c_ = convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -19962,6 +20818,9 @@ begin end function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterSub") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -20008,6 +20867,8 @@ begin end function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignAdd") + ref_ = convert(tf.TensorHandle, ref_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if use_locking !== nothing @@ -20053,6 +20914,7 @@ begin end function tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorDataset") + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -20096,6 +20958,7 @@ begin end function bucketize_eager(input_; name=nothing, boundaries=nothing) desc = tf.EagerOp("Bucketize") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if boundaries !== nothing desc["boundaries"] = map(Base.identity, boundaries) @@ -20143,6 +21006,10 @@ begin end function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMax") + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_values_ = convert(tf.TensorHandle, input_values_) + input_shape_ = convert(tf.TensorHandle, input_shape_) + reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -20254,6 +21121,9 @@ begin end function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradWithShape") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) + shape_to_prepend_ = convert(tf.TensorHandle, shape_to_prepend_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) tf.add_input(desc, shape_to_prepend_) @@ -20292,6 +21162,7 @@ begin end function tensor_array_close_v3_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV3") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res 
= tf.execute(desc) node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing) @@ -20333,6 +21204,11 @@ begin end function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") + overlaps_ = convert(tf.TensorHandle, overlaps_) + scores_ = convert(tf.TensorHandle, scores_) + max_output_size_ = convert(tf.TensorHandle, max_output_size_) + overlap_threshold_ = convert(tf.TensorHandle, overlap_threshold_) + score_threshold_ = convert(tf.TensorHandle, score_threshold_) tf.add_input(desc, overlaps_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -20380,6 +21256,7 @@ begin end function pack_eager(values_; name=nothing, N=nothing, axis=nothing) desc = tf.EagerOp("Pack") + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, values_) if N !== nothing desc["N"] = Base.Int(N) @@ -20428,6 +21305,8 @@ begin end function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV2") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -20471,6 +21350,8 @@ begin end function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignSubVariableOp") + resource_ = convert(tf.TensorHandle, resource_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -20509,6 +21390,7 @@ begin end function batch_fft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT2D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft2d, [input_], name=nothing) @@ -20542,6 +21424,7 @@ begin end function close_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("CloseSummaryWriter") + writer_ = convert(tf.TensorHandle, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(close_summary_writer, [writer_], name=nothing) @@ -20576,6 +21459,7 @@ begin end function rank_eager(input_; name=nothing) desc = tf.EagerOp("Rank") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -20611,6 +21495,7 @@ begin end function fft3d_eager(input_; name=nothing) desc = tf.EagerOp("FFT3D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -20663,6 +21548,14 @@ begin end function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrl") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -20765,6 +21658,7 @@ begin end function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) desc = tf.EagerOp("AudioSpectrogram") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if window_size !== nothing desc["window_size"] = 
Base.Int(window_size) @@ -20810,6 +21704,7 @@ begin end function variable_shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("VariableShape") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -20962,6 +21857,8 @@ begin end function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestCreateTreeVariable") + tree_handle_ = convert(tf.TensorHandle, tree_handle_) + tree_config_ = convert(tf.TensorHandle, tree_config_) tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = tf.execute(desc) @@ -21011,6 +21908,9 @@ begin end function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradWithArgmax") + input_ = convert(tf.TensorHandle, input_) + grad_ = convert(tf.TensorHandle, grad_) + argmax_ = convert(tf.TensorHandle, argmax_) tf.add_input(desc, input_) tf.add_input(desc, grad_) tf.add_input(desc, argmax_) @@ -21066,6 +21966,8 @@ begin end function ref_switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("RefSwitch") + data_ = convert(tf.TensorHandle, data_) + pred_ = convert(tf.TensorHandle, pred_) tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) @@ -21101,6 +22003,7 @@ begin end function sdca_fprint_eager(input_; name=nothing) desc = tf.EagerOp("SdcaFprint") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(sdca_fprint, [input_], name=nothing) @@ -21146,6 +22049,7 @@ begin end function experimental_choose_fastest_dataset_eager(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalChooseFastestDataset") + input_datasets_ = convert(tf.TensorHandle, input_datasets_) tf.add_input(desc, input_datasets_) if N !== nothing desc["N"] = Base.Int(N) @@ -21195,6 +22099,7 @@ begin end function leaky_relu_eager(features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyRelu") + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, features_) if alpha !== nothing desc["alpha"] = Base.identity(alpha) @@ -21235,6 +22140,7 @@ begin end function identity_n_eager(input_; name=nothing, T=nothing) desc = tf.EagerOp("IdentityN") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -21255,7 +22161,7 @@ end """ - cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -21317,6 +22223,18 @@ begin end function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV2") + input_ = convert(tf.TensorHandle, input_) + input_h_ = 
convert(tf.TensorHandle, input_h_) + input_c_ = convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) + output_ = convert(tf.TensorHandle, output_) + output_h_ = convert(tf.TensorHandle, output_h_) + output_c_ = convert(tf.TensorHandle, output_c_) + output_backprop_ = convert(tf.TensorHandle, output_backprop_) + output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) + output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) + reserve_space_ = convert(tf.TensorHandle, reserve_space_) + host_reserved_ = convert(tf.TensorHandle, host_reserved_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -21400,6 +22318,9 @@ begin end function requantization_range_eager(input_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("RequantizationRange") + input_ = convert(tf.TensorHandle, input_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -21439,6 +22360,8 @@ begin end function maximum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Maximum") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -21479,6 +22402,8 @@ begin end function reshape_eager(tensor_, shape_; name=nothing) desc = tf.EagerOp("Reshape") + tensor_ = convert(tf.TensorHandle, tensor_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, tensor_) tf.add_input(desc, shape_) desc["T"] = tf.data_type(tensor_) @@ -21523,6 +22448,9 @@ begin end function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("MatrixSolveLs") + matrix_ = convert(tf.TensorHandle, matrix_) + rhs_ = convert(tf.TensorHandle, rhs_) + l2_regularizer_ = convert(tf.TensorHandle, l2_regularizer_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) tf.add_input(desc, l2_regularizer_) @@ -21567,6 +22495,9 @@ begin end function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TFRecordDataset") + filenames_ = convert(tf.TensorHandle, filenames_) + compression_type_ = convert(tf.TensorHandle, compression_type_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -21610,6 +22541,8 @@ begin end function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) if num_bucketized_features !== nothing @@ -21651,6 +22584,7 @@ begin end function hsv_to_rgb_eager(images_; name=nothing) desc = tf.EagerOp("HSVToRGB") + images_ = convert(tf.TensorHandle, images_) tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) @@ -21693,6 +22627,8 @@ begin end function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + 
max_intra_op_parallelism_ = convert(tf.TensorHandle, max_intra_op_parallelism_) tf.add_input(desc, input_dataset_) tf.add_input(desc, max_intra_op_parallelism_) if output_types !== nothing @@ -21743,6 +22679,9 @@ begin end function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterDiv") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -21795,6 +22734,7 @@ begin end function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) desc = tf.EagerOp("DecodeWav") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) if desired_channels !== nothing desc["desired_channels"] = Base.Int(desired_channels) @@ -21835,6 +22775,7 @@ begin end function log_eager(x_; name=nothing) desc = tf.EagerOp("Log") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -21878,6 +22819,10 @@ begin end function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) desc = tf.EagerOp("SaveV2") + prefix_ = convert(tf.TensorHandle, prefix_) + tensor_names_ = convert(tf.TensorHandle, tensor_names_) + shape_and_slices_ = convert(tf.TensorHandle, shape_and_slices_) + tensors_ = convert(tf.TensorHandle, tensors_) tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shape_and_slices_) @@ -21918,6 +22863,7 @@ begin end function deep_copy_eager(x_; name=nothing) desc = tf.EagerOp("DeepCopy") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -21958,6 +22904,7 @@ begin end function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ModelDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -22051,6 +22998,9 @@ begin end function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSequenceExample") + serialized_ = convert(tf.TensorHandle, serialized_) + debug_name_ = convert(tf.TensorHandle, debug_name_) + context_dense_defaults_ = convert(tf.TensorHandle, context_dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, debug_name_) tf.add_input(desc, context_dense_defaults_) @@ -22132,6 +23082,7 @@ begin end function sinh_eager(x_; name=nothing) desc = tf.EagerOp("Sinh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -22227,6 +23178,10 @@ begin end function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWriteV2") + handle_ = convert(tf.TensorHandle, handle_) + index_ = convert(tf.TensorHandle, 
index_) + value_ = convert(tf.TensorHandle, value_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, value_) @@ -22267,6 +23222,7 @@ begin end function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListElementShape") + input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) if shape_type !== nothing desc["shape_type"] = Base.identity(shape_type) @@ -22303,6 +23259,7 @@ begin end function queue_size_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSizeV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size_v2, [handle_], name=nothing) @@ -22337,6 +23294,7 @@ begin end function expm1_eager(x_; name=nothing) desc = tf.EagerOp("Expm1") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -22376,6 +23334,9 @@ begin end function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("BatchMatrixBandPart") + input_ = convert(tf.TensorHandle, input_) + num_lower_ = convert(tf.TensorHandle, num_lower_) + num_upper_ = convert(tf.TensorHandle, num_upper_) tf.add_input(desc, input_) tf.add_input(desc, num_lower_) tf.add_input(desc, num_upper_) @@ -22420,6 +23381,8 @@ begin end function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ConcatenateDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + another_dataset_ = convert(tf.TensorHandle, another_dataset_) tf.add_input(desc, input_dataset_) tf.add_input(desc, another_dataset_) if output_types !== nothing @@ -22460,6 +23423,7 @@ begin end function decode_gif_eager(contents_; name=nothing) desc = tf.EagerOp("DecodeGif") + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) res = tf.execute(desc) node = tf.TapeNode(decode_gif, [contents_], name=nothing) @@ -22477,7 +23441,7 @@ end """ - tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=) + tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) """ @@ -22541,6 +23505,10 @@ begin end function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) desc = tf.EagerOp("TPUReplicate") + inputs_ = convert(tf.TensorHandle, inputs_) + broadcast_inputs_ = convert(tf.TensorHandle, broadcast_inputs_) + variables_ = convert(tf.TensorHandle, variables_) + guaranteed_constants_ = convert(tf.TensorHandle, guaranteed_constants_) tf.add_input(desc, inputs_) tf.add_input(desc, broadcast_inputs_) tf.add_input(desc, variables_) @@ -22628,6 +23596,7 @@ begin end function batch_self_adjoint_eig_v2_eager(input_; name=nothing, 
compute_v=nothing) desc = tf.EagerOp("BatchSelfAdjointEigV2") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if compute_v !== nothing desc["compute_v"] = Base.Bool(compute_v) @@ -22669,6 +23638,7 @@ begin end function shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Shape") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -22714,6 +23684,8 @@ begin end function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RepeatDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + count_ = convert(tf.TensorHandle, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) if output_types !== nothing @@ -22738,7 +23710,7 @@ end """ - crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=) + crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=bilinear) """ @@ -22764,6 +23736,10 @@ begin end function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradBoxes") + grads_ = convert(tf.TensorHandle, grads_) + image_ = convert(tf.TensorHandle, image_) + boxes_ = convert(tf.TensorHandle, boxes_) + box_ind_ = convert(tf.TensorHandle, box_ind_) tf.add_input(desc, grads_) tf.add_input(desc, image_) tf.add_input(desc, boxes_) @@ -22807,6 +23783,8 @@ begin end function reciprocal_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("ReciprocalGrad") + y_ = convert(tf.TensorHandle, y_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -22849,6 +23827,8 @@ begin end function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixSolve") + matrix_ = convert(tf.TensorHandle, matrix_) + rhs_ = convert(tf.TensorHandle, rhs_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) if adjoint !== nothing @@ -22949,6 +23929,7 @@ begin end function exit_eager(data_; name=nothing) desc = tf.EagerOp("Exit") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -22996,6 +23977,7 @@ begin end function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) desc = tf.EagerOp("LRN") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if depth_radius !== nothing desc["depth_radius"] = Base.Int(depth_radius) @@ -23057,6 +24039,8 @@ begin end function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("StatelessIf") + cond_ = convert(tf.TensorHandle, cond_) + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, cond_) tf.add_input(desc, input_) if Tin !== nothing @@ -23112,6 +24096,9 @@ begin end function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListSetItem") + input_handle_ = convert(tf.TensorHandle, input_handle_) + index_ = convert(tf.TensorHandle, index_) + item_ = convert(tf.TensorHandle, item_) tf.add_input(desc, input_handle_) tf.add_input(desc, index_) tf.add_input(desc, item_) @@ -23152,6 +24139,7 @@ begin end function rsqrt_eager(x_; name=nothing) desc = tf.EagerOp("Rsqrt") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ 
-23229,6 +24217,18 @@ begin end function quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndReluAndRequantize") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) + max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + summand_ = convert(tf.TensorHandle, summand_) + min_summand_ = convert(tf.TensorHandle, min_summand_) + max_summand_ = convert(tf.TensorHandle, max_summand_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -23289,6 +24289,7 @@ begin end function delete_session_tensor_eager(handle_; name=nothing) desc = tf.EagerOp("DeleteSessionTensor") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing) @@ -23337,6 +24338,10 @@ begin end function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) desc = tf.EagerOp("OneHot") + indices_ = convert(tf.TensorHandle, indices_) + depth_ = convert(tf.TensorHandle, depth_) + on_value_ = convert(tf.TensorHandle, on_value_) + off_value_ = convert(tf.TensorHandle, off_value_) tf.add_input(desc, indices_) tf.add_input(desc, depth_) tf.add_input(desc, on_value_) @@ -23400,6 +24405,14 @@ begin end function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrl") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -23498,6 +24511,16 @@ begin end function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizerV2") + sparse_example_indices_ = convert(tf.TensorHandle, sparse_example_indices_) + sparse_feature_indices_ = convert(tf.TensorHandle, sparse_feature_indices_) + sparse_feature_values_ = convert(tf.TensorHandle, sparse_feature_values_) + dense_features_ = convert(tf.TensorHandle, dense_features_) + example_weights_ = convert(tf.TensorHandle, example_weights_) + example_labels_ = convert(tf.TensorHandle, example_labels_) + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_weights_ = convert(tf.TensorHandle, 
sparse_weights_) + dense_weights_ = convert(tf.TensorHandle, dense_weights_) + example_state_data_ = convert(tf.TensorHandle, example_state_data_) tf.add_input(desc, sparse_example_indices_) tf.add_input(desc, sparse_feature_indices_) tf.add_input(desc, sparse_feature_values_) @@ -23575,6 +24598,8 @@ begin end function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueue") + handle_ = convert(tf.TensorHandle, handle_) + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -23599,7 +24624,7 @@ end """ - conditional_accumulator(; container=, shared_name=, reduction_type=) + conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) """ @@ -23691,6 +24716,8 @@ begin end function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) desc = tf.EagerOp("CTCBeamSearchDecoder") + inputs_ = convert(tf.TensorHandle, inputs_) + sequence_length_ = convert(tf.TensorHandle, sequence_length_) tf.add_input(desc, inputs_) tf.add_input(desc, sequence_length_) if beam_width !== nothing @@ -23794,6 +24821,14 @@ begin end function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyRMSProp") + var_ = convert(tf.TensorHandle, var_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -23848,6 +24883,8 @@ begin end function adjust_saturation_eager(images_, scale_; name=nothing) desc = tf.EagerOp("AdjustSaturation") + images_ = convert(tf.TensorHandle, images_) + scale_ = convert(tf.TensorHandle, scale_) tf.add_input(desc, images_) tf.add_input(desc, scale_) desc["T"] = tf.data_type(images_) @@ -23886,6 +24923,8 @@ begin end function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing) desc = tf.EagerOp("LookupTableRemoveV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) desc["Tin"] = tf.data_type(keys_) @@ -23924,6 +24963,7 @@ begin end function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueClose") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) @@ -23968,6 +25008,8 @@ begin end function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("PrefetchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) if output_types !== nothing @@ -24028,6 +25070,8 @@ begin end function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("MapDataset") + input_dataset_ = 
+    other_arguments_ = convert(tf.TensorHandle, other_arguments_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, other_arguments_)
     if f !== nothing
@@ -24111,6 +25155,13 @@ begin
 end
 function quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
     desc = tf.EagerOp("QuantizedConv2DWithBias")
+    input_ = convert(tf.TensorHandle, input_)
+    filter_ = convert(tf.TensorHandle, filter_)
+    bias_ = convert(tf.TensorHandle, bias_)
+    min_input_ = convert(tf.TensorHandle, min_input_)
+    max_input_ = convert(tf.TensorHandle, max_input_)
+    min_filter_ = convert(tf.TensorHandle, min_filter_)
+    max_filter_ = convert(tf.TensorHandle, max_filter_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, bias_)
@@ -24171,6 +25222,9 @@ begin
 end
 function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("TensorArrayReadV3")
+    handle_ = convert(tf.TensorHandle, handle_)
+    index_ = convert(tf.TensorHandle, index_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, index_)
     tf.add_input(desc, flow_in_)
@@ -24210,6 +25264,7 @@ begin
 end
 function identity_eager(input_; name=nothing)
     desc = tf.EagerOp("Identity")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -24259,6 +25314,8 @@ begin
 end
 function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
     desc = tf.EagerOp("Print")
+    input_ = convert(tf.TensorHandle, input_)
+    data_ = convert(tf.TensorHandle, data_)
     tf.add_input(desc, input_)
     tf.add_input(desc, data_)
     if U !== nothing
@@ -24319,6 +25376,7 @@ begin
 end
 function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
     desc = tf.EagerOp("CollectiveBcastSend")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -24376,6 +25434,7 @@ begin
 end
 function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing)
     desc = tf.EagerOp("_ListToArray")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if Tin !== nothing
         desc["Tin"] = map(Base.identity, Tin)
@@ -24429,6 +25488,11 @@ begin
 end
 function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
     desc = tf.EagerOp("NegTrain")
+    w_in_ = convert(tf.TensorHandle, w_in_)
+    w_out_ = convert(tf.TensorHandle, w_out_)
+    examples_ = convert(tf.TensorHandle, examples_)
+    labels_ = convert(tf.TensorHandle, labels_)
+    lr_ = convert(tf.TensorHandle, lr_)
     tf.add_input(desc, w_in_)
     tf.add_input(desc, w_out_)
     tf.add_input(desc, examples_)
@@ -24472,6 +25536,7 @@ begin
 end
 function worker_heartbeat_eager(request_; name=nothing)
     desc = tf.EagerOp("WorkerHeartbeat")
+    request_ = convert(tf.TensorHandle, request_)
     tf.add_input(desc, request_)
     res = tf.execute(desc)
     node = tf.TapeNode(worker_heartbeat, [request_], name=nothing)
@@ -24510,6 +25575,8 @@ begin
 end
 function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
     desc = tf.EagerOp("MergeV2Checkpoints")
+    checkpoint_prefixes_ = convert(tf.TensorHandle, checkpoint_prefixes_)
+    destination_prefix_ = convert(tf.TensorHandle, destination_prefix_)
     tf.add_input(desc, checkpoint_prefixes_)
     tf.add_input(desc, destination_prefix_)
     if delete_old_dirs !== nothing
@@ -24550,6 +25617,8 @@ begin
 end
 function collective_permute_eager(input_, source_target_pairs_; name=nothing)
     desc = tf.EagerOp("CollectivePermute")
+    input_ = convert(tf.TensorHandle, input_)
+    source_target_pairs_ = convert(tf.TensorHandle, source_target_pairs_)
     tf.add_input(desc, input_)
     tf.add_input(desc, source_target_pairs_)
     desc["T"] = tf.data_type(input_)
@@ -24598,6 +25667,10 @@ begin
 end
 function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
     desc = tf.EagerOp("QuantizeAndDequantizeV3")
+    input_ = convert(tf.TensorHandle, input_)
+    input_min_ = convert(tf.TensorHandle, input_min_)
+    input_max_ = convert(tf.TensorHandle, input_max_)
+    num_bits_ = convert(tf.TensorHandle, num_bits_)
     tf.add_input(desc, input_)
     tf.add_input(desc, input_min_)
     tf.add_input(desc, input_max_)
@@ -24706,6 +25779,8 @@ begin
 end
 function softplus_grad_eager(gradients_, features_; name=nothing)
     desc = tf.EagerOp("SoftplusGrad")
+    gradients_ = convert(tf.TensorHandle, gradients_)
+    features_ = convert(tf.TensorHandle, features_)
     tf.add_input(desc, gradients_)
     tf.add_input(desc, features_)
     desc["T"] = tf.data_type(gradients_)
@@ -24815,6 +25890,10 @@ begin
 end
 function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing)
     desc = tf.EagerOp("TensorArrayScatterV2")
+    handle_ = convert(tf.TensorHandle, handle_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    value_ = convert(tf.TensorHandle, value_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, value_)
@@ -24852,6 +25931,7 @@ begin
 end
 function decode_json_example_eager(json_examples_; name=nothing)
     desc = tf.EagerOp("DecodeJSONExample")
+    json_examples_ = convert(tf.TensorHandle, json_examples_)
     tf.add_input(desc, json_examples_)
     res = tf.execute(desc)
     node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing)
@@ -24869,7 +25949,7 @@ end
 """
-    fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true)
+    fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true)
 """
@@ -24912,6 +25992,11 @@ begin
 end
 function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
     desc = tf.EagerOp("FusedBatchNormGradV2")
+    y_backprop_ = convert(tf.TensorHandle, y_backprop_)
+    x_ = convert(tf.TensorHandle, x_)
+    scale_ = convert(tf.TensorHandle, scale_)
+    reserve_space_1_ = convert(tf.TensorHandle, reserve_space_1_)
+    reserve_space_2_ = convert(tf.TensorHandle, reserve_space_2_)
     tf.add_input(desc, y_backprop_)
     tf.add_input(desc, x_)
     tf.add_input(desc, scale_)
@@ -24975,6 +26060,7 @@ begin
 end
 function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
     desc = tf.EagerOp("_HostCast")
+    x_ = convert(tf.TensorHandle, x_)
     tf.add_input(desc, x_)
     if SrcT !== nothing
         desc["SrcT"] = Base.identity(SrcT)
@@ -25081,6 +26167,7 @@ begin
 end
 function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing)
     desc = tf.EagerOp("While")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if T !== nothing
         desc["T"] = map(Base.identity, T)
@@ -25138,6 +26225,9 @@ begin
 end
 function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
     desc = tf.EagerOp("StatelessMultinomial")
+    logits_ = convert(tf.TensorHandle, logits_)
+    num_samples_ = convert(tf.TensorHandle, num_samples_)
+    seed_ = convert(tf.TensorHandle, seed_)
     tf.add_input(desc, logits_)
     tf.add_input(desc, num_samples_)
     tf.add_input(desc, seed_)
@@ -25188,6 +26278,9 @@ begin
 end
 function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ScatterAdd")
+    ref_ = convert(tf.TensorHandle, ref_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    updates_ = convert(tf.TensorHandle, updates_)
     tf.add_input(desc, ref_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -25230,6 +26323,7 @@ begin
 end
 function conj_eager(input_; name=nothing)
     desc = tf.EagerOp("Conj")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -25270,6 +26364,8 @@ begin
 end
 function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing)
     desc = tf.EagerOp("ParallelDynamicStitch")
+    indices_ = convert(tf.TensorHandle, indices_)
+    data_ = convert(tf.TensorHandle, data_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, data_)
     if N !== nothing
@@ -25310,6 +26406,8 @@ begin
 end
 function make_iterator_eager(dataset_, iterator_; name=nothing)
     desc = tf.EagerOp("MakeIterator")
+    dataset_ = convert(tf.TensorHandle, dataset_)
+    iterator_ = convert(tf.TensorHandle, iterator_)
     tf.add_input(desc, dataset_)
     tf.add_input(desc, iterator_)
     res = tf.execute(desc)
@@ -25346,6 +26444,8 @@ begin
 end
 function rfft3d_eager(input_, fft_length_; name=nothing)
     desc = tf.EagerOp("RFFT3D")
+    input_ = convert(tf.TensorHandle, input_)
+    fft_length_ = convert(tf.TensorHandle, fft_length_)
     tf.add_input(desc, input_)
     tf.add_input(desc, fft_length_)
     res = tf.execute(desc)
@@ -25395,6 +26495,10 @@ begin
 end
 function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("SparseReduceSumSparse")
+    input_indices_ = convert(tf.TensorHandle, input_indices_)
+    input_values_ = convert(tf.TensorHandle, input_values_)
+    input_shape_ = convert(tf.TensorHandle, input_shape_)
+    reduction_axes_ = convert(tf.TensorHandle, reduction_axes_)
     tf.add_input(desc, input_indices_)
     tf.add_input(desc, input_values_)
     tf.add_input(desc, input_shape_)
@@ -25448,6 +26552,7 @@ begin
 end
 function collective_gather_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
     desc = tf.EagerOp("CollectiveGather")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -25512,6 +26617,12 @@ begin
 end
 function combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing)
     desc = tf.EagerOp("CombinedNonMaxSuppression")
+    boxes_ = convert(tf.TensorHandle, boxes_)
+    scores_ = convert(tf.TensorHandle, scores_)
+    max_output_size_per_class_ = convert(tf.TensorHandle, max_output_size_per_class_)
+    max_total_size_ = convert(tf.TensorHandle, max_total_size_)
+    iou_threshold_ = convert(tf.TensorHandle, iou_threshold_)
+    score_threshold_ = convert(tf.TensorHandle, score_threshold_)
     tf.add_input(desc, boxes_)
     tf.add_input(desc, scores_)
     tf.add_input(desc, max_output_size_per_class_)
@@ -25629,6 +26740,9 @@ begin
 end
 function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
     desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters")
+    parameters_ = convert(tf.TensorHandle, parameters_)
+    accumulators_ = convert(tf.TensorHandle, accumulators_)
+    updates_ = convert(tf.TensorHandle, updates_)
     tf.add_input(desc, parameters_)
     tf.add_input(desc, accumulators_)
     tf.add_input(desc, updates_)
@@ -25695,6 +26809,13 @@ begin
 end
 function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
     desc = tf.EagerOp("SparseAdd")
+    a_indices_ = convert(tf.TensorHandle, a_indices_)
+    a_values_ = convert(tf.TensorHandle, a_values_)
+    a_shape_ = convert(tf.TensorHandle, a_shape_)
+    b_indices_ = convert(tf.TensorHandle, b_indices_)
+    b_values_ = convert(tf.TensorHandle, b_values_)
+    b_shape_ = convert(tf.TensorHandle, b_shape_)
+    thresh_ = convert(tf.TensorHandle, thresh_)
     tf.add_input(desc, a_indices_)
     tf.add_input(desc, a_values_)
     tf.add_input(desc, a_shape_)
@@ -25747,6 +26868,8 @@ begin
 end
 function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
     desc = tf.EagerOp("CTCGreedyDecoder")
+    inputs_ = convert(tf.TensorHandle, inputs_)
+    sequence_length_ = convert(tf.TensorHandle, sequence_length_)
     tf.add_input(desc, inputs_)
     tf.add_input(desc, sequence_length_)
     if merge_repeated !== nothing
@@ -25832,6 +26955,7 @@ begin
 end
 function consume_mutex_lock_eager(mutex_lock_; name=nothing)
     desc = tf.EagerOp("ConsumeMutexLock")
+    mutex_lock_ = convert(tf.TensorHandle, mutex_lock_)
     tf.add_input(desc, mutex_lock_)
     res = tf.execute(desc)
     node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing)
@@ -25868,6 +26992,8 @@ begin
 end
 function greater_equal_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("GreaterEqual")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -25888,7 +27014,7 @@ end
 """
-    initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter=)
+    initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter= )
 """
@@ -25918,6 +27044,8 @@ begin
 end
 function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
     desc = tf.EagerOp("InitializeTableFromTextFileV2")
+    table_handle_ = convert(tf.TensorHandle, table_handle_)
+    filename_ = convert(tf.TensorHandle, filename_)
     tf.add_input(desc, table_handle_)
     tf.add_input(desc, filename_)
     if key_index !== nothing
@@ -25970,6 +27098,7 @@ begin
 end
 function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
     desc = tf.EagerOp("QueueDequeue")
+    handle_ = convert(tf.TensorHandle, handle_)
     tf.add_input(desc, handle_)
     if component_types !== nothing
         desc["component_types"] = map(Base.identity, component_types)
@@ -26012,6 +27141,8 @@ begin
 end
 function equal_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("Equal")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -26054,6 +27185,7 @@ begin
 end
 function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("IteratorFromStringHandle")
+    string_handle_ = convert(tf.TensorHandle, string_handle_)
     tf.add_input(desc, string_handle_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -26105,6 +27237,9 @@ begin
 end
 function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
     desc = tf.EagerOp("TensorListSplit")
+    tensor_ = convert(tf.TensorHandle, tensor_)
+    element_shape_ = convert(tf.TensorHandle, element_shape_)
+    lengths_ = convert(tf.TensorHandle, lengths_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, element_shape_)
     tf.add_input(desc, lengths_)
@@ -26172,6 +27307,7 @@ begin
 end
 function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
     desc = tf.EagerOp("FractionalMaxPool")
+    value_ = convert(tf.TensorHandle, value_)
     tf.add_input(desc, value_)
     if pooling_ratio !== nothing
         desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
@@ -26231,6 +27367,9 @@ begin
 end
 function scatter_nd_eager(indices_, updates_, shape_; name=nothing)
     desc = tf.EagerOp("ScatterNd")
+    indices_ = convert(tf.TensorHandle, indices_)
+    updates_ = convert(tf.TensorHandle, updates_)
+    shape_ = convert(tf.TensorHandle, shape_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
     tf.add_input(desc, shape_)
@@ -26277,6 +27416,9 @@ begin
 end
 function tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing)
     desc = tf.EagerOp("TensorListScatterIntoExistingList")
+    input_handle_ = convert(tf.TensorHandle, input_handle_)
+    tensor_ = convert(tf.TensorHandle, tensor_)
+    indices_ = convert(tf.TensorHandle, indices_)
     tf.add_input(desc, input_handle_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, indices_)
@@ -26321,6 +27463,9 @@ begin
 end
 function select_eager(condition_, t_, e_; name=nothing)
     desc = tf.EagerOp("Select")
+    condition_ = convert(tf.TensorHandle, condition_)
+    t_ = convert(tf.TensorHandle, t_)
+    e_ = convert(tf.TensorHandle, e_)
     tf.add_input(desc, condition_)
     tf.add_input(desc, t_)
     tf.add_input(desc, e_)
@@ -26366,6 +27511,8 @@ begin
 end
 function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("Min")
+    input_ = convert(tf.TensorHandle, input_)
+    reduction_indices_ = convert(tf.TensorHandle, reduction_indices_)
     tf.add_input(desc, input_)
     tf.add_input(desc, reduction_indices_)
     if keep_dims !== nothing
@@ -26422,6 +27569,9 @@ begin
 end
 function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
     desc = tf.EagerOp("LRNGrad")
+    input_grads_ = convert(tf.TensorHandle, input_grads_)
+    input_image_ = convert(tf.TensorHandle, input_image_)
+    output_image_ = convert(tf.TensorHandle, output_image_)
     tf.add_input(desc, input_grads_)
     tf.add_input(desc, input_image_)
     tf.add_input(desc, output_image_)
@@ -26491,6 +27641,8 @@ begin
 end
 function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
     desc = tf.EagerOp("RandomPoissonV2")
+    shape_ = convert(tf.TensorHandle, shape_)
+    rate_ = convert(tf.TensorHandle, rate_)
     tf.add_input(desc, shape_)
     tf.add_input(desc, rate_)
     if seed !== nothing
@@ -26618,6 +27770,12 @@ begin
 end
 function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent")
+    var_ = convert(tf.TensorHandle, var_)
+    alpha_ = convert(tf.TensorHandle, alpha_)
+    l1_ = convert(tf.TensorHandle, l1_)
+    l2_ = convert(tf.TensorHandle, l2_)
+    grad_ = convert(tf.TensorHandle, grad_)
+    indices_ = convert(tf.TensorHandle, indices_)
     tf.add_input(desc, var_)
     tf.add_input(desc, alpha_)
     tf.add_input(desc, l1_)
@@ -26670,6 +27828,7 @@ begin
 end
 function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("ExperimentalNonSerializableDataset")
+    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
     tf.add_input(desc, input_dataset_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -26717,6 +27876,8 @@ begin
 end
 function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset")
+    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
+    tag_ = convert(tf.TensorHandle, tag_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, tag_)
     if output_types !== nothing
@@ -26771,6 +27932,9 @@ begin
 end
 function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
     desc = tf.EagerOp("Dilation2DBackpropFilter")
+    input_ = convert(tf.TensorHandle, input_)
+    filter_ = convert(tf.TensorHandle, filter_)
+    out_backprop_ = convert(tf.TensorHandle, out_backprop_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, out_backprop_)
@@ -26833,6 +27997,8 @@ begin
 end
 function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
     desc = tf.EagerOp("_If")
+    cond_ = convert(tf.TensorHandle, cond_)
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, cond_)
     tf.add_input(desc, input_)
     if Tin !== nothing
@@ -26864,7 +28030,7 @@ end
 """
-    bias_add_grad(out_backprop; data_format=)
+    bias_add_grad(out_backprop; data_format=NHWC)
 """
@@ -26884,6 +28050,7 @@ begin
 end
 function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing)
     desc = tf.EagerOp("BiasAddGrad")
+    out_backprop_ = convert(tf.TensorHandle, out_backprop_)
     tf.add_input(desc, out_backprop_)
     if data_format !== nothing
         desc["data_format"] = Base.String(data_format)
@@ -26921,6 +28088,7 @@ begin
 end
 function reader_serialize_state_v2_eager(reader_handle_; name=nothing)
     desc = tf.EagerOp("ReaderSerializeStateV2")
+    reader_handle_ = convert(tf.TensorHandle, reader_handle_)
     tf.add_input(desc, reader_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing)
@@ -26954,6 +28122,7 @@ begin
 end
 function wrap_dataset_variant_eager(input_handle_; name=nothing)
     desc = tf.EagerOp("WrapDatasetVariant")
+    input_handle_ = convert(tf.TensorHandle, input_handle_)
     tf.add_input(desc, input_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing)
@@ -27010,6 +28179,11 @@ begin
 end
 function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
     desc = tf.EagerOp("ParallelInterleaveDatasetV2")
+    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
+    other_arguments_ = convert(tf.TensorHandle, other_arguments_)
+    cycle_length_ = convert(tf.TensorHandle, cycle_length_)
+    block_length_ = convert(tf.TensorHandle, block_length_)
+    num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, other_arguments_)
     tf.add_input(desc, cycle_length_)
@@ -27046,7 +28220,7 @@ end
 """
-    depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=, dilations=[1, 1, 1, 1])
+    depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1])
 """
@@ -27079,6 +28253,9 @@ begin
 end
 function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
     desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput")
+    input_sizes_ = convert(tf.TensorHandle, input_sizes_)
+    filter_ = convert(tf.TensorHandle, filter_)
+    out_backprop_ = convert(tf.TensorHandle, out_backprop_)
     tf.add_input(desc, input_sizes_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, out_backprop_)
@@ -27146,6 +28323,14 @@ begin
 end
 function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceApplyRMSProp")
+    var_ = convert(tf.TensorHandle, var_)
+    ms_ = convert(tf.TensorHandle, ms_)
+    mom_ = convert(tf.TensorHandle, mom_)
+    lr_ = convert(tf.TensorHandle, lr_)
+    rho_ = convert(tf.TensorHandle, rho_)
+    momentum_ = convert(tf.TensorHandle, momentum_)
+    epsilon_ = convert(tf.TensorHandle, epsilon_)
+    grad_ = convert(tf.TensorHandle, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, ms_)
     tf.add_input(desc, mom_)
@@ -27204,6 +28389,8 @@ begin
 end
 function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("SparseAccumulatorTakeGradient")
+    handle_ = convert(tf.TensorHandle, handle_)
+    num_required_ = convert(tf.TensorHandle, num_required_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, num_required_)
     if dtype !== nothing
@@ -27247,6 +28434,7 @@ begin
 end
 function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("ExperimentalLMDBDataset")
+    filenames_ = convert(tf.TensorHandle, filenames_)
     tf.add_input(desc, filenames_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -27286,6 +28474,7 @@ begin
 end
 function stack_close_v2_eager(handle_; name=nothing)
     desc = tf.EagerOp("StackCloseV2")
+    handle_ = convert(tf.TensorHandle, handle_)
     tf.add_input(desc, handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(stack_close_v2, [handle_], name=nothing)
@@ -27397,6 +28586,14 @@ begin
 end
 function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceApplyAdagradDA")
+    var_ = convert(tf.TensorHandle, var_)
+    gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_)
+    gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_)
+    grad_ = convert(tf.TensorHandle, grad_)
+    lr_ = convert(tf.TensorHandle, lr_)
+    l1_ = convert(tf.TensorHandle, l1_)
+    l2_ = convert(tf.TensorHandle, l2_)
+    global_step_ = convert(tf.TensorHandle, global_step_)
     tf.add_input(desc, var_)
     tf.add_input(desc, gradient_accumulator_)
     tf.add_input(desc, gradient_squared_accumulator_)
@@ -27444,6 +28641,7 @@ begin
 end
 function tensor_forest_tree_size_eager(tree_handle_; name=nothing)
     desc = tf.EagerOp("TensorForestTreeSize")
+    tree_handle_ = convert(tf.TensorHandle, tree_handle_)
     tf.add_input(desc, tree_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing)
@@ -27478,6 +28676,7 @@ begin
 end
 function matrix_diag_part_eager(input_; name=nothing)
     desc = tf.EagerOp("MatrixDiagPart")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -27512,6 +28711,7 @@ begin
 end
 function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing)
     desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2")
+    reader_handle_ = convert(tf.TensorHandle, reader_handle_)
     tf.add_input(desc, reader_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing)
@@ -27552,6 +28752,10 @@ begin
 end
 function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing)
     desc = tf.EagerOp("TensorArraySplitV3")
+    handle_ = convert(tf.TensorHandle, handle_)
+    value_ = convert(tf.TensorHandle, value_)
+    lengths_ = convert(tf.TensorHandle, lengths_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, value_)
     tf.add_input(desc, lengths_)
@@ -27602,6 +28806,10 @@ begin
 end
 function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing)
     desc = tf.EagerOp("SparseToDense")
+    sparse_indices_ = convert(tf.TensorHandle, sparse_indices_)
+    output_shape_ = convert(tf.TensorHandle, output_shape_)
+    sparse_values_ = convert(tf.TensorHandle, sparse_values_)
+    default_value_ = convert(tf.TensorHandle, default_value_)
     tf.add_input(desc, sparse_indices_)
     tf.add_input(desc, output_shape_)
     tf.add_input(desc, sparse_values_)
@@ -27649,6 +28857,7 @@ begin
 end
 function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing)
     desc = tf.EagerOp("TPUReplicatedInput")
+    inputs_ = convert(tf.TensorHandle, inputs_)
     tf.add_input(desc, inputs_)
     if N !== nothing
         desc["N"] = Base.Int(N)
@@ -27686,6 +28895,7 @@ begin
 end
 function stack_close_eager(handle_; name=nothing)
     desc = tf.EagerOp("StackClose")
+    handle_ = convert(tf.TensorHandle, handle_)
     tf.add_input(desc, handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(stack_close, [handle_], name=nothing)
@@ -27727,6 +28937,7 @@ begin
 end
 function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("DeserializeManySparse")
+    serialized_sparse_ = convert(tf.TensorHandle, serialized_sparse_)
     tf.add_input(desc, serialized_sparse_)
     if dtype !== nothing
         desc["dtype"] = Base.identity(dtype)
@@ -27773,6 +28984,7 @@ begin
 end
 function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
     desc = tf.EagerOp("_NcclReduceRecv")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if reduction !== nothing
         desc["reduction"] = Base.String(reduction)
@@ -27823,6 +29035,8 @@ begin
 end
 function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing)
     desc = tf.EagerOp("MirrorPadGrad")
+    input_ = convert(tf.TensorHandle, input_)
+    paddings_ = convert(tf.TensorHandle, paddings_)
     tf.add_input(desc, input_)
     tf.add_input(desc, paddings_)
     if mode !== nothing
@@ -27865,6 +29079,8 @@ begin
 end
 function broadcast_args_eager(s0_, s1_; name=nothing)
     desc = tf.EagerOp("BroadcastArgs")
+    s0_ = convert(tf.TensorHandle, s0_)
+    s1_ = convert(tf.TensorHandle, s1_)
     tf.add_input(desc, s0_)
     tf.add_input(desc, s1_)
     desc["T"] = tf.data_type(s0_)
@@ -27908,6 +29124,8 @@ begin
 end
 function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("StatelessTruncatedNormal")
+    shape_ = convert(tf.TensorHandle, shape_)
+    seed_ = convert(tf.TensorHandle, seed_)
     tf.add_input(desc, shape_)
     tf.add_input(desc, seed_)
     if dtype !== nothing
@@ -27949,6 +29167,8 @@ begin
 end
 function regex_full_match_eager(input_, pattern_; name=nothing)
     desc = tf.EagerOp("RegexFullMatch")
+    input_ = convert(tf.TensorHandle, input_)
+    pattern_ = convert(tf.TensorHandle, pattern_)
     tf.add_input(desc, input_)
     tf.add_input(desc, pattern_)
     res = tf.execute(desc)
@@ -27983,6 +29203,7 @@ begin
 end
 function unwrap_dataset_variant_eager(input_handle_; name=nothing)
     desc = tf.EagerOp("UnwrapDatasetVariant")
+    input_handle_ = convert(tf.TensorHandle, input_handle_)
     tf.add_input(desc, input_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing)
@@ -28022,6 +29243,7 @@ begin
 end
 function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing)
     desc = tf.EagerOp("Empty")
+    shape_ = convert(tf.TensorHandle, shape_)
     tf.add_input(desc, shape_)
     if dtype !== nothing
         desc["dtype"] = Base.identity(dtype)
@@ -28112,6 +29334,8 @@ begin
 end
 function div_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("Div")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -28211,6 +29435,8 @@ begin
 end
 function truncate_div_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("TruncateDiv")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -28231,7 +29457,7 @@ end
 """
-    unicode_encode(input_values, input_splits; errors=, replacement_char=65533)
+    unicode_encode(input_values, input_splits; errors=replace, replacement_char=65533)
 """
@@ -28258,6 +29484,8 @@ begin
 end
 function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
     desc = tf.EagerOp("UnicodeEncode")
+    input_values_ = convert(tf.TensorHandle, input_values_)
+    input_splits_ = convert(tf.TensorHandle, input_splits_)
     tf.add_input(desc, input_values_)
     tf.add_input(desc, input_splits_)
     if errors !== nothing
@@ -28304,6 +29532,7 @@ begin
 end
 function merge_summary_eager(inputs_; name=nothing, N=nothing)
     desc = tf.EagerOp("MergeSummary")
+    inputs_ = convert(tf.TensorHandle, inputs_)
     tf.add_input(desc, inputs_)
     if N !== nothing
         desc["N"] = Base.Int(N)
@@ -28340,6 +29569,7 @@ begin
 end
 function fake_queue_eager(resource_; name=nothing)
     desc = tf.EagerOp("FakeQueue")
+    resource_ = convert(tf.TensorHandle, resource_)
     tf.add_input(desc, resource_)
     res = tf.execute(desc)
     node = tf.TapeNode(fake_queue, [resource_], name=nothing)
@@ -28374,6 +29604,7 @@ begin
 end
 function batch_cholesky_eager(input_; name=nothing)
     desc = tf.EagerOp("BatchCholesky")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -28463,6 +29694,7 @@ begin
 end
 function bessel_i1e_eager(x_; name=nothing)
     desc = tf.EagerOp("BesselI1e")
+    x_ = convert(tf.TensorHandle, x_)
     tf.add_input(desc, x_)
     desc["T"] = tf.data_type(x_)
     res = tf.execute(desc)
@@ -28499,6 +29731,8 @@ begin
 end
 function import_event_eager(writer_, event_; name=nothing)
     desc = tf.EagerOp("ImportEvent")
+    writer_ = convert(tf.TensorHandle, writer_)
+    event_ = convert(tf.TensorHandle, event_)
     tf.add_input(desc, writer_)
     tf.add_input(desc, event_)
     res = tf.execute(desc)
@@ -28558,6 +29792,9 @@ begin
 end
 function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
     desc = tf.EagerOp("QuantizedInstanceNorm")
+    x_ = convert(tf.TensorHandle, x_)
+    x_min_ = convert(tf.TensorHandle, x_min_)
+    x_max_ = convert(tf.TensorHandle, x_max_)
     tf.add_input(desc, x_)
     tf.add_input(desc, x_min_)
     tf.add_input(desc, x_max_)
@@ -28623,6 +29860,8 @@ begin
 end
 function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
     desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters")
+    parameters_ = convert(tf.TensorHandle, parameters_)
+    accumulators_ = convert(tf.TensorHandle, accumulators_)
     tf.add_input(desc, parameters_)
     tf.add_input(desc, accumulators_)
     if table_id !== nothing
@@ -28676,6 +29915,10 @@ begin
 end
 function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing)
     desc = tf.EagerOp("TensorArrayWriteV3")
+    handle_ = convert(tf.TensorHandle, handle_)
+    index_ = convert(tf.TensorHandle, index_)
+    value_ = convert(tf.TensorHandle, value_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, index_)
     tf.add_input(desc, value_)
@@ -28727,6 +29970,8 @@ begin
 end
 function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
     desc = tf.EagerOp("DenseToDenseSetOperation")
+    set1_ = convert(tf.TensorHandle, set1_)
+    set2_ = convert(tf.TensorHandle, set2_)
     tf.add_input(desc, set1_)
     tf.add_input(desc, set2_)
     if set_operation !== nothing
@@ -28753,7 +29998,7 @@ end
 """
-    encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=, x_density=300, y_density=300, xmp_metadata=)
+    encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=)
 """
@@ -28796,6 +30041,7 @@ begin
 end
 function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
     desc = tf.EagerOp("EncodeJpeg")
+    image_ = convert(tf.TensorHandle, image_)
     tf.add_input(desc, image_)
     if format !== nothing
         desc["format"] = Base.String(format)
@@ -28861,6 +30107,9 @@ begin
 end
 function inplace_update_eager(x_, i_, v_; name=nothing)
     desc = tf.EagerOp("InplaceUpdate")
+    x_ = convert(tf.TensorHandle, x_)
+    i_ = convert(tf.TensorHandle, i_)
+    v_ = convert(tf.TensorHandle, v_)
     tf.add_input(desc, x_)
     tf.add_input(desc, i_)
     tf.add_input(desc, v_)
@@ -28912,6 +30161,9 @@ begin
 end
 function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
     desc = tf.EagerOp("FusedPadConv2D")
+    input_ = convert(tf.TensorHandle, input_)
+    paddings_ = convert(tf.TensorHandle, paddings_)
+    filter_ = convert(tf.TensorHandle, filter_)
     tf.add_input(desc, input_)
     tf.add_input(desc, paddings_)
     tf.add_input(desc, filter_)
@@ -28971,6 +30223,9 @@ begin
 end
 function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing)
     desc = tf.EagerOp("QuantizedRelu")
+    features_ = convert(tf.TensorHandle, features_)
+    min_features_ = convert(tf.TensorHandle, min_features_)
+    max_features_ = convert(tf.TensorHandle, max_features_)
     tf.add_input(desc, features_)
     tf.add_input(desc, min_features_)
     tf.add_input(desc, max_features_)
@@ -29015,6 +30270,8 @@ begin
 end
 function gather_nd_eager(params_, indices_; name=nothing)
     desc = tf.EagerOp("GatherNd")
+    params_ = convert(tf.TensorHandle, params_)
+    indices_ = convert(tf.TensorHandle, indices_)
     tf.add_input(desc, params_)
     tf.add_input(desc, indices_)
     desc["Tparams"] = tf.data_type(params_)
@@ -29099,6 +30356,7 @@ begin
 end
 function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("FilterByLastComponentDataset")
+    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
     tf.add_input(desc, input_dataset_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -29143,6 +30401,9 @@ begin
 end
 function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing)
     desc = tf.EagerOp("ClipByValue")
+    t_ = convert(tf.TensorHandle, t_)
+    clip_value_min_ = convert(tf.TensorHandle, clip_value_min_)
+    clip_value_max_ = convert(tf.TensorHandle, clip_value_max_)
     tf.add_input(desc, t_)
     tf.add_input(desc, clip_value_min_)
     tf.add_input(desc, clip_value_max_)
@@ -29190,6 +30451,8 @@ begin
 end
 function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
     desc = tf.EagerOp("ImageSummary")
+    tag_ = convert(tf.TensorHandle, tag_)
+    tensor_ = convert(tf.TensorHandle, tensor_)
     tf.add_input(desc, tag_)
     tf.add_input(desc, tensor_)
     if max_images !== nothing
@@ -29296,6 +30559,7 @@ begin
 end
 function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing)
     desc = tf.EagerOp("StringJoin")
+    inputs_ = convert(tf.TensorHandle, inputs_)
     tf.add_input(desc, inputs_)
     if N !== nothing
         desc["N"] = Base.Int(N)
@@ -29345,6 +30609,9 @@ begin
 end
 function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceScatterNdAdd")
+    ref_ = convert(tf.TensorHandle, ref_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    updates_ = convert(tf.TensorHandle, updates_)
     tf.add_input(desc, ref_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -29390,6 +30657,8 @@ begin
 end
 function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
     desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize")
+    quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_)
+    bucket_boundaries_ = convert(tf.TensorHandle, bucket_boundaries_)
     tf.add_input(desc, quantile_stream_resource_handle_)
     tf.add_input(desc, bucket_boundaries_)
     if num_streams !== nothing
@@ -29430,6 +30699,8 @@ begin
 end
 function left_shift_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("LeftShift")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -29483,6 +30754,11 @@ begin
 end
 function requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
     desc = tf.EagerOp("RequantizePerChannel")
+    input_ = convert(tf.TensorHandle, input_)
+    input_min_ = convert(tf.TensorHandle, input_min_)
+    input_max_ = convert(tf.TensorHandle, input_max_)
+    requested_output_min_ = convert(tf.TensorHandle, requested_output_min_)
+    requested_output_max_ = convert(tf.TensorHandle, requested_output_max_)
     tf.add_input(desc, input_)
     tf.add_input(desc, input_min_)
     tf.add_input(desc, input_max_)
@@ -29531,6 +30807,9 @@ begin
 end
 function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing)
     desc = tf.EagerOp("TensorScatterAdd")
+    tensor_ = convert(tf.TensorHandle, tensor_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    updates_ = convert(tf.TensorHandle, updates_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -29635,6 +30914,7 @@ begin
 end
 function ifft3d_eager(input_; name=nothing)
     desc = tf.EagerOp("IFFT3D")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     desc["Tcomplex"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -29677,6 +30957,8 @@ begin
 end
 function euclidean_norm_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("EuclideanNorm")
+    input_ = convert(tf.TensorHandle, input_)
+    reduction_indices_ = convert(tf.TensorHandle, reduction_indices_)
     tf.add_input(desc, input_)
     tf.add_input(desc, reduction_indices_)
     if keep_dims !== nothing
@@ -29722,6 +31004,8 @@ begin
 end
 function ref_select_eager(index_, inputs_; name=nothing, N=nothing)
     desc = tf.EagerOp("RefSelect")
+    index_ = convert(tf.TensorHandle, index_)
+    inputs_ = convert(tf.TensorHandle, inputs_)
     tf.add_input(desc, index_)
     tf.add_input(desc, inputs_)
     if N !== nothing
@@ -29765,6 +31049,9 @@ begin
 end
 function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing)
     desc = tf.EagerOp("SparseTensorSliceDataset")
+    indices_ = convert(tf.TensorHandle, indices_)
+    values_ = convert(tf.TensorHandle, values_)
+    dense_shape_ = convert(tf.TensorHandle, dense_shape_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, values_)
     tf.add_input(desc, dense_shape_)
@@ -29860,6 +31147,7 @@ begin
 end
 function batch_ifft2d_eager(input_; name=nothing)
     desc = tf.EagerOp("BatchIFFT2D")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     res = tf.execute(desc)
     node = tf.TapeNode(batch_ifft2d, [input_], name=nothing)
@@ -29903,6 +31191,9 @@ begin
 end
 function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
     desc = tf.EagerOp("TensorArrayGather")
+    handle_ = convert(tf.TensorHandle, handle_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, flow_in_)
@@ -29954,6 +31245,10 @@ begin
 end
 function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
     desc = tf.EagerOp("SparseSegmentMeanWithNumSegments")
+    data_ = convert(tf.TensorHandle, data_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    segment_ids_ = convert(tf.TensorHandle, segment_ids_)
+    num_segments_ = convert(tf.TensorHandle, num_segments_)
     tf.add_input(desc, data_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, segment_ids_)
@@ -29997,6 +31292,7 @@ begin
 end
 function ensure_shape_eager(input_; name=nothing, shape=nothing)
     desc = tf.EagerOp("EnsureShape")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if shape !== nothing
         desc["shape"] = Base.identity(shape)
@@ -30046,6 +31342,11 @@ begin
 end
 function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyProximalGradientDescent")
+    var_ = convert(tf.TensorHandle, var_)
+    alpha_ = convert(tf.TensorHandle, alpha_)
+    l1_ = convert(tf.TensorHandle, l1_)
+    l2_ = convert(tf.TensorHandle, l2_)
+    delta_ = convert(tf.TensorHandle, delta_)
     tf.add_input(desc, var_)
     tf.add_input(desc, alpha_)
     tf.add_input(desc, l1_)
@@ -30113,6 +31414,7 @@ begin
 end
 function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing)
     desc = tf.EagerOp("CollectiveReduce")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -30169,6 +31471,7 @@ begin
 end
 function is_nan_eager(x_; name=nothing)
     desc = tf.EagerOp("IsNan")
+    x_ = convert(tf.TensorHandle, x_)
     tf.add_input(desc, x_)
     desc["T"] = tf.data_type(x_)
     res = tf.execute(desc)
@@ -30223,6 +31526,15 @@ begin
 end
 function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyAdaMax")
+    var_ = convert(tf.TensorHandle, var_)
+    m_ = convert(tf.TensorHandle, m_)
+    v_ = convert(tf.TensorHandle, v_)
+    beta1_power_ = convert(tf.TensorHandle, beta1_power_)
+    lr_ = convert(tf.TensorHandle, lr_)
+    beta1_ = convert(tf.TensorHandle, beta1_)
+    beta2_ = convert(tf.TensorHandle, beta2_)
+    epsilon_ = convert(tf.TensorHandle, epsilon_)
+    grad_ = convert(tf.TensorHandle, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, m_)
     tf.add_input(desc, v_)
@@ -30296,6 +31608,8 @@ begin
 end
 function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
     desc = tf.EagerOp("DecodeAndCropJpeg")
+    contents_ = convert(tf.TensorHandle, contents_)
+    crop_window_ = convert(tf.TensorHandle, crop_window_)
     tf.add_input(desc, contents_)
     tf.add_input(desc, crop_window_)
     if channels !== nothing
@@ -30368,6 +31682,15 @@ begin
 end
 function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyCenteredRMSProp")
+    var_ = convert(tf.TensorHandle, var_)
+    mg_ = convert(tf.TensorHandle, mg_)
+    ms_ = convert(tf.TensorHandle, ms_)
+    mom_ = convert(tf.TensorHandle, mom_)
+    lr_ = convert(tf.TensorHandle, lr_)
+    rho_ = convert(tf.TensorHandle, rho_)
+    momentum_ = convert(tf.TensorHandle, momentum_)
+    epsilon_ = convert(tf.TensorHandle, epsilon_)
+    grad_ = convert(tf.TensorHandle, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, mg_)
     tf.add_input(desc, ms_)
@@ -30405,7 +31728,7 @@ end
 """
-    conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1])
+    conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1])
 """
@@ -30438,6 +31761,9 @@ begin
 end
 function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
     desc = tf.EagerOp("Conv3DBackpropFilterV2")
+    input_ = convert(tf.TensorHandle, input_)
+    filter_sizes_ = convert(tf.TensorHandle, filter_sizes_)
+    out_backprop_ = convert(tf.TensorHandle, out_backprop_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_sizes_)
     tf.add_input(desc, out_backprop_)
@@ -30496,6 +31822,8 @@ begin
 end
 function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
     desc = tf.EagerOp("MatrixTriangularSolve")
+    matrix_ = convert(tf.TensorHandle, matrix_)
+    rhs_ = convert(tf.TensorHandle, rhs_)
     tf.add_input(desc, matrix_)
     tf.add_input(desc, rhs_)
     if lower !== nothing
@@ -30538,6 +31866,7 @@ begin
 end
 function reader_num_work_units_completed_eager(reader_handle_; name=nothing)
     desc = tf.EagerOp("ReaderNumWorkUnitsCompleted")
+    reader_handle_ = convert(tf.TensorHandle, reader_handle_)
     tf.add_input(desc, reader_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing)
@@ -30582,6 +31911,11 @@ begin
 end
 function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
     desc = tf.EagerOp("WriteAudioSummary")
+    writer_ = convert(tf.TensorHandle, writer_)
+    step_ = convert(tf.TensorHandle, step_)
+    tag_ = convert(tf.TensorHandle, tag_)
+    tensor_ = convert(tf.TensorHandle, tensor_)
+    sample_rate_ = convert(tf.TensorHandle, sample_rate_)
     tf.add_input(desc, writer_)
     tf.add_input(desc, step_)
     tf.add_input(desc, tag_)
@@ -30624,6 +31958,8 @@ begin
 end
 function sharded_filespec_eager(basename_, num_shards_; name=nothing)
     desc = tf.EagerOp("ShardedFilespec")
+    basename_ = convert(tf.TensorHandle, basename_)
+    num_shards_ = convert(tf.TensorHandle, num_shards_)
     tf.add_input(desc, basename_)
     tf.add_input(desc, num_shards_)
     res = tf.execute(desc)
@@ -30661,6 +31997,8 @@ begin
 end
 function div_no_nan_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("DivNoNan")
+    x_ = convert(tf.TensorHandle, x_)
+    y_ = convert(tf.TensorHandle, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -30712,6 +32050,11 @@ begin
 end
 function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing)
     desc = tf.EagerOp("SparseAccumulatorApplyGradient")
+    handle_ = convert(tf.TensorHandle, handle_)
+    local_step_ = convert(tf.TensorHandle, local_step_)
+    gradient_indices_ = convert(tf.TensorHandle, gradient_indices_)
+    gradient_values_ = convert(tf.TensorHandle, gradient_values_)
+    gradient_shape_ = convert(tf.TensorHandle, gradient_shape_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, local_step_)
     tf.add_input(desc, gradient_indices_)
@@ -30767,6 +32110,8 @@ begin
 end
 function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing)
     desc = tf.EagerOp("RaggedTensorToSparse")
+    rt_nested_splits_ = convert(tf.TensorHandle, rt_nested_splits_)
+    rt_dense_values_ = convert(tf.TensorHandle, rt_dense_values_)
     tf.add_input(desc, rt_nested_splits_)
     tf.add_input(desc, rt_dense_values_)
     if RAGGED_RANK !== nothing
@@ -30815,6 +32160,7 @@ begin
 end
 function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing)
     desc = tf.EagerOp("ExtractVolumePatches")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if ksizes !== nothing
         desc["ksizes"] = map(Base.identity, ksizes)
@@ -30869,6 +32215,9 @@ begin
 end
 function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing)
     desc = tf.EagerOp("BarrierInsertMany")
+    handle_ = convert(tf.TensorHandle, handle_)
+    keys_ = convert(tf.TensorHandle, keys_)
+    values_ = convert(tf.TensorHandle, values_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, keys_)
     tf.add_input(desc, values_)
@@ -30960,6 +32309,8 @@ begin
 end
 function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing)
     desc = tf.EagerOp("SpaceToBatch")
+    input_ = convert(tf.TensorHandle, input_)
+    paddings_ = convert(tf.TensorHandle, paddings_)
     tf.add_input(desc, input_)
     tf.add_input(desc, paddings_)
     if block_size !== nothing
@@ -31068,6 +32419,8 @@ begin
 end
 function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
     desc = tf.EagerOp("EmptyTensorList")
+    element_shape_ = convert(tf.TensorHandle, element_shape_)
+    max_num_elements_ = convert(tf.TensorHandle, max_num_elements_)
     tf.add_input(desc, element_shape_)
     tf.add_input(desc, max_num_elements_)
     if element_dtype !== nothing
@@ -31142,6 +32495,14 @@ begin
 end
 function quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
     desc = tf.EagerOp("QuantizedConv2DAndRequantize")
+    input_ = convert(tf.TensorHandle, input_)
+    filter_ = convert(tf.TensorHandle, filter_)
+    min_input_ = convert(tf.TensorHandle, min_input_)
+    max_input_ = convert(tf.TensorHandle, max_input_)
+    min_filter_ = convert(tf.TensorHandle, min_filter_)
+    max_filter_ = convert(tf.TensorHandle, max_filter_)
+    min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_)
+    max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, min_input_)
@@ -31205,6 +32566,7 @@ begin
 end
 function lu_eager(input_; name=nothing, output_idx_type=nothing)
     desc = tf.EagerOp("Lu")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     if output_idx_type !== nothing
         desc["output_idx_type"] = Base.identity(output_idx_type)
@@ -31245,6 +32607,7 @@ begin
 end
 function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing)
     desc = tf.EagerOp("DecodeCompressed")
+    bytes_ = convert(tf.TensorHandle, bytes_)
     tf.add_input(desc, bytes_)
     if compression_type !== nothing
         desc["compression_type"] = Base.String(compression_type)
@@ -31284,6 +32647,7 @@ begin
 end
 function get_session_tensor_eager(handle_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("GetSessionTensor")
+    handle_ = convert(tf.TensorHandle, handle_)
     tf.add_input(desc, handle_)
     if dtype !== nothing
         desc["dtype"] = Base.identity(dtype)
@@ -31330,6 +32694,9 @@ begin
 end
 function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
     desc = tf.EagerOp("TensorArrayGatherV3")
+    handle_ = convert(tf.TensorHandle, handle_)
+    indices_ = convert(tf.TensorHandle, indices_)
+    flow_in_ = convert(tf.TensorHandle, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, flow_in_)
@@ -31389,6 +32756,10 @@ begin
 end
 function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
     desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug")
+    parameters_ = convert(tf.TensorHandle, parameters_)
+    accumulators_ = convert(tf.TensorHandle, accumulators_)
+    linears_ = convert(tf.TensorHandle, linears_)
+    gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_)
     tf.add_input(desc, parameters_)
     tf.add_input(desc, accumulators_)
     tf.add_input(desc, linears_)
@@ -31440,6 +32811,7 @@ begin
 end
 function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing)
     desc = tf.EagerOp("DestroyResourceOp")
+    resource_ = convert(tf.TensorHandle, resource_)
     tf.add_input(desc, resource_)
     if ignore_lookup_error !== nothing
         desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error)
@@ -31532,6 +32904,11 @@ begin
 end
 function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
     desc = tf.EagerOp("CreateSummaryDbWriter")
+    writer_ = convert(tf.TensorHandle, writer_)
+    db_uri_ = convert(tf.TensorHandle, db_uri_)
+    experiment_name_ = convert(tf.TensorHandle, experiment_name_)
+    run_name_ = convert(tf.TensorHandle, run_name_)
+    user_name_ = convert(tf.TensorHandle, user_name_)
     tf.add_input(desc, writer_)
     tf.add_input(desc, db_uri_)
     tf.add_input(desc, experiment_name_)
@@ -31572,6 +32949,8 @@ begin
 end
 function tanh_grad_eager(y_, dy_; name=nothing)
     desc = tf.EagerOp("TanhGrad")
+    y_ = convert(tf.TensorHandle, y_)
+    dy_ = convert(tf.TensorHandle, dy_)
     tf.add_input(desc, y_)
     tf.add_input(desc, dy_)
     desc["T"] = tf.data_type(y_)
@@ -31608,6 +32987,7 @@ begin
 end
 function decode_base64_eager(input_; name=nothing)
     desc = tf.EagerOp("DecodeBase64")
+    input_ = convert(tf.TensorHandle, input_)
     tf.add_input(desc, input_)
     res = tf.execute(desc)
     node = tf.TapeNode(decode_base64, [input_], name=nothing)
@@ -31625,7 +33005,7 @@ end
 """
-    max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=)
+    max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC)
 """
@@ -31656,6 +33036,11 @@ begin
 end
 function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
     desc = tf.EagerOp("MaxPoolGradGradV2")
+    orig_input_ = convert(tf.TensorHandle, orig_input_)
+    orig_output_ = convert(tf.TensorHandle, orig_output_)
+    grad_ = convert(tf.TensorHandle, grad_)
+    ksize_ = convert(tf.TensorHandle, ksize_)
+    strides_ = convert(tf.TensorHandle, strides_)
     tf.add_input(desc, orig_input_)
     tf.add_input(desc, orig_output_)
     tf.add_input(desc, grad_)
@@ -31709,6 +33094,9 @@ begin
 end
 function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
     desc = tf.EagerOp("AudioSummaryV2")
+    tag_ = convert(tf.TensorHandle, tag_)
+    tensor_ = convert(tf.TensorHandle, tensor_)
+    sample_rate_ = convert(tf.TensorHandle, sample_rate_)
     tf.add_input(desc, tag_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, sample_rate_)
@@ -31765,6 +33153,7 @@ begin
 end
 function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing)
     desc = tf.EagerOp("StatefulPartitionedCall")
+    args_ = convert(tf.TensorHandle, args_)
     tf.add_input(desc, args_)
     if Tin !== nothing
         desc["Tin"] = map(Base.identity, Tin)
@@ -31834,6 +33223,8 @@ begin
 end
 function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing)
     desc = tf.EagerOp("_ScopedAllocatorConcat")
+    backing_ = convert(tf.TensorHandle, backing_)
+    inputs_ = convert(tf.TensorHandle, inputs_)
     tf.add_input(desc, backing_)
     tf.add_input(desc, inputs_)
     if shape !== nothing
@@ -31899,6 +33290,8 @@ begin
 end
 function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
     desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient")
tf.EagerOp("FakeQuantWithMinMaxArgsGradient") + gradients_ = convert(tf.TensorHandle, gradients_) + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) if min !== nothing @@ -31957,6 +33350,7 @@ begin end function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("BatchSvd") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if compute_uv !== nothing desc["compute_uv"] = Base.Bool(compute_uv) @@ -32019,6 +33413,9 @@ begin end function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapStage") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, key_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -32094,6 +33491,15 @@ begin end function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrl") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -32150,6 +33556,8 @@ begin end function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighbor") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -32210,6 +33618,15 @@ begin end function experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalCSVDataset") + filenames_ = convert(tf.TensorHandle, filenames_) + compression_type_ = convert(tf.TensorHandle, compression_type_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) + header_ = convert(tf.TensorHandle, header_) + field_delim_ = convert(tf.TensorHandle, field_delim_) + use_quote_delim_ = convert(tf.TensorHandle, use_quote_delim_) + na_value_ = convert(tf.TensorHandle, na_value_) + select_cols_ = convert(tf.TensorHandle, select_cols_) + record_defaults_ = convert(tf.TensorHandle, record_defaults_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -32269,6 +33686,10 @@ begin end function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMul") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + mkl_x_ = convert(tf.TensorHandle, mkl_x_) + mkl_y_ = convert(tf.TensorHandle, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -32308,6 +33729,7 @@ begin end function batch_matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixDiag") + diagonal_ = convert(tf.TensorHandle, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) @@ 
-32343,6 +33765,7 @@ begin end function is_inf_eager(x_; name=nothing) desc = tf.EagerOp("IsInf") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -32418,6 +33841,7 @@ begin end function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FixedUnigramCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -32511,6 +33935,16 @@ begin end function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrlV2") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -32571,6 +34005,8 @@ begin end function unravel_index_eager(indices_, dims_; name=nothing) desc = tf.EagerOp("UnravelIndex") + indices_ = convert(tf.TensorHandle, indices_) + dims_ = convert(tf.TensorHandle, dims_) tf.add_input(desc, indices_) tf.add_input(desc, dims_) desc["Tidx"] = tf.data_type(indices_) @@ -32615,6 +34051,8 @@ begin end function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Max") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -32655,6 +34093,7 @@ begin end function ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("IFFT2D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -32708,6 +34147,9 @@ begin end function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) desc = tf.EagerOp("SparseConcat") + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) + shapes_ = convert(tf.TensorHandle, shapes_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shapes_) @@ -32756,6 +34198,8 @@ begin end function histogram_summary_eager(tag_, values_; name=nothing) desc = tf.EagerOp("HistogramSummary") + tag_ = convert(tf.TensorHandle, tag_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, tag_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) @@ -32796,6 +34240,8 @@ begin end function segment_sum_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentSum") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -32833,6 +34279,7 @@ begin end function exp_eager(x_; name=nothing) desc = tf.EagerOp("Exp") + x_ = convert(tf.TensorHandle, x_) 
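# Unary element-wise kernels such as Exp are the shortest instance of the
# generated pattern: one input converted to a TensorHandle, one "T" dtype
# attribute inferred from that handle via tf.data_type, then tf.execute.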
tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -32925,6 +34372,9 @@ begin end function resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdSub") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -32976,6 +34426,8 @@ begin end function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaSendFromHost") + inputs_ = convert(tf.TensorHandle, inputs_) + dynamic_key_ = convert(tf.TensorHandle, dynamic_key_) tf.add_input(desc, inputs_) tf.add_input(desc, dynamic_key_) if Tinputs !== nothing @@ -33020,6 +34472,7 @@ begin end function get_session_handle_v2_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandleV2") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) @@ -33057,6 +34510,8 @@ begin end function relu_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("ReluGrad") + gradients_ = convert(tf.TensorHandle, gradients_) + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -33101,6 +34556,9 @@ begin end function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMin") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -33167,6 +34625,11 @@ begin end function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseExample") + serialized_ = convert(tf.TensorHandle, serialized_) + names_ = convert(tf.TensorHandle, names_) + sparse_keys_ = convert(tf.TensorHandle, sparse_keys_) + dense_keys_ = convert(tf.TensorHandle, dense_keys_) + dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, names_) tf.add_input(desc, sparse_keys_) @@ -33227,6 +34690,8 @@ begin end function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueV2") + handle_ = convert(tf.TensorHandle, handle_) + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -33277,6 +34742,9 @@ begin end function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdAdd") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -33318,6 +34786,7 @@ begin end function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProducedV2") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced_v2, 
[reader_handle_], name=nothing) @@ -33369,6 +34838,10 @@ begin end function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + mg_ = convert(tf.TensorHandle, mg_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -33423,6 +34896,8 @@ begin end function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignSub") + ref_ = convert(tf.TensorHandle, ref_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if use_locking !== nothing @@ -33470,6 +34945,9 @@ begin end function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentSum") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -33492,7 +34970,7 @@ end """ - fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true) + fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) """ @@ -33531,6 +35009,11 @@ begin end function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormGrad") + y_backprop_ = convert(tf.TensorHandle, y_backprop_) + x_ = convert(tf.TensorHandle, x_) + scale_ = convert(tf.TensorHandle, scale_) + reserve_space_1_ = convert(tf.TensorHandle, reserve_space_1_) + reserve_space_2_ = convert(tf.TensorHandle, reserve_space_2_) tf.add_input(desc, y_backprop_) tf.add_input(desc, x_) tf.add_input(desc, scale_) @@ -33566,7 +35049,7 @@ end """ - max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=) + max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) """ @@ -33597,6 +35080,11 @@ begin end function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradV2") + orig_input_ = convert(tf.TensorHandle, orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + grad_ = convert(tf.TensorHandle, grad_) + ksize_ = convert(tf.TensorHandle, ksize_) + strides_ = convert(tf.TensorHandle, strides_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -33674,6 +35162,13 @@ begin end function quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRelu") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) tf.add_input(desc, 
input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -33731,6 +35226,9 @@ begin end function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesCreateEnsemble") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + stamp_token_ = convert(tf.TensorHandle, stamp_token_) + tree_ensemble_serialized_ = convert(tf.TensorHandle, tree_ensemble_serialized_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) @@ -33899,6 +35397,8 @@ begin end function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMin") + input_ = convert(tf.TensorHandle, input_) + dimension_ = convert(tf.TensorHandle, dimension_) tf.add_input(desc, input_) tf.add_input(desc, dimension_) if output_type !== nothing @@ -33946,6 +35446,8 @@ begin end function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueMany") + handle_ = convert(tf.TensorHandle, handle_) + n_ = convert(tf.TensorHandle, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -33991,6 +35493,7 @@ begin end function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesSerializeEnsemble") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing) @@ -34027,6 +35530,8 @@ begin end function minimum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Minimum") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -34047,7 +35552,7 @@ end """ - substr(input, pos, len; unit=) + substr(input, pos, len; unit=BYTE) """ @@ -34071,6 +35576,9 @@ begin end function substr_eager(input_, pos_, len_; name=nothing, unit=nothing) desc = tf.EagerOp("Substr") + input_ = convert(tf.TensorHandle, input_) + pos_ = convert(tf.TensorHandle, pos_) + len_ = convert(tf.TensorHandle, len_) tf.add_input(desc, input_) tf.add_input(desc, pos_) tf.add_input(desc, len_) @@ -34111,6 +35619,7 @@ begin end function queue_size_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSize") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size, [handle_], name=nothing) @@ -34164,6 +35673,15 @@ begin end function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrlV2") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -34231,6 +35749,8 @@ begin end function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = 
tf.EagerOp("LoadTPUEmbeddingMomentumParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + momenta_ = convert(tf.TensorHandle, momenta_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) if table_id !== nothing @@ -34284,6 +35804,9 @@ begin end function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentMean") + data_ = convert(tf.TensorHandle, data_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -34335,6 +35858,12 @@ begin end function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -34390,6 +35919,9 @@ begin end function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV2") + handle_ = convert(tf.TensorHandle, handle_) + indices_ = convert(tf.TensorHandle, indices_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, flow_in_) @@ -34434,6 +35966,8 @@ begin end function less_eager(x_, y_; name=nothing) desc = tf.EagerOp("Less") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -34518,6 +36052,8 @@ begin end function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("UpperBound") + sorted_inputs_ = convert(tf.TensorHandle, sorted_inputs_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) if out_type !== nothing @@ -34564,6 +36100,9 @@ begin end function tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGetItem") + input_handle_ = convert(tf.TensorHandle, input_handle_) + index_ = convert(tf.TensorHandle, index_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, index_) tf.add_input(desc, element_shape_) @@ -34612,6 +36151,9 @@ begin end function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVars") + inputs_ = convert(tf.TensorHandle, inputs_) + min_ = convert(tf.TensorHandle, min_) + max_ = convert(tf.TensorHandle, max_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) tf.add_input(desc, max_) @@ -34653,6 +36195,7 @@ begin end function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") + quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) tf.add_input(desc, quantile_stream_resource_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing) @@ -34695,6 +36238,9 @@ begin end 
function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpToV2") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + queue_handle_ = convert(tf.TensorHandle, queue_handle_) + num_records_ = convert(tf.TensorHandle, num_records_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) @@ -34733,6 +36279,8 @@ begin end function complex_eager(real_, imag_; name=nothing) desc = tf.EagerOp("Complex") + real_ = convert(tf.TensorHandle, real_) + imag_ = convert(tf.TensorHandle, imag_) tf.add_input(desc, real_) tf.add_input(desc, imag_) desc["T"] = tf.data_type(real_) @@ -34778,6 +36326,8 @@ begin end function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListReserve") + element_shape_ = convert(tf.TensorHandle, element_shape_) + num_elements_ = convert(tf.TensorHandle, num_elements_) tf.add_input(desc, element_shape_) tf.add_input(desc, num_elements_) if element_dtype !== nothing @@ -34823,6 +36373,7 @@ begin end function bitcast_eager(input_; name=nothing, type_=nothing) desc = tf.EagerOp("Bitcast") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if type_ !== nothing desc["type"] = Base.identity(type_) @@ -34963,6 +36514,21 @@ begin end function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") + t_ = convert(tf.TensorHandle, t_) + t_min_ = convert(tf.TensorHandle, t_min_) + t_max_ = convert(tf.TensorHandle, t_max_) + m_ = convert(tf.TensorHandle, m_) + m_min_ = convert(tf.TensorHandle, m_min_) + m_max_ = convert(tf.TensorHandle, m_max_) + v_ = convert(tf.TensorHandle, v_) + v_min_ = convert(tf.TensorHandle, v_min_) + v_max_ = convert(tf.TensorHandle, v_max_) + beta_ = convert(tf.TensorHandle, beta_) + beta_min_ = convert(tf.TensorHandle, beta_min_) + beta_max_ = convert(tf.TensorHandle, beta_max_) + gamma_ = convert(tf.TensorHandle, gamma_) + gamma_min_ = convert(tf.TensorHandle, gamma_min_) + gamma_max_ = convert(tf.TensorHandle, gamma_max_) tf.add_input(desc, t_) tf.add_input(desc, t_min_) tf.add_input(desc, t_max_) @@ -35025,6 +36591,7 @@ begin end function cos_eager(x_; name=nothing) desc = tf.EagerOp("Cos") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -35072,6 +36639,9 @@ begin end function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizeDownAndShrinkRange") + input_ = convert(tf.TensorHandle, input_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -35119,6 +36689,8 @@ begin end function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRandomDataset") + seed_ = convert(tf.TensorHandle, seed_) + seed2_ = convert(tf.TensorHandle, seed2_) tf.add_input(desc, seed_) tf.add_input(desc, seed2_) if output_types !== nothing @@ -35172,6 +36744,9 @@ begin end function rpc_eager(address_, method_, request_; 
name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("Rpc") + address_ = convert(tf.TensorHandle, address_) + method_ = convert(tf.TensorHandle, method_) + request_ = convert(tf.TensorHandle, request_) tf.add_input(desc, address_) tf.add_input(desc, method_) tf.add_input(desc, request_) @@ -35259,6 +36834,18 @@ begin end function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) + max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + summand_ = convert(tf.TensorHandle, summand_) + min_summand_ = convert(tf.TensorHandle, min_summand_) + max_summand_ = convert(tf.TensorHandle, max_summand_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -35319,6 +36906,7 @@ begin end function tensor_list_length_eager(input_handle_; name=nothing) desc = tf.EagerOp("TensorListLength") + input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing) @@ -35421,6 +37009,7 @@ begin end function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("StatelessWhile") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -35447,7 +37036,7 @@ end """ - sparse_conditional_accumulator(; container=, shared_name=, reduction_type=) + sparse_conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) """ @@ -35528,6 +37117,8 @@ begin end function segment_min_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMin") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -35568,6 +37159,9 @@ begin end function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) desc = tf.EagerOp("WriteGraphSummary") + writer_ = convert(tf.TensorHandle, writer_) + step_ = convert(tf.TensorHandle, step_) + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) @@ -35606,6 +37200,8 @@ begin end function cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("CholeskyGrad") + l_ = convert(tf.TensorHandle, l_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, l_) tf.add_input(desc, grad_) desc["T"] = tf.data_type(l_) @@ -35665,6 +37261,7 @@ begin end function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LogUniformCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) 
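# Note the split the generator makes here: tensor-valued arguments
# (true_classes_) are converted to TensorHandles and added as inputs, while
# scalar configuration (num_true, num_sampled, unique, range_max, seeds)
# stays host-side and is attached as op attributes through desc["..."].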
tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -35724,6 +37321,9 @@ begin end function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeSparse") + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_values_ = convert(tf.TensorHandle, sparse_values_) + sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -35770,6 +37370,9 @@ begin end function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) desc = tf.EagerOp("ScatterNdNonAliasingAdd") + input_ = convert(tf.TensorHandle, input_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, input_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -35817,6 +37420,7 @@ begin end function ref_merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefMerge") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -35865,6 +37469,7 @@ begin end function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorListConcat") + input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) @@ -35888,7 +37493,7 @@ end """ - cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -35934,6 +37539,11 @@ begin end function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNCanonicalToParams") + num_layers_ = convert(tf.TensorHandle, num_layers_) + num_units_ = convert(tf.TensorHandle, num_units_) + input_size_ = convert(tf.TensorHandle, input_size_) + weights_ = convert(tf.TensorHandle, weights_) + biases_ = convert(tf.TensorHandle, biases_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -36014,6 +37624,14 @@ begin end function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdadelta") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + accum_update_ = convert(tf.TensorHandle, accum_update_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -36065,6 +37683,7 @@ begin end function tensor_array_close_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayClose") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close, [handle_], name=nothing) @@ -36101,6 +37720,8 @@ 
begin end function selu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("SeluGrad") + gradients_ = convert(tf.TensorHandle, gradients_) + outputs_ = convert(tf.TensorHandle, outputs_) tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) @@ -36121,7 +37742,7 @@ end """ - crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=) + crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=bilinear) """ @@ -36146,6 +37767,10 @@ begin end function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradImage") + grads_ = convert(tf.TensorHandle, grads_) + boxes_ = convert(tf.TensorHandle, boxes_) + box_ind_ = convert(tf.TensorHandle, box_ind_) + image_size_ = convert(tf.TensorHandle, image_size_) tf.add_input(desc, grads_) tf.add_input(desc, boxes_) tf.add_input(desc, box_ind_) @@ -36187,6 +37812,8 @@ begin end function rfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT") + input_ = convert(tf.TensorHandle, input_) + fft_length_ = convert(tf.TensorHandle, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -36231,6 +37858,9 @@ begin end function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSqlDataset") + driver_name_ = convert(tf.TensorHandle, driver_name_) + data_source_name_ = convert(tf.TensorHandle, data_source_name_) + query_ = convert(tf.TensorHandle, query_) tf.add_input(desc, driver_name_) tf.add_input(desc, data_source_name_) tf.add_input(desc, query_) @@ -36288,6 +37918,13 @@ begin end function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyPowerSign") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + lr_ = convert(tf.TensorHandle, lr_) + logbase_ = convert(tf.TensorHandle, logbase_) + sign_decay_ = convert(tf.TensorHandle, sign_decay_) + beta_ = convert(tf.TensorHandle, beta_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -36336,6 +37973,7 @@ begin end function matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDeterminant") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -36379,6 +38017,7 @@ begin end function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) desc = tf.EagerOp("StaticRegexReplace") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if pattern !== nothing desc["pattern"] = Base.String(pattern) @@ -36405,7 +38044,7 @@ end """ - avg_pool(value; data_format=) + avg_pool(value; data_format=NHWC) """ @@ -36434,6 +38073,7 @@ begin end function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -36487,6 +38127,10 @@ begin end function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseAdd") + sp_indices_ = convert(tf.TensorHandle, sp_indices_) + sp_values_ = 
convert(tf.TensorHandle, sp_values_) + sp_shape_ = convert(tf.TensorHandle, sp_shape_) + dense_ = convert(tf.TensorHandle, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -36528,6 +38172,8 @@ begin end function bias_add_v1_eager(value_, bias_; name=nothing) desc = tf.EagerOp("BiasAddV1") + value_ = convert(tf.TensorHandle, value_) + bias_ = convert(tf.TensorHandle, bias_) tf.add_input(desc, value_) tf.add_input(desc, bias_) desc["T"] = tf.data_type(value_) @@ -36565,6 +38211,7 @@ begin end function invert_permutation_eager(x_; name=nothing) desc = tf.EagerOp("InvertPermutation") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -36678,6 +38325,12 @@ begin end function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("SparseApplyMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -36741,6 +38394,7 @@ begin end function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueue") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -36799,6 +38453,10 @@ begin end function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniformInt") + shape_ = convert(tf.TensorHandle, shape_) + seed_ = convert(tf.TensorHandle, seed_) + minval_ = convert(tf.TensorHandle, minval_) + maxval_ = convert(tf.TensorHandle, maxval_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) tf.add_input(desc, minval_) @@ -36860,6 +38518,10 @@ begin end function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) + updates_ = convert(tf.TensorHandle, updates_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, updates_) @@ -36924,6 +38586,7 @@ begin end function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Send") + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tensor_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -36990,6 +38653,8 @@ begin end function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapPeek") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -37046,6 +38711,10 @@ begin end function 
write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) desc = tf.EagerOp("WriteScalarSummary") + writer_ = convert(tf.TensorHandle, writer_) + step_ = convert(tf.TensorHandle, step_) + tag_ = convert(tf.TensorHandle, tag_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -37103,6 +38772,7 @@ begin end function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstageNoKey") + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, indices_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -37175,6 +38845,16 @@ begin end function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyCenteredRMSProp") + var_ = convert(tf.TensorHandle, var_) + mg_ = convert(tf.TensorHandle, mg_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -37244,6 +38924,10 @@ begin end function tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatterV2") + tensor_ = convert(tf.TensorHandle, tensor_) + indices_ = convert(tf.TensorHandle, indices_) + element_shape_ = convert(tf.TensorHandle, element_shape_) + num_elements_ = convert(tf.TensorHandle, num_elements_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -37272,7 +38956,7 @@ end """ - conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1]) + conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) """ @@ -37306,6 +38990,9 @@ begin end function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInputV2") + input_sizes_ = convert(tf.TensorHandle, input_sizes_) + filter_ = convert(tf.TensorHandle, filter_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -37422,6 +39109,7 @@ begin end function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomShuffle") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -37485,6 +39173,7 @@ begin end function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("UniformCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -37543,6 +39232,10 @@ begin end function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = 
tf.EagerOp("TensorArraySplitV2") + handle_ = convert(tf.TensorHandle, handle_) + value_ = convert(tf.TensorHandle, value_) + lengths_ = convert(tf.TensorHandle, lengths_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, lengths_) @@ -37607,6 +39300,8 @@ begin end function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTableV2") + empty_key_ = convert(tf.TensorHandle, empty_key_) + deleted_key_ = convert(tf.TensorHandle, deleted_key_) tf.add_input(desc, empty_key_) tf.add_input(desc, deleted_key_) if container !== nothing @@ -37670,6 +39365,8 @@ begin end function draw_bounding_boxes_eager(images_, boxes_; name=nothing) desc = tf.EagerOp("DrawBoundingBoxes") + images_ = convert(tf.TensorHandle, images_) + boxes_ = convert(tf.TensorHandle, boxes_) tf.add_input(desc, images_) tf.add_input(desc, boxes_) desc["T"] = tf.data_type(images_) @@ -37723,6 +39420,13 @@ begin end function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -37782,6 +39486,9 @@ begin end function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RangeDataset") + start_ = convert(tf.TensorHandle, start_) + stop_ = convert(tf.TensorHandle, stop_) + step_ = convert(tf.TensorHandle, step_) tf.add_input(desc, start_) tf.add_input(desc, stop_) tf.add_input(desc, step_) @@ -37825,6 +39532,8 @@ begin end function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreStateV2") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + state_ = convert(tf.TensorHandle, state_) tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) res = tf.execute(desc) @@ -37870,6 +39579,8 @@ begin end function top_kv2_eager(input_, k_; name=nothing, sorted=nothing) desc = tf.EagerOp("TopKV2") + input_ = convert(tf.TensorHandle, input_) + k_ = convert(tf.TensorHandle, k_) tf.add_input(desc, input_) tf.add_input(desc, k_) if sorted !== nothing @@ -37909,6 +39620,7 @@ begin end function atanh_eager(x_; name=nothing) desc = tf.EagerOp("Atanh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -37944,6 +39656,7 @@ begin end function debug_gradient_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientIdentity") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -37990,6 +39703,10 @@ begin end function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) desc = tf.EagerOp("SparseAddGrad") + backprop_val_grad_ = convert(tf.TensorHandle, backprop_val_grad_) + a_indices_ = convert(tf.TensorHandle, a_indices_) + b_indices_ = convert(tf.TensorHandle, 
b_indices_) + sum_indices_ = convert(tf.TensorHandle, sum_indices_) tf.add_input(desc, backprop_val_grad_) tf.add_input(desc, a_indices_) tf.add_input(desc, b_indices_) @@ -38037,6 +39754,9 @@ begin end function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterAdd") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -38078,6 +39798,7 @@ begin end function ceil_eager(x_; name=nothing) desc = tf.EagerOp("Ceil") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -38119,6 +39840,9 @@ begin end function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing) desc = tf.EagerOp("Save") + filename_ = convert(tf.TensorHandle, filename_) + tensor_names_ = convert(tf.TensorHandle, tensor_names_) + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, filename_) tf.add_input(desc, tensor_names_) tf.add_input(desc, data_) @@ -38231,6 +39955,10 @@ begin end function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) desc = tf.EagerOp("QuantizedConcat") + concat_dim_ = convert(tf.TensorHandle, concat_dim_) + values_ = convert(tf.TensorHandle, values_) + input_mins_ = convert(tf.TensorHandle, input_mins_) + input_maxes_ = convert(tf.TensorHandle, input_maxes_) tf.add_input(desc, concat_dim_) tf.add_input(desc, values_) tf.add_input(desc, input_mins_) @@ -38272,6 +40000,7 @@ begin end function zeros_like_eager(x_; name=nothing) desc = tf.EagerOp("ZerosLike") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -38330,6 +40059,7 @@ begin end function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FractionalAvgPool") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) if pooling_ratio !== nothing desc["pooling_ratio"] = map(Base.identity, pooling_ratio) @@ -38396,6 +40126,12 @@ begin end function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) desc = tf.EagerOp("EditDistance") + hypothesis_indices_ = convert(tf.TensorHandle, hypothesis_indices_) + hypothesis_values_ = convert(tf.TensorHandle, hypothesis_values_) + hypothesis_shape_ = convert(tf.TensorHandle, hypothesis_shape_) + truth_indices_ = convert(tf.TensorHandle, truth_indices_) + truth_values_ = convert(tf.TensorHandle, truth_values_) + truth_shape_ = convert(tf.TensorHandle, truth_shape_) tf.add_input(desc, hypothesis_indices_) tf.add_input(desc, hypothesis_values_) tf.add_input(desc, hypothesis_shape_) @@ -38451,6 +40187,8 @@ begin end function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueV2") + x_ = convert(tf.TensorHandle, x_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if out_idx !== nothing @@ -38474,7 +40212,7 @@ end """ - quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=) + quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, 
round_mode=HALF_TO_EVEN) """ @@ -38507,6 +40245,9 @@ begin end function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeAndDequantizeV2") + input_ = convert(tf.TensorHandle, input_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -38573,6 +40314,7 @@ begin end function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) desc = tf.EagerOp("QuantizeAndDequantize") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if signed_input !== nothing desc["signed_input"] = Base.Bool(signed_input) @@ -38632,6 +40374,8 @@ begin end function tensor_list_pop_back_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPopBack") + input_handle_ = convert(tf.TensorHandle, input_handle_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -38682,6 +40426,7 @@ begin end function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNanCount") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -38746,6 +40491,14 @@ begin end function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdagradDA") + var_ = convert(tf.TensorHandle, var_) + gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) + grad_ = convert(tf.TensorHandle, grad_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + global_step_ = convert(tf.TensorHandle, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -38780,7 +40533,7 @@ end """ - depthwise_conv2d_native(input, filter; data_format=, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native(input, filter; data_format=NHWC, dilations=[1, 1, 1, 1]) """ @@ -38811,6 +40564,8 @@ begin end function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNative") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -38859,6 +40614,7 @@ begin end function serialize_iterator_eager(resource_handle_; name=nothing) desc = tf.EagerOp("SerializeIterator") + resource_handle_ = convert(tf.TensorHandle, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing) @@ -38892,6 +40648,7 @@ begin end function dataset_to_graph_eager(input_dataset_; name=nothing) desc = tf.EagerOp("DatasetToGraph") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, 
input_dataset_) res = tf.execute(desc) node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing) @@ -38937,6 +40694,7 @@ begin end function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing) desc = tf.EagerOp("TopK") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if k !== nothing desc["k"] = Base.Int(k) @@ -38997,6 +40755,15 @@ begin end function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrlV2") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) + lr_power_ = convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -39053,6 +40820,7 @@ begin end function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastRecv") + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, shape_) if num_devices !== nothing desc["num_devices"] = Base.Int(num_devices) @@ -39092,6 +40860,7 @@ begin end function queue_is_closed_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosed") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed, [handle_], name=nothing) @@ -39140,6 +40909,10 @@ begin end function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) + seed_ = convert(tf.TensorHandle, seed_) + seed2_ = convert(tf.TensorHandle, seed2_) tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) tf.add_input(desc, seed_) @@ -39194,6 +40967,7 @@ begin end function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeSparse") + serialized_sparse_ = convert(tf.TensorHandle, serialized_sparse_) tf.add_input(desc, serialized_sparse_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -39337,6 +41111,7 @@ begin end function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("TruncatedNormal") + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, shape_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -39385,6 +41160,8 @@ begin end function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) desc = tf.EagerOp("TensorForestTreePredict") + tree_handle_ = convert(tf.TensorHandle, tree_handle_) + dense_features_ = convert(tf.TensorHandle, dense_features_) tf.add_input(desc, tree_handle_) tf.add_input(desc, dense_features_) if logits_dimension !== nothing @@ -39428,6 +41205,7 @@ begin end function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) desc = tf.EagerOp("StackV2") + max_size_ = convert(tf.TensorHandle, max_size_) tf.add_input(desc, max_size_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -39467,6 +41245,7 @@ begin end function 
accumulator_num_accumulated_eager(handle_; name=nothing) desc = tf.EagerOp("AccumulatorNumAccumulated") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing) @@ -39500,6 +41279,7 @@ begin end function reader_reset_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderResetV2") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing) @@ -39549,6 +41329,13 @@ begin end function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAddSign") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + lr_ = convert(tf.TensorHandle, lr_) + alpha_ = convert(tf.TensorHandle, alpha_) + sign_decay_ = convert(tf.TensorHandle, sign_decay_) + beta_ = convert(tf.TensorHandle, beta_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -39658,6 +41445,7 @@ begin end function rint_eager(x_; name=nothing) desc = tf.EagerOp("Rint") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -39735,7 +41523,7 @@ end """ - extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=) + extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=uniform) """ @@ -39767,6 +41555,9 @@ begin end function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) desc = tf.EagerOp("ExtractGlimpse") + input_ = convert(tf.TensorHandle, input_) + size_ = convert(tf.TensorHandle, size_) + offsets_ = convert(tf.TensorHandle, offsets_) tf.add_input(desc, input_) tf.add_input(desc, size_) tf.add_input(desc, offsets_) @@ -39820,6 +41611,7 @@ begin end function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing) desc = tf.EagerOp("StringToHashBucketStrong") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -39938,6 +41730,12 @@ begin end function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -39995,6 +41793,10 @@ begin end function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) desc = tf.EagerOp("SaveSlices") + filename_ = convert(tf.TensorHandle, filename_) + tensor_names_ = convert(tf.TensorHandle, tensor_names_) + shapes_and_slices_ = convert(tf.TensorHandle, shapes_and_slices_) + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, filename_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shapes_and_slices_) @@ -40034,6 +41836,7 @@ begin end function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) desc = 
tf.EagerOp("ExperimentalDatasetCardinality") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) res = tf.execute(desc) node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing) @@ -40068,6 +41871,7 @@ begin end function is_finite_eager(x_; name=nothing) desc = tf.EagerOp("IsFinite") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -40125,6 +41929,11 @@ begin end function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) + drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, batch_size_) @@ -40189,6 +41998,8 @@ begin end function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) desc = tf.EagerOp("AllToAll") + input_ = convert(tf.TensorHandle, input_) + group_assignment_ = convert(tf.TensorHandle, group_assignment_) tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) if concat_dimension !== nothing @@ -40247,6 +42058,7 @@ begin end function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TakeManySparseFromTensorsMap") + sparse_handles_ = convert(tf.TensorHandle, sparse_handles_) tf.add_input(desc, sparse_handles_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -40290,6 +42102,7 @@ begin end function batch_matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDiagPart") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -40332,6 +42145,11 @@ begin end function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDataset") + filenames_ = convert(tf.TensorHandle, filenames_) + header_bytes_ = convert(tf.TensorHandle, header_bytes_) + record_bytes_ = convert(tf.TensorHandle, record_bytes_) + footer_bytes_ = convert(tf.TensorHandle, footer_bytes_) + buffer_size_ = convert(tf.TensorHandle, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) tf.add_input(desc, record_bytes_) @@ -40375,6 +42193,8 @@ begin end function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPush") + handle_ = convert(tf.TensorHandle, handle_) + elem_ = convert(tf.TensorHandle, elem_) tf.add_input(desc, handle_) tf.add_input(desc, elem_) if swap_memory !== nothing @@ -40459,6 +42279,9 @@ begin end function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorInit") + dataset_ = convert(tf.TensorHandle, dataset_) + multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) + max_buffer_size_ = convert(tf.TensorHandle, max_buffer_size_) 
tf.add_input(desc, dataset_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, max_buffer_size_) @@ -40498,6 +42321,9 @@ begin end function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) desc = tf.EagerOp("GcsConfigureBlockCache") + max_cache_size_ = convert(tf.TensorHandle, max_cache_size_) + block_size_ = convert(tf.TensorHandle, block_size_) + max_staleness_ = convert(tf.TensorHandle, max_staleness_) tf.add_input(desc, max_cache_size_) tf.add_input(desc, block_size_) tf.add_input(desc, max_staleness_) @@ -40539,6 +42365,7 @@ begin end function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if component_types !== nothing desc["component_types"] = map(Base.identity, component_types) @@ -40641,6 +42468,8 @@ begin end function transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("Transpose") + x_ = convert(tf.TensorHandle, x_) + perm_ = convert(tf.TensorHandle, perm_) tf.add_input(desc, x_) tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) @@ -40678,6 +42507,7 @@ begin end function ifft_eager(input_; name=nothing) desc = tf.EagerOp("IFFT") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -40722,6 +42552,10 @@ begin end function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSumWithNumSegments") + data_ = convert(tf.TensorHandle, data_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -40761,6 +42595,7 @@ begin end function queue_is_closed_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosedV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing) @@ -40813,6 +42648,11 @@ begin end function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("ParameterizedTruncatedNormal") + shape_ = convert(tf.TensorHandle, shape_) + means_ = convert(tf.TensorHandle, means_) + stdevs_ = convert(tf.TensorHandle, stdevs_) + minvals_ = convert(tf.TensorHandle, minvals_) + maxvals_ = convert(tf.TensorHandle, maxvals_) tf.add_input(desc, shape_) tf.add_input(desc, means_) tf.add_input(desc, stdevs_) @@ -40865,6 +42705,7 @@ begin end function diag_part_eager(input_; name=nothing) desc = tf.EagerOp("DiagPart") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -40905,6 +42746,10 @@ begin end function kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) desc = tf.EagerOp("KmeansPlusPlusInitialization") + points_ = convert(tf.TensorHandle, points_) + num_to_sample_ = convert(tf.TensorHandle, num_to_sample_) + seed_ = convert(tf.TensorHandle, seed_) + num_retries_per_sample_ = convert(tf.TensorHandle, num_retries_per_sample_) tf.add_input(desc, points_) tf.add_input(desc, num_to_sample_) tf.add_input(desc, seed_) @@ -40948,6 +42793,9 @@ begin end function 
regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) desc = tf.EagerOp("RegexReplace") + input_ = convert(tf.TensorHandle, input_) + pattern_ = convert(tf.TensorHandle, pattern_) + rewrite_ = convert(tf.TensorHandle, rewrite_) tf.add_input(desc, input_) tf.add_input(desc, pattern_) tf.add_input(desc, rewrite_) @@ -41001,6 +42849,10 @@ begin end function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) desc = tf.EagerOp("SparseTensorDenseMatMul") + a_indices_ = convert(tf.TensorHandle, a_indices_) + a_values_ = convert(tf.TensorHandle, a_values_) + a_shape_ = convert(tf.TensorHandle, a_shape_) + b_ = convert(tf.TensorHandle, b_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -41063,6 +42915,8 @@ begin end function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) desc = tf.EagerOp("MapDefun") + arguments_ = convert(tf.TensorHandle, arguments_) + captured_inputs_ = convert(tf.TensorHandle, captured_inputs_) tf.add_input(desc, arguments_) tf.add_input(desc, captured_inputs_) if Targuments !== nothing @@ -41135,6 +42989,7 @@ begin end function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler") + true_classes_ = convert(tf.TensorHandle, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -41252,6 +43107,7 @@ begin end function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("ParallelConcat") + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, values_) if N !== nothing desc["N"] = Base.Int(N) @@ -41298,6 +43154,9 @@ begin end function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFindV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + default_value_ = convert(tf.TensorHandle, default_value_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, default_value_) @@ -41337,6 +43196,8 @@ begin end function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestTreeDeserialize") + tree_handle_ = convert(tf.TensorHandle, tree_handle_) + tree_config_ = convert(tf.TensorHandle, tree_config_) tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = tf.execute(desc) @@ -41442,6 +43303,7 @@ begin end function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgs") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if min !== nothing desc["min"] = Base.identity(min) @@ -41495,6 +43357,9 @@ begin end function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyGradientDescent") + var_ = convert(tf.TensorHandle, var_) + alpha_ = convert(tf.TensorHandle, alpha_) + delta_ = convert(tf.TensorHandle, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, delta_) @@ -41547,6 +43412,10 @@ begin end function 
experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSlidingWindowDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + window_size_ = convert(tf.TensorHandle, window_size_) + window_shift_ = convert(tf.TensorHandle, window_shift_) + window_stride_ = convert(tf.TensorHandle, window_stride_) tf.add_input(desc, input_dataset_) tf.add_input(desc, window_size_) tf.add_input(desc, window_shift_) @@ -41595,6 +43464,7 @@ begin end function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing) desc = tf.EagerOp("DecodeRaw") + bytes_ = convert(tf.TensorHandle, bytes_) tf.add_input(desc, bytes_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -41651,6 +43521,10 @@ begin end function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") + gradients_ = convert(tf.TensorHandle, gradients_) + inputs_ = convert(tf.TensorHandle, inputs_) + min_ = convert(tf.TensorHandle, min_) + max_ = convert(tf.TensorHandle, max_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -41705,6 +43579,8 @@ begin end function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCountsV2") + x_ = convert(tf.TensorHandle, x_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if out_idx !== nothing @@ -41752,6 +43628,8 @@ begin end function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSleepDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + sleep_microseconds_ = convert(tf.TensorHandle, sleep_microseconds_) tf.add_input(desc, input_dataset_) tf.add_input(desc, sleep_microseconds_) if output_types !== nothing @@ -41801,6 +43679,7 @@ begin end function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing) desc = tf.EagerOp("TPUReplicatedOutput") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if num_replicas !== nothing desc["num_replicas"] = Base.Int(num_replicas) @@ -41844,6 +43723,8 @@ begin end function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("LowerBound") + sorted_inputs_ = convert(tf.TensorHandle, sorted_inputs_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) if out_type !== nothing @@ -41884,6 +43765,7 @@ begin end function tan_eager(x_; name=nothing) desc = tf.EagerOp("Tan") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -41928,6 +43810,7 @@ begin end function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("Enter") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) if frame_name !== nothing desc["frame_name"] = Base.String(frame_name) @@ -41983,6 +43866,7 @@ begin end function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueueTuple") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) 
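The keyword attributes keep their existing convention alongside the new converts: every attr defaults to `nothing` and is written into the descriptor only when the caller supplies it, so the kernel's registered default applies otherwise. A hedged sketch of that convention (op and attr names hypothetical):

function my_dataset_op_eager(input_; name=nothing, output_types=nothing, output_shapes=nothing)
    desc = tf.EagerOp("MyDatasetOp")
    input_ = convert(tf.TensorHandle, input_)
    tf.add_input(desc, input_)
    if output_types !== nothing
        desc["output_types"] = map(Base.identity, output_types)    # list attrs copied element-wise
    end
    if output_shapes !== nothing
        desc["output_shapes"] = map(Base.identity, output_shapes)
    end
    # scalar attrs go through Base.Int / Base.Bool / Base.String instead of map
    return tf.execute(desc)
end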
if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -42028,6 +43912,7 @@ begin end function _set_global_tpu_array_eager(topology_; name=nothing) desc = tf.EagerOp("_SetGlobalTPUArray") + topology_ = convert(tf.TensorHandle, topology_) tf.add_input(desc, topology_) res = tf.execute(desc) node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing) @@ -42062,6 +43947,7 @@ begin end function square_eager(x_; name=nothing) desc = tf.EagerOp("Square") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -42097,6 +43983,7 @@ begin end function debug_gradient_ref_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientRefIdentity") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -42147,6 +44034,13 @@ begin end function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdadelta") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + accum_update_ = convert(tf.TensorHandle, accum_update_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -42226,6 +44120,10 @@ begin end function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByWindowDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + key_func_other_arguments_ = convert(tf.TensorHandle, key_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.TensorHandle, reduce_func_other_arguments_) + window_size_func_other_arguments_ = convert(tf.TensorHandle, window_size_func_other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) tf.add_input(desc, reduce_func_other_arguments_) @@ -42294,6 +44192,8 @@ begin end function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) desc = tf.EagerOp("AudioSummary") + tag_ = convert(tf.TensorHandle, tag_) + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, tag_) tf.add_input(desc, tensor_) if sample_rate !== nothing @@ -42337,6 +44237,8 @@ begin end function squared_difference_eager(x_, y_; name=nothing) desc = tf.EagerOp("SquaredDifference") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -42387,6 +44289,8 @@ begin end function experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalTakeWhileDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if predicate !== nothing @@ -42443,6 +44347,9 @@ begin end function 
scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdUpdate") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -42490,6 +44397,8 @@ begin end function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) desc = tf.EagerOp("DynamicStitch") + indices_ = convert(tf.TensorHandle, indices_) + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, indices_) tf.add_input(desc, data_) if N !== nothing @@ -42529,6 +44438,7 @@ begin end function ones_like_eager(x_; name=nothing) desc = tf.EagerOp("OnesLike") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -42575,6 +44485,11 @@ begin end function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalMaxPoolGrad") + orig_input_ = convert(tf.TensorHandle, orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) + row_pooling_sequence_ = convert(tf.TensorHandle, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.TensorHandle, col_pooling_sequence_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, out_backprop_) @@ -42629,6 +44544,8 @@ begin end function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("RemoteCall") + target_ = convert(tf.TensorHandle, target_) + args_ = convert(tf.TensorHandle, args_) tf.add_input(desc, target_) tf.add_input(desc, args_) if Tin !== nothing @@ -42680,6 +44597,8 @@ begin end function gather_eager(params_, indices_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("Gather") + params_ = convert(tf.TensorHandle, params_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, params_) tf.add_input(desc, indices_) if validate_indices !== nothing @@ -42742,6 +44661,12 @@ begin end function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("QuantizedMatMul") + a_ = convert(tf.TensorHandle, a_) + b_ = convert(tf.TensorHandle, b_) + min_a_ = convert(tf.TensorHandle, min_a_) + max_a_ = convert(tf.TensorHandle, max_a_) + min_b_ = convert(tf.TensorHandle, min_b_) + max_b_ = convert(tf.TensorHandle, max_b_) tf.add_input(desc, a_) tf.add_input(desc, b_) tf.add_input(desc, min_a_) @@ -42772,7 +44697,7 @@ end """ - unicode_decode_with_offsets(input; errors=, replacement_char=65533, replace_control_characters=false) + unicode_decode_with_offsets(input; errors=replace, replacement_char=65533, replace_control_characters=false) """ @@ -42805,6 +44730,7 @@ begin end function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecodeWithOffsets") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -42868,6 +44794,10 @@ begin end function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, 
device_ordinal=nothing, combiners=nothing, table_ids=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") + sample_indices_ = convert(tf.TensorHandle, sample_indices_) + embedding_indices_ = convert(tf.TensorHandle, embedding_indices_) + aggregation_weights_ = convert(tf.TensorHandle, aggregation_weights_) + mode_override_ = convert(tf.TensorHandle, mode_override_) tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) tf.add_input(desc, aggregation_weights_) @@ -42924,6 +44854,9 @@ begin end function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorApplyGradient") + handle_ = convert(tf.TensorHandle, handle_) + local_step_ = convert(tf.TensorHandle, local_step_) + gradient_ = convert(tf.TensorHandle, gradient_) tf.add_input(desc, handle_) tf.add_input(desc, local_step_) tf.add_input(desc, gradient_) @@ -42972,6 +44905,11 @@ begin end function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) desc = tf.EagerOp("WriteSummary") + writer_ = convert(tf.TensorHandle, writer_) + step_ = convert(tf.TensorHandle, step_) + tensor_ = convert(tf.TensorHandle, tensor_) + tag_ = convert(tf.TensorHandle, tag_) + summary_metadata_ = convert(tf.TensorHandle, summary_metadata_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) @@ -43039,6 +44977,12 @@ begin end function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2D") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -43106,6 +45050,11 @@ begin end function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -43153,6 +45102,7 @@ begin end function log1p_eager(x_; name=nothing) desc = tf.EagerOp("Log1p") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -43257,6 +45207,9 @@ begin end function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterUpdate") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -43316,6 +45269,8 @@ begin end function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) desc = tf.EagerOp("BarrierTakeMany") + handle_ = convert(tf.TensorHandle, handle_) + num_elements_ = convert(tf.TensorHandle, num_elements_) tf.add_input(desc, handle_) 
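With the converts in place, a wrapper such as `log1p_eager` above can be called directly on an ordinary array; the coercion to `tf.TensorHandle` happens inside. A usage sketch, assuming the `convert` methods this series relies on accept raw Julia arrays and that an eager context is active:

h = log1p_eager([0.0, 1.0, 2.0])  # argument coerced to a TensorHandle internally
# h holds the result handle; log1p(0) = 0, log1p(1) ≈ 0.6931, log1p(2) ≈ 1.0986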
tf.add_input(desc, num_elements_) if component_types !== nothing @@ -43377,6 +45332,11 @@ begin end function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyKerasMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -43504,6 +45464,7 @@ begin end function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaRecvAtHost") + dynamic_key_ = convert(tf.TensorHandle, dynamic_key_) tf.add_input(desc, dynamic_key_) if Toutputs !== nothing desc["Toutputs"] = map(Base.identity, Toutputs) @@ -43565,6 +45526,9 @@ begin end function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedAvgPool") + input_ = convert(tf.TensorHandle, input_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) tf.add_input(desc, input_) tf.add_input(desc, min_input_) tf.add_input(desc, max_input_) @@ -43634,6 +45598,17 @@ begin end function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + vhat_ = convert(tf.TensorHandle, vhat_) + beta1_power_ = convert(tf.TensorHandle, beta1_power_) + beta2_power_ = convert(tf.TensorHandle, beta2_power_) + lr_ = convert(tf.TensorHandle, lr_) + beta1_ = convert(tf.TensorHandle, beta1_) + beta2_ = convert(tf.TensorHandle, beta2_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -43689,6 +45664,8 @@ begin end function tensor_list_resize_eager(input_handle_, size_; name=nothing) desc = tf.EagerOp("TensorListResize") + input_handle_ = convert(tf.TensorHandle, input_handle_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, input_handle_) tf.add_input(desc, size_) res = tf.execute(desc) @@ -43797,6 +45774,11 @@ begin end function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) desc = tf.EagerOp("BoostedTreesCenterBias") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + mean_gradients_ = convert(tf.TensorHandle, mean_gradients_) + mean_hessians_ = convert(tf.TensorHandle, mean_hessians_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, mean_gradients_) tf.add_input(desc, mean_hessians_) @@ -43834,6 +45816,7 @@ begin end function lookup_table_size_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSizeV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing) @@ -43869,6 +45852,8 @@ begin end function irfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT") + input_ = 
convert(tf.TensorHandle, input_) + fft_length_ = convert(tf.TensorHandle, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -43908,6 +45893,9 @@ begin end function inplace_add_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceAdd") + x_ = convert(tf.TensorHandle, x_) + i_ = convert(tf.TensorHandle, i_) + v_ = convert(tf.TensorHandle, v_) tf.add_input(desc, x_) tf.add_input(desc, i_) tf.add_input(desc, v_) @@ -43929,7 +45917,7 @@ end """ - bias_add(value, bias; data_format=) + bias_add(value, bias; data_format=NHWC) """ @@ -43951,6 +45939,8 @@ begin end function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAdd") + value_ = convert(tf.TensorHandle, value_) + bias_ = convert(tf.TensorHandle, bias_) tf.add_input(desc, value_) tf.add_input(desc, bias_) if data_format !== nothing @@ -44008,6 +45998,10 @@ begin end function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + momenta_ = convert(tf.TensorHandle, momenta_) + velocities_ = convert(tf.TensorHandle, velocities_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, velocities_) @@ -44097,6 +46091,9 @@ begin end function ragged_range_eager(starts_, limits_, deltas_; name=nothing) desc = tf.EagerOp("RaggedRange") + starts_ = convert(tf.TensorHandle, starts_) + limits_ = convert(tf.TensorHandle, limits_) + deltas_ = convert(tf.TensorHandle, deltas_) tf.add_input(desc, starts_) tf.add_input(desc, limits_) tf.add_input(desc, deltas_) @@ -44149,6 +46146,11 @@ begin end function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("WindowDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + size_ = convert(tf.TensorHandle, size_) + shift_ = convert(tf.TensorHandle, shift_) + stride_ = convert(tf.TensorHandle, stride_) + drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, size_) tf.add_input(desc, shift_) @@ -44193,6 +46195,7 @@ begin end function diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("Diag") + diagonal_ = convert(tf.TensorHandle, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) @@ -44277,6 +46280,8 @@ begin end function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLatencyStatsDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + tag_ = convert(tf.TensorHandle, tag_) tf.add_input(desc, input_dataset_) tf.add_input(desc, tag_) if output_types !== nothing @@ -44328,6 +46333,9 @@ begin end function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddSparseToTensorsMap") + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_values_ = convert(tf.TensorHandle, sparse_values_) + sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, 
sparse_values_) tf.add_input(desc, sparse_shape_) @@ -44388,6 +46396,9 @@ begin end function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedGather") + params_nested_splits_ = convert(tf.TensorHandle, params_nested_splits_) + params_dense_values_ = convert(tf.TensorHandle, params_dense_values_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, params_nested_splits_) tf.add_input(desc, params_dense_values_) tf.add_input(desc, indices_) @@ -44432,6 +46443,7 @@ begin end function rgb_to_hsv_eager(images_; name=nothing) desc = tf.EagerOp("RGBToHSV") + images_ = convert(tf.TensorHandle, images_) tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) @@ -44466,6 +46478,7 @@ begin end function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") + multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) tf.add_input(desc, multi_device_iterator_) res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing) @@ -44511,6 +46524,10 @@ begin end function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) desc = tf.EagerOp("For") + start_ = convert(tf.TensorHandle, start_) + limit_ = convert(tf.TensorHandle, limit_) + delta_ = convert(tf.TensorHandle, delta_) + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, start_) tf.add_input(desc, limit_) tf.add_input(desc, delta_) @@ -44568,6 +46585,10 @@ begin end function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMaxSparse") + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_values_ = convert(tf.TensorHandle, input_values_) + input_shape_ = convert(tf.TensorHandle, input_shape_) + reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -44618,6 +46639,8 @@ begin end function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatOffset") + concat_dim_ = convert(tf.TensorHandle, concat_dim_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, concat_dim_) tf.add_input(desc, shape_) if N !== nothing @@ -44670,6 +46693,7 @@ begin end function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Stage") + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, values_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -44726,6 +46750,8 @@ begin end function switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("Switch") + data_ = convert(tf.TensorHandle, data_) + pred_ = convert(tf.TensorHandle, pred_) tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) @@ -44769,6 +46795,8 @@ begin end function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueManyV2") + handle_ = convert(tf.TensorHandle, handle_) + n_ = convert(tf.TensorHandle, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -44814,6 +46842,8 @@ begin end function segment_prod_eager(data_, 
segment_ids_; name=nothing) desc = tf.EagerOp("SegmentProd") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -44856,6 +46886,8 @@ begin end function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) desc = tf.EagerOp("ApproximateEqual") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if tolerance !== nothing @@ -44879,7 +46911,7 @@ end """ - conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1]) + conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) """ @@ -44916,6 +46948,8 @@ begin end function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2D") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -44973,6 +47007,8 @@ begin end function cross_replica_sum_eager(input_, group_assignment_; name=nothing) desc = tf.EagerOp("CrossReplicaSum") + input_ = convert(tf.TensorHandle, input_) + group_assignment_ = convert(tf.TensorHandle, group_assignment_) tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) desc["T"] = tf.data_type(input_) @@ -45024,6 +47060,8 @@ begin end function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) desc = tf.EagerOp("SparseMatMul") + a_ = convert(tf.TensorHandle, a_) + b_ = convert(tf.TensorHandle, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) if transpose_a !== nothing @@ -45092,6 +47130,8 @@ begin end function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) desc = tf.EagerOp("_ScopedAllocatorSplit") + concat_ = convert(tf.TensorHandle, concat_) + split_ = convert(tf.TensorHandle, split_) tf.add_input(desc, concat_) tf.add_input(desc, split_) if sa_name !== nothing @@ -45143,6 +47183,8 @@ begin end function igammac_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igammac") + a_ = convert(tf.TensorHandle, a_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -45188,6 +47230,8 @@ begin end function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) desc = tf.EagerOp("BatchMatMul") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if adj_x !== nothing @@ -45245,6 +47289,10 @@ begin end function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = convert(tf.TensorHandle, sample_indices_) + embedding_indices_ = convert(tf.TensorHandle, embedding_indices_) + aggregation_weights_ = convert(tf.TensorHandle, aggregation_weights_) + mode_override_ = convert(tf.TensorHandle, mode_override_) tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) tf.add_input(desc, aggregation_weights_) @@ -45293,6 +47341,7 @@ begin end function 
queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueCloseV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) @@ -45337,6 +47386,8 @@ begin end function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayPack") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -45379,6 +47430,8 @@ begin end function reader_restore_state_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreState") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + state_ = convert(tf.TensorHandle, state_) tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) res = tf.execute(desc) @@ -45397,7 +47450,7 @@ end """ - _fused_conv2d(input, filter, args; data_format=, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) + _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) *NOTE*: Do not invoke this operator directly in Python. Grappler is """ @@ -45442,6 +47495,9 @@ begin end function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) desc = tf.EagerOp("_FusedConv2D") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + args_ = convert(tf.TensorHandle, args_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, args_) @@ -45510,6 +47566,7 @@ begin end function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing) desc = tf.EagerOp("_ReadVariablesOp") + resources_ = convert(tf.TensorHandle, resources_) tf.add_input(desc, resources_) if N !== nothing desc["N"] = Base.Int(N) @@ -45615,6 +47672,7 @@ begin end function read_file_eager(filename_; name=nothing) desc = tf.EagerOp("ReadFile") + filename_ = convert(tf.TensorHandle, filename_) tf.add_input(desc, filename_) res = tf.execute(desc) node = tf.TapeNode(read_file, [filename_], name=nothing) @@ -45666,6 +47724,10 @@ begin end function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) + weights_ = convert(tf.TensorHandle, weights_) + benefits_ = convert(tf.TensorHandle, benefits_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, weights_) @@ -45724,6 +47786,10 @@ begin end function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalAvgPoolGrad") + orig_input_tensor_shape_ = convert(tf.TensorHandle, orig_input_tensor_shape_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) + row_pooling_sequence_ = convert(tf.TensorHandle, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.TensorHandle, col_pooling_sequence_) tf.add_input(desc, 
orig_input_tensor_shape_) tf.add_input(desc, out_backprop_) tf.add_input(desc, row_pooling_sequence_) @@ -45780,6 +47846,9 @@ begin end function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, gradient_accumulators_) @@ -45838,6 +47907,9 @@ begin end function stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) desc = tf.EagerOp("StatefulStandardNormalV2") + resource_ = convert(tf.TensorHandle, resource_) + algorithm_ = convert(tf.TensorHandle, algorithm_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, resource_) tf.add_input(desc, algorithm_) tf.add_input(desc, shape_) @@ -45885,6 +47957,9 @@ begin end function bincount_eager(arr_, size_, weights_; name=nothing) desc = tf.EagerOp("Bincount") + arr_ = convert(tf.TensorHandle, arr_) + size_ = convert(tf.TensorHandle, size_) + weights_ = convert(tf.TensorHandle, weights_) tf.add_input(desc, arr_) tf.add_input(desc, size_) tf.add_input(desc, weights_) @@ -45922,6 +47997,7 @@ begin end function inv_eager(x_; name=nothing) desc = tf.EagerOp("Inv") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -45970,6 +48046,12 @@ begin end function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -46025,6 +48107,9 @@ begin end function gather_v2_eager(params_, indices_, axis_; name=nothing) desc = tf.EagerOp("GatherV2") + params_ = convert(tf.TensorHandle, params_) + indices_ = convert(tf.TensorHandle, indices_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, params_) tf.add_input(desc, indices_) tf.add_input(desc, axis_) @@ -46065,6 +48150,8 @@ begin end function write_file_eager(filename_, contents_; name=nothing) desc = tf.EagerOp("WriteFile") + filename_ = convert(tf.TensorHandle, filename_) + contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, filename_) tf.add_input(desc, contents_) res = tf.execute(desc) @@ -46104,6 +48191,7 @@ begin end function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesGetEnsembleStates") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing) @@ -46147,6 +48235,8 @@ begin end function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) desc = tf.EagerOp("ResourceGather") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, 
resource_) tf.add_input(desc, indices_) if validate_indices !== nothing @@ -46200,6 +48290,11 @@ begin end function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalGradientDescent") + var_ = convert(tf.TensorHandle, var_) + alpha_ = convert(tf.TensorHandle, alpha_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + delta_ = convert(tf.TensorHandle, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, l1_) @@ -46247,6 +48342,8 @@ begin end function truncate_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateMod") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -46289,6 +48386,7 @@ begin end function log_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("LogMatrixDeterminant") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -46325,6 +48423,8 @@ begin end function irfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT2D") + input_ = convert(tf.TensorHandle, input_) + fft_length_ = convert(tf.TensorHandle, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -46376,6 +48476,10 @@ begin end function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesTrainingPredict") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + cached_tree_ids_ = convert(tf.TensorHandle, cached_tree_ids_) + cached_node_ids_ = convert(tf.TensorHandle, cached_node_ids_) + bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, cached_tree_ids_) tf.add_input(desc, cached_node_ids_) @@ -46427,6 +48531,9 @@ begin end function nearest_neighbors_eager(points_, centers_, k_; name=nothing) desc = tf.EagerOp("NearestNeighbors") + points_ = convert(tf.TensorHandle, points_) + centers_ = convert(tf.TensorHandle, centers_) + k_ = convert(tf.TensorHandle, k_) tf.add_input(desc, points_) tf.add_input(desc, centers_) tf.add_input(desc, k_) @@ -46463,6 +48570,7 @@ begin end function floor_eager(x_; name=nothing) desc = tf.EagerOp("Floor") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -46513,6 +48621,9 @@ begin end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) + gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, gradient_accumulators_) @@ -46572,6 +48683,11 @@ begin end function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) desc = tf.EagerOp("WriteImageSummary") + writer_ = convert(tf.TensorHandle, writer_) + step_ = convert(tf.TensorHandle, step_) + tag_ = 
convert(tf.TensorHandle, tag_) + tensor_ = convert(tf.TensorHandle, tensor_) + bad_color_ = convert(tf.TensorHandle, bad_color_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -46616,6 +48732,8 @@ begin end function tile_grad_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("TileGrad") + input_ = convert(tf.TensorHandle, input_) + multiples_ = convert(tf.TensorHandle, multiples_) tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) @@ -46661,6 +48779,8 @@ begin end function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV3") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -46706,6 +48826,8 @@ begin end function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") + batch_ = convert(tf.TensorHandle, batch_) + mode_override_ = convert(tf.TensorHandle, mode_override_) tf.add_input(desc, batch_) tf.add_input(desc, mode_override_) if N !== nothing @@ -46730,7 +48852,7 @@ end """ - fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) + fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) """ @@ -46769,6 +48891,11 @@ begin end function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNorm") + x_ = convert(tf.TensorHandle, x_) + scale_ = convert(tf.TensorHandle, scale_) + offset_ = convert(tf.TensorHandle, offset_) + mean_ = convert(tf.TensorHandle, mean_) + variance_ = convert(tf.TensorHandle, variance_) tf.add_input(desc, x_) tf.add_input(desc, scale_) tf.add_input(desc, offset_) @@ -46822,6 +48949,8 @@ begin end function logical_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalAnd") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) res = tf.execute(desc) @@ -46863,6 +48992,9 @@ begin end function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterUpdate") + tensor_ = convert(tf.TensorHandle, tensor_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -46955,6 +49087,7 @@ begin end function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorSliceDataset") + components_ = convert(tf.TensorHandle, components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -47001,6 +49134,10 @@ begin end function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatterV3") + handle_ = convert(tf.TensorHandle, handle_) + indices_ = convert(tf.TensorHandle, indices_) + value_ = convert(tf.TensorHandle, value_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, value_) @@ -47044,6 +49181,8 @@ begin end function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) desc = 
tf.EagerOp("ResizeNearestNeighborGrad") + grads_ = convert(tf.TensorHandle, grads_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, grads_) tf.add_input(desc, size_) if align_corners !== nothing @@ -47098,6 +49237,13 @@ begin end function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyPowerSign") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + lr_ = convert(tf.TensorHandle, lr_) + logbase_ = convert(tf.TensorHandle, logbase_) + sign_decay_ = convert(tf.TensorHandle, sign_decay_) + beta_ = convert(tf.TensorHandle, beta_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -47155,6 +49301,8 @@ begin end function experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRebatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + num_workers_ = convert(tf.TensorHandle, num_workers_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_workers_) if output_types !== nothing @@ -47202,6 +49350,8 @@ begin end function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPad") + input_ = convert(tf.TensorHandle, input_) + paddings_ = convert(tf.TensorHandle, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) if mode !== nothing @@ -47241,6 +49391,7 @@ begin end function logical_not_eager(x_; name=nothing) desc = tf.EagerOp("LogicalNot") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) res = tf.execute(desc) node = tf.TapeNode(logical_not, [x_], name=nothing) @@ -47274,6 +49425,7 @@ begin end function batch_ifft_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft, [input_], name=nothing) @@ -47320,6 +49472,8 @@ begin end function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV2") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -47368,6 +49522,8 @@ begin end function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Sum") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -47415,6 +49571,8 @@ begin end function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesPredict") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) if num_bucketized_features !== nothing @@ -47491,6 +49649,15 @@ begin end function quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = 
tf.EagerOp("QuantizedConv2DWithBiasAndReluAndRequantize") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) + max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -47564,6 +49731,11 @@ begin end function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -47616,6 +49788,8 @@ begin end function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyReluGrad") + gradients_ = convert(tf.TensorHandle, gradients_) + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) if alpha !== nothing @@ -47659,6 +49833,7 @@ begin end function _device_retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_DeviceRetval") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if index !== nothing desc["index"] = Base.Int(index) @@ -47700,6 +49875,8 @@ begin end function pad_eager(input_, paddings_; name=nothing) desc = tf.EagerOp("Pad") + input_ = convert(tf.TensorHandle, input_) + paddings_ = convert(tf.TensorHandle, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) desc["T"] = tf.data_type(input_) @@ -47747,6 +49924,9 @@ begin end function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddManySparseToTensorsMap") + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_values_ = convert(tf.TensorHandle, sparse_values_) + sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -47799,6 +49979,9 @@ begin end function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) desc = tf.EagerOp("SparseReorder") + input_indices_ = convert(tf.TensorHandle, input_indices_) + input_values_ = convert(tf.TensorHandle, input_values_) + input_shape_ = convert(tf.TensorHandle, input_shape_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -47838,6 +50021,8 @@ begin end function bitwise_xor_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseXor") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -47877,6 +50062,8 @@ begin end function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixSetDiag") + input_ = convert(tf.TensorHandle, input_) + diagonal_ = convert(tf.TensorHandle, diagonal_) tf.add_input(desc, input_) tf.add_input(desc, diagonal_) desc["T"] = 
tf.data_type(input_) @@ -47919,6 +50106,9 @@ begin end function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsertV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -47966,6 +50156,9 @@ begin end function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + row_shape_ = convert(tf.TensorHandle, row_shape_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, row_shape_) @@ -48029,6 +50222,15 @@ begin end function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyRMSProp") + var_ = convert(tf.TensorHandle, var_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -48088,6 +50290,8 @@ begin end function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomCrop") + image_ = convert(tf.TensorHandle, image_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, image_) tf.add_input(desc, size_) if seed !== nothing @@ -48135,6 +50339,9 @@ begin end function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImportV2") + table_handle_ = convert(tf.TensorHandle, table_handle_) + keys_ = convert(tf.TensorHandle, keys_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -48182,6 +50389,9 @@ begin end function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdUpdate") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -48225,6 +50435,7 @@ begin end function static_regex_full_match_eager(input_; name=nothing, pattern=nothing) desc = tf.EagerOp("StaticRegexFullMatch") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if pattern !== nothing desc["pattern"] = Base.String(pattern) @@ -48261,6 +50472,7 @@ begin end function gcs_configure_credentials_eager(json_; name=nothing) desc = tf.EagerOp("GcsConfigureCredentials") + json_ = convert(tf.TensorHandle, json_) tf.add_input(desc, json_) res = tf.execute(desc) node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing) @@ -48296,6 +50508,8 @@ begin end function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV3") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) 
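One subtlety worth noting: each convert rebinds the original argument name, so everything downstream of it — the `tf.add_input` calls, the `tf.data_type` attr lookups, and the `tf.TapeNode` record — sees the `TensorHandle`, never the raw caller value. In the unary pattern that looks like:

x_ = convert(tf.TensorHandle, x_)               # rebinding: x_ is a handle from here on
desc["T"] = tf.data_type(x_)                    # dtype taken from the handle, not the input array
node = tf.TapeNode(square, [x_], name=nothing)  # the tape therefore stores handles as well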
tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -48340,6 +50554,10 @@ begin end function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") + data_ = convert(tf.TensorHandle, data_) + indices_ = convert(tf.TensorHandle, indices_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -48417,6 +50635,11 @@ begin end function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + key_func_other_arguments_ = convert(tf.TensorHandle, key_func_other_arguments_) + init_func_other_arguments_ = convert(tf.TensorHandle, init_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.TensorHandle, reduce_func_other_arguments_) + finalize_func_other_arguments_ = convert(tf.TensorHandle, finalize_func_other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) tf.add_input(desc, init_func_other_arguments_) @@ -48468,7 +50691,7 @@ end """ - conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=, dilations=[1, 1, 1, 1]) + conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) """ @@ -48507,6 +50730,9 @@ begin end function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropFilter") + input_ = convert(tf.TensorHandle, input_) + filter_sizes_ = convert(tf.TensorHandle, filter_sizes_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) tf.add_input(desc, out_backprop_) @@ -48546,7 +50772,7 @@ end """ - max_pool_grad(orig_input, orig_output, grad; data_format=) + max_pool_grad(orig_input, orig_output, grad; data_format=NHWC) """ @@ -48579,6 +50805,9 @@ begin end function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGrad") + orig_input_ = convert(tf.TensorHandle, orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -48629,6 +50858,7 @@ begin end function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) desc = tf.EagerOp("_InitializeHostForDistributedTPU") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing) @@ -48677,6 +50907,7 @@ begin end 
function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StagePeek") + index_ = convert(tf.TensorHandle, index_) tf.add_input(desc, index_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -48731,6 +50962,9 @@ begin end function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) desc = tf.EagerOp("PadV2") + input_ = convert(tf.TensorHandle, input_) + paddings_ = convert(tf.TensorHandle, paddings_) + constant_values_ = convert(tf.TensorHandle, constant_values_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) tf.add_input(desc, constant_values_) @@ -48795,7 +51029,7 @@ end """ - print_v2(input; output_stream=) + print_v2(input; output_stream=stderr) """ @@ -48814,6 +51048,7 @@ begin end function print_v2_eager(input_; name=nothing, output_stream=nothing) desc = tf.EagerOp("PrintV2") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if output_stream !== nothing desc["output_stream"] = Base.String(output_stream) @@ -48856,6 +51091,7 @@ begin end function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptionalGetValue") + optional_ = convert(tf.TensorHandle, optional_) tf.add_input(desc, optional_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -48911,6 +51147,9 @@ begin end function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + accumulators_ = convert(tf.TensorHandle, accumulators_) + linears_ = convert(tf.TensorHandle, linears_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, linears_) @@ -48972,6 +51211,11 @@ begin end function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) desc = tf.EagerOp("SparseSlice") + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) + shape_ = convert(tf.TensorHandle, shape_) + start_ = convert(tf.TensorHandle, start_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shape_) @@ -49022,6 +51266,9 @@ begin end function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") + float_values_ = convert(tf.TensorHandle, float_values_) + example_weights_ = convert(tf.TensorHandle, example_weights_) + epsilon_ = convert(tf.TensorHandle, epsilon_) tf.add_input(desc, float_values_) tf.add_input(desc, example_weights_) tf.add_input(desc, epsilon_) @@ -49066,6 +51313,8 @@ begin end function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixSolve") + matrix_ = convert(tf.TensorHandle, matrix_) + rhs_ = convert(tf.TensorHandle, rhs_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) if adjoint !== nothing @@ -49108,6 +51357,7 @@ begin end function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("_ConfigureDistributedTPU") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -49147,6 +51397,8 @@ begin end function adjust_contrastv2_eager(images_, 
contrast_factor_; name=nothing) desc = tf.EagerOp("AdjustContrastv2") + images_ = convert(tf.TensorHandle, images_) + contrast_factor_ = convert(tf.TensorHandle, contrast_factor_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) desc["T"] = tf.data_type(images_) @@ -49194,6 +51446,10 @@ begin end function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMaximum") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + mkl_x_ = convert(tf.TensorHandle, mkl_x_) + mkl_y_ = convert(tf.TensorHandle, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -49216,7 +51472,7 @@ end """ - cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) """ @@ -49257,6 +51513,9 @@ begin end function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsSize") + num_layers_ = convert(tf.TensorHandle, num_layers_) + num_units_ = convert(tf.TensorHandle, num_units_) + input_size_ = convert(tf.TensorHandle, input_size_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -49318,6 +51577,8 @@ begin end function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") + quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) + summaries_ = convert(tf.TensorHandle, summaries_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, summaries_) if num_features !== nothing @@ -49355,6 +51616,7 @@ begin end function batch_ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT3D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft3d, [input_], name=nothing) @@ -49389,6 +51651,7 @@ begin end function sigmoid_eager(x_; name=nothing) desc = tf.EagerOp("Sigmoid") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -49428,6 +51691,8 @@ begin end function segment_mean_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMean") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -49464,6 +51729,7 @@ begin end function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") + tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing) @@ -49499,6 +51765,8 @@ begin end function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV2") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -49545,6 
+51813,10 @@ begin end function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSub") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + mkl_x_ = convert(tf.TensorHandle, mkl_x_) + mkl_y_ = convert(tf.TensorHandle, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -49594,6 +51866,8 @@ begin end function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) desc = tf.EagerOp("SendTPUEmbeddingGradients") + inputs_ = convert(tf.TensorHandle, inputs_) + learning_rates_ = convert(tf.TensorHandle, learning_rates_) tf.add_input(desc, inputs_) tf.add_input(desc, learning_rates_) if N !== nothing @@ -49621,7 +51895,7 @@ end """ - max_pool3d(input; data_format=) + max_pool3d(input; data_format=NDHWC) """ @@ -49650,6 +51924,7 @@ begin end function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -49704,6 +51979,8 @@ begin end function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Prod") + input_ = convert(tf.TensorHandle, input_) + reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -49743,6 +52020,7 @@ begin end function experimental_identity_indexed_dataset_eager(size_; name=nothing) desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, size_) res = tf.execute(desc) node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing) @@ -49782,6 +52060,8 @@ begin end function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBack") + input_handle_ = convert(tf.TensorHandle, input_handle_) + tensor_ = convert(tf.TensorHandle, tensor_) tf.add_input(desc, input_handle_) tf.add_input(desc, tensor_) if element_dtype !== nothing @@ -49858,6 +52138,8 @@ begin end function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) desc = tf.EagerOp("BatchFunction") + in_tensors_ = convert(tf.TensorHandle, in_tensors_) + captured_tensors_ = convert(tf.TensorHandle, captured_tensors_) tf.add_input(desc, in_tensors_) tf.add_input(desc, captured_tensors_) if f !== nothing @@ -49940,6 +52222,10 @@ begin end function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRows") + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) + dense_shape_ = convert(tf.TensorHandle, dense_shape_) + default_value_ = convert(tf.TensorHandle, default_value_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, dense_shape_) @@ -49987,6 +52273,7 @@ begin end function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("SelfAdjointEigV2") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if compute_v !== nothing 
desc["compute_v"] = Base.Bool(compute_v) @@ -50105,6 +52392,15 @@ begin end function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagradDA") + var_ = convert(tf.TensorHandle, var_) + gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + global_step_ = convert(tf.TensorHandle, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -50218,6 +52514,13 @@ begin end function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAddSign") + var_ = convert(tf.TensorHandle, var_) + m_ = convert(tf.TensorHandle, m_) + lr_ = convert(tf.TensorHandle, lr_) + alpha_ = convert(tf.TensorHandle, alpha_) + sign_decay_ = convert(tf.TensorHandle, sign_decay_) + beta_ = convert(tf.TensorHandle, beta_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -50272,6 +52575,9 @@ begin end function roll_eager(input_, shift_, axis_; name=nothing) desc = tf.EagerOp("Roll") + input_ = convert(tf.TensorHandle, input_) + shift_ = convert(tf.TensorHandle, shift_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, input_) tf.add_input(desc, shift_) tf.add_input(desc, axis_) @@ -50313,6 +52619,8 @@ begin end function xdivy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xdivy") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -50333,7 +52641,7 @@ end """ - max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=) + max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=NDHWC) """ @@ -50366,6 +52674,9 @@ begin end function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGradGrad") + orig_input_ = convert(tf.TensorHandle, orig_input_) + orig_output_ = convert(tf.TensorHandle, orig_output_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -50400,7 +52711,7 @@ end """ - crop_and_resize(image, boxes, box_ind, crop_size; method=, extrapolation_value=?) + crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=?) 
""" @@ -50429,6 +52740,10 @@ begin end function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) desc = tf.EagerOp("CropAndResize") + image_ = convert(tf.TensorHandle, image_) + boxes_ = convert(tf.TensorHandle, boxes_) + box_ind_ = convert(tf.TensorHandle, box_ind_) + crop_size_ = convert(tf.TensorHandle, crop_size_) tf.add_input(desc, image_) tf.add_input(desc, boxes_) tf.add_input(desc, box_ind_) @@ -50492,6 +52807,12 @@ begin end function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedBiasAdd") + input_ = convert(tf.TensorHandle, input_) + bias_ = convert(tf.TensorHandle, bias_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_bias_ = convert(tf.TensorHandle, min_bias_) + max_bias_ = convert(tf.TensorHandle, max_bias_) tf.add_input(desc, input_) tf.add_input(desc, bias_) tf.add_input(desc, min_input_) @@ -50537,6 +52858,8 @@ begin end function kmc2chain_initialization_eager(distances_, seed_; name=nothing) desc = tf.EagerOp("KMC2ChainInitialization") + distances_ = convert(tf.TensorHandle, distances_) + seed_ = convert(tf.TensorHandle, seed_) tf.add_input(desc, distances_) tf.add_input(desc, seed_) res = tf.execute(desc) @@ -50591,6 +52914,7 @@ begin end function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstageNoKey") + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, indices_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -50649,6 +52973,9 @@ begin end function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdSub") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -50696,6 +53023,8 @@ begin end function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinear") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -50751,6 +53080,8 @@ begin end function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapPeek") + key_ = convert(tf.TensorHandle, key_) + indices_ = convert(tf.TensorHandle, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -50815,6 +53146,7 @@ begin end function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArray") + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, size_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -50868,6 +53200,9 @@ begin end function inplace_sub_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceSub") + x_ = convert(tf.TensorHandle, x_) + i_ = convert(tf.TensorHandle, i_) + v_ = convert(tf.TensorHandle, v_) tf.add_input(desc, x_) tf.add_input(desc, i_) tf.add_input(desc, v_) @@ -50908,6 +53243,8 @@ begin end function pow_eager(x_, y_; 
name=nothing) desc = tf.EagerOp("Pow") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -50953,6 +53290,8 @@ begin end function stateful_standard_normal_eager(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) desc = tf.EagerOp("StatefulStandardNormal") + resource_ = convert(tf.TensorHandle, resource_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, resource_) tf.add_input(desc, shape_) if dtype !== nothing @@ -50995,6 +53334,7 @@ begin end function ref_next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("RefNextIteration") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -51032,6 +53372,8 @@ begin end function scalar_summary_eager(tags_, values_; name=nothing) desc = tf.EagerOp("ScalarSummary") + tags_ = convert(tf.TensorHandle, tags_) + values_ = convert(tf.TensorHandle, values_) tf.add_input(desc, tags_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) @@ -51077,6 +53419,8 @@ begin end function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) desc = tf.EagerOp("StringSplitV2") + input_ = convert(tf.TensorHandle, input_) + sep_ = convert(tf.TensorHandle, sep_) tf.add_input(desc, input_) tf.add_input(desc, sep_) if maxsplit !== nothing @@ -51115,6 +53459,7 @@ begin end function bessel_i0e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI0e") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -51158,6 +53503,7 @@ begin end function unique_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("Unique") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if out_idx !== nothing desc["out_idx"] = Base.identity(out_idx) @@ -51211,6 +53557,9 @@ begin end function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(tf.TensorHandle, parameters_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -51309,6 +53658,7 @@ begin end function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("EagerPyFunc") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -51352,6 +53702,7 @@ begin end function next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("NextIteration") + data_ = convert(tf.TensorHandle, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -51400,6 +53751,8 @@ begin end function case_eager(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) desc = tf.EagerOp("Case") + branch_index_ = convert(tf.TensorHandle, branch_index_) + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, branch_index_) tf.add_input(desc, input_) if Tin !== nothing @@ -51453,6 +53806,9 @@ begin end function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterSub") + tensor_ = convert(tf.TensorHandle, tensor_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, tensor_) 
tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -51501,6 +53857,9 @@ begin end function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMax") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -51543,6 +53902,7 @@ begin end function sqrt_eager(x_; name=nothing) desc = tf.EagerOp("Sqrt") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -51582,6 +53942,8 @@ begin end function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorTakeGradient") + handle_ = convert(tf.TensorHandle, handle_) + num_required_ = convert(tf.TensorHandle, num_required_) tf.add_input(desc, handle_) tf.add_input(desc, num_required_) if dtype !== nothing @@ -51631,6 +53993,10 @@ begin end function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklAdd") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + mkl_x_ = convert(tf.TensorHandle, mkl_x_) + mkl_y_ = convert(tf.TensorHandle, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -51670,6 +54036,7 @@ begin end function reciprocal_eager(x_; name=nothing) desc = tf.EagerOp("Reciprocal") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -51707,6 +54074,7 @@ begin end function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) desc = tf.EagerOp("OutfeedEnqueueTuple") + inputs_ = convert(tf.TensorHandle, inputs_) tf.add_input(desc, inputs_) if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -51743,6 +54111,7 @@ begin end function string_strip_eager(input_; name=nothing) desc = tf.EagerOp("StringStrip") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(string_strip, [input_], name=nothing) @@ -51786,6 +54155,9 @@ begin end function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") + inputs_ = convert(tf.TensorHandle, inputs_) + min_ = convert(tf.TensorHandle, min_) + max_ = convert(tf.TensorHandle, max_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) tf.add_input(desc, max_) @@ -51827,6 +54199,7 @@ begin end function barrier_ready_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierReadySize") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing) @@ -51863,6 +54236,7 @@ begin end function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucket") + string_tensor_ = convert(tf.TensorHandle, string_tensor_) tf.add_input(desc, string_tensor_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -51912,6 +54286,8 @@ begin end function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcat") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -51956,6 
+54332,9 @@ begin end function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilename") + basename_ = convert(tf.TensorHandle, basename_) + shard_ = convert(tf.TensorHandle, shard_) + num_shards_ = convert(tf.TensorHandle, num_shards_) tf.add_input(desc, basename_) tf.add_input(desc, shard_) tf.add_input(desc, num_shards_) @@ -52000,6 +54379,7 @@ begin end function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFunc") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -52050,6 +54430,9 @@ begin end function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentProd") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -52092,6 +54475,7 @@ begin end function count_up_to_eager(ref_; name=nothing, limit=nothing) desc = tf.EagerOp("CountUpTo") + ref_ = convert(tf.TensorHandle, ref_) tf.add_input(desc, ref_) if limit !== nothing desc["limit"] = Base.Int(limit) @@ -52142,6 +54526,8 @@ begin end function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) desc = tf.EagerOp("RandomGamma") + shape_ = convert(tf.TensorHandle, shape_) + alpha_ = convert(tf.TensorHandle, alpha_) tf.add_input(desc, shape_) tf.add_input(desc, alpha_) if seed !== nothing @@ -52192,6 +54578,8 @@ begin end function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGrad") + handle_ = convert(tf.TensorHandle, handle_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -52241,6 +54629,8 @@ begin end function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2D") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -52300,6 +54690,9 @@ begin end function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unbatch") + batched_tensor_ = convert(tf.TensorHandle, batched_tensor_) + batch_index_ = convert(tf.TensorHandle, batch_index_) + id_ = convert(tf.TensorHandle, id_) tf.add_input(desc, batched_tensor_) tf.add_input(desc, batch_index_) tf.add_input(desc, id_) @@ -52346,6 +54739,7 @@ begin end function get_session_handle_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandle") + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) @@ -52527,6 +54921,15 @@ begin end function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrl") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + linear_ = convert(tf.TensorHandle, linear_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + lr_ = convert(tf.TensorHandle, lr_) + l1_ = convert(tf.TensorHandle, l1_) + l2_ = convert(tf.TensorHandle, l2_) + lr_power_ 
= convert(tf.TensorHandle, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -52590,6 +54993,9 @@ begin end function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDatasetV2") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + batch_size_ = convert(tf.TensorHandle, batch_size_) + drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, drop_remainder_) @@ -52647,6 +55053,12 @@ begin end function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMinimum") + a_indices_ = convert(tf.TensorHandle, a_indices_) + a_values_ = convert(tf.TensorHandle, a_values_) + a_shape_ = convert(tf.TensorHandle, a_shape_) + b_indices_ = convert(tf.TensorHandle, b_indices_) + b_values_ = convert(tf.TensorHandle, b_values_) + b_shape_ = convert(tf.TensorHandle, b_shape_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -52692,6 +55104,8 @@ begin end function reverse_v2_eager(tensor_, axis_; name=nothing) desc = tf.EagerOp("ReverseV2") + tensor_ = convert(tf.TensorHandle, tensor_) + axis_ = convert(tf.TensorHandle, axis_) tf.add_input(desc, tensor_) tf.add_input(desc, axis_) desc["T"] = tf.data_type(tensor_) @@ -52772,6 +55186,10 @@ begin end function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSlice") + input_ = convert(tf.TensorHandle, input_) + begin_ = convert(tf.TensorHandle, begin_) + end_ = convert(tf.TensorHandle, end_) + strides_ = convert(tf.TensorHandle, strides_) tf.add_input(desc, input_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -52845,6 +55263,7 @@ begin end function matching_files_eager(pattern_; name=nothing) desc = tf.EagerOp("MatchingFiles") + pattern_ = convert(tf.TensorHandle, pattern_) tf.add_input(desc, pattern_) res = tf.execute(desc) node = tf.TapeNode(matching_files, [pattern_], name=nothing) @@ -52881,6 +55300,7 @@ begin end function encode_base64_eager(input_; name=nothing, pad=nothing) desc = tf.EagerOp("EncodeBase64") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if pad !== nothing desc["pad"] = Base.Bool(pad) @@ -52923,6 +55343,7 @@ begin end function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextAsOptional") + iterator_ = convert(tf.TensorHandle, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -53022,6 +55443,7 @@ begin end function iterator_to_string_handle_eager(resource_handle_; name=nothing) desc = tf.EagerOp("IteratorToStringHandle") + resource_handle_ = convert(tf.TensorHandle, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing) @@ -53070,6 +55492,9 @@ begin end function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradGradWithArgmax") + input_ = convert(tf.TensorHandle, input_) + grad_ = convert(tf.TensorHandle, 
grad_) + argmax_ = convert(tf.TensorHandle, argmax_) tf.add_input(desc, input_) tf.add_input(desc, grad_) tf.add_input(desc, argmax_) @@ -53124,6 +55549,9 @@ begin end function tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGather") + input_handle_ = convert(tf.TensorHandle, input_handle_) + indices_ = convert(tf.TensorHandle, indices_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -53174,6 +55602,8 @@ begin end function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) desc = tf.EagerOp("Multinomial") + logits_ = convert(tf.TensorHandle, logits_) + num_samples_ = convert(tf.TensorHandle, num_samples_) tf.add_input(desc, logits_) tf.add_input(desc, num_samples_) if seed !== nothing @@ -53225,6 +55655,9 @@ begin end function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayRead") + handle_ = convert(tf.TensorHandle, handle_) + index_ = convert(tf.TensorHandle, index_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, flow_in_) @@ -53271,6 +55704,8 @@ begin end function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetGet") + materialized_ = convert(tf.TensorHandle, materialized_) + index_ = convert(tf.TensorHandle, index_) tf.add_input(desc, materialized_) tf.add_input(desc, index_) if output_types !== nothing @@ -53322,6 +55757,8 @@ begin end function tpu_partitioned_call_eager(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("TPUPartitionedCall") + args_ = convert(tf.TensorHandle, args_) + device_ordinal_ = convert(tf.TensorHandle, device_ordinal_) tf.add_input(desc, args_) tf.add_input(desc, device_ordinal_) if Tin !== nothing @@ -53398,6 +55835,14 @@ begin end function quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndReluAndRequantize") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) + min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) + max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -53458,6 +55903,7 @@ begin end function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandleV2") + string_handle_ = convert(tf.TensorHandle, string_handle_) tf.add_input(desc, string_handle_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -53500,6 +55946,8 @@ begin end function bitwise_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseOr") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) 
tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -53544,6 +55992,9 @@ begin end function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMax") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) + num_segments_ = convert(tf.TensorHandle, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -53594,6 +56045,10 @@ begin end function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSquaredDifference") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) + mkl_x_ = convert(tf.TensorHandle, mkl_x_) + mkl_y_ = convert(tf.TensorHandle, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -53646,6 +56101,9 @@ begin end function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilter") + input_ = convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -53711,6 +56169,8 @@ begin end function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) desc = tf.EagerOp("If") + cond_ = convert(tf.TensorHandle, cond_) + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, cond_) tf.add_input(desc, input_) if Tin !== nothing @@ -53775,6 +56235,8 @@ begin end function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FlatMapDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if f !== nothing @@ -53833,6 +56295,9 @@ begin end function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatter") + tensor_ = convert(tf.TensorHandle, tensor_) + indices_ = convert(tf.TensorHandle, indices_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -53879,6 +56344,8 @@ begin end function softsign_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftsignGrad") + gradients_ = convert(tf.TensorHandle, gradients_) + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -53922,6 +56389,7 @@ begin end function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("CopyHost") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -53969,6 +56437,9 @@ begin end function lin_space_eager(start_, stop_, num_; name=nothing) desc = tf.EagerOp("LinSpace") + start_ = convert(tf.TensorHandle, start_) + stop_ = convert(tf.TensorHandle, stop_) + num_ = convert(tf.TensorHandle, num_) tf.add_input(desc, start_) tf.add_input(desc, stop_) tf.add_input(desc, num_) @@ -54013,6 +56484,8 @@ 
begin end function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) desc = tf.EagerOp("_ParallelConcatUpdate") + value_ = convert(tf.TensorHandle, value_) + update_ = convert(tf.TensorHandle, update_) tf.add_input(desc, value_) tf.add_input(desc, update_) if loc !== nothing @@ -54100,6 +56573,8 @@ begin end function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPushV2") + handle_ = convert(tf.TensorHandle, handle_) + elem_ = convert(tf.TensorHandle, elem_) tf.add_input(desc, handle_) tf.add_input(desc, elem_) if swap_memory !== nothing @@ -54144,6 +56619,8 @@ begin end function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignVariableOp") + resource_ = convert(tf.TensorHandle, resource_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -54198,6 +56675,10 @@ begin end function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) desc = tf.EagerOp("SparseSplit") + split_dim_ = convert(tf.TensorHandle, split_dim_) + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, split_dim_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -54243,6 +56724,9 @@ begin end function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayUnpack") + handle_ = convert(tf.TensorHandle, handle_) + value_ = convert(tf.TensorHandle, value_) + flow_in_ = convert(tf.TensorHandle, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, flow_in_) @@ -54287,6 +56771,8 @@ begin end function tensor_list_stack_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) desc = tf.EagerOp("TensorListStack") + input_handle_ = convert(tf.TensorHandle, input_handle_) + element_shape_ = convert(tf.TensorHandle, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -54327,6 +56813,7 @@ begin end function barrier_incomplete_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierIncompleteSize") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing) @@ -54368,6 +56855,8 @@ begin end function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("Restore") + file_pattern_ = convert(tf.TensorHandle, file_pattern_) + tensor_name_ = convert(tf.TensorHandle, tensor_name_) tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) if dt !== nothing @@ -54431,6 +56920,7 @@ begin end function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV3") + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, size_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -54490,6 +56980,8 @@ begin end function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalAssertNextDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + transformations_ = 
convert(tf.TensorHandle, transformations_) tf.add_input(desc, input_dataset_) tf.add_input(desc, transformations_) if output_types !== nothing @@ -54536,6 +57028,8 @@ begin end function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) desc = tf.EagerOp("InTopK") + predictions_ = convert(tf.TensorHandle, predictions_) + targets_ = convert(tf.TensorHandle, targets_) tf.add_input(desc, predictions_) tf.add_input(desc, targets_) if k !== nothing @@ -54584,6 +57078,9 @@ begin end function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterSub") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -54626,6 +57123,7 @@ begin end function acosh_eager(x_; name=nothing) desc = tf.EagerOp("Acosh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -54644,7 +57142,7 @@ end """ - depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) """ @@ -54677,6 +57175,9 @@ begin end function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") + input_ = convert(tf.TensorHandle, input_) + filter_sizes_ = convert(tf.TensorHandle, filter_sizes_) + out_backprop_ = convert(tf.TensorHandle, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) tf.add_input(desc, out_backprop_) @@ -54736,6 +57237,7 @@ begin end function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) desc = tf.EagerOp("Cast") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) if SrcT !== nothing desc["SrcT"] = Base.identity(SrcT) @@ -54763,7 +57265,7 @@ end """ - quantize_v2(input, min_range, max_range; mode=, round_mode=) + quantize_v2(input, min_range, max_range; mode=MIN_COMBINED, round_mode=HALF_AWAY_FROM_ZERO) """ @@ -54794,6 +57296,9 @@ begin end function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeV2") + input_ = convert(tf.TensorHandle, input_) + min_range_ = convert(tf.TensorHandle, min_range_) + max_range_ = convert(tf.TensorHandle, max_range_) tf.add_input(desc, input_) tf.add_input(desc, min_range_) tf.add_input(desc, max_range_) @@ -54863,6 +57368,9 @@ begin end function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("GeneratorDataset") + init_func_other_args_ = convert(tf.TensorHandle, init_func_other_args_) + next_func_other_args_ = convert(tf.TensorHandle, next_func_other_args_) + finalize_func_other_args_ = convert(tf.TensorHandle, finalize_func_other_args_) tf.add_input(desc, init_func_other_args_) tf.add_input(desc, next_func_other_args_) tf.add_input(desc, finalize_func_other_args_) @@ -54922,6 +57430,7 @@ begin end function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) desc = 
tf.EagerOp("TensorForestTreeSerialize") + tree_handle_ = convert(tf.TensorHandle, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing) @@ -54958,6 +57467,8 @@ begin end function next_after_eager(x1_, x2_; name=nothing) desc = tf.EagerOp("NextAfter") + x1_ = convert(tf.TensorHandle, x1_) + x2_ = convert(tf.TensorHandle, x2_) tf.add_input(desc, x1_) tf.add_input(desc, x2_) desc["T"] = tf.data_type(x1_) @@ -54994,6 +57505,7 @@ begin end function tensor_array_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV2") + handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing) @@ -55112,6 +57624,8 @@ begin end function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderReadV2") + reader_handle_ = convert(tf.TensorHandle, reader_handle_) + queue_handle_ = convert(tf.TensorHandle, queue_handle_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) @@ -55149,6 +57663,8 @@ begin end function mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mod") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -55188,6 +57704,8 @@ begin end function add_v2_eager(x_, y_; name=nothing) desc = tf.EagerOp("AddV2") + x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.TensorHandle, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -55231,6 +57749,8 @@ begin end function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomNormal") + shape_ = convert(tf.TensorHandle, shape_) + seed_ = convert(tf.TensorHandle, seed_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) if dtype !== nothing @@ -55316,6 +57836,11 @@ begin end function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceAssign") + ref_ = convert(tf.TensorHandle, ref_) + begin_ = convert(tf.TensorHandle, begin_) + end_ = convert(tf.TensorHandle, end_) + strides_ = convert(tf.TensorHandle, strides_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, ref_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -55401,6 +57926,9 @@ begin end function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMin") + ref_ = convert(tf.TensorHandle, ref_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -55488,6 +58016,11 @@ begin end function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("ResourceStridedSliceAssign") + ref_ = convert(tf.TensorHandle, ref_) + begin_ = convert(tf.TensorHandle, begin_) + end_ = convert(tf.TensorHandle, end_) + strides_ = convert(tf.TensorHandle, strides_) + value_ = convert(tf.TensorHandle, value_) tf.add_input(desc, ref_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -55565,6 +58098,8 @@ begin 
end function random_gamma_grad_eager(alpha_, sample_; name=nothing) desc = tf.EagerOp("RandomGammaGrad") + alpha_ = convert(tf.TensorHandle, alpha_) + sample_ = convert(tf.TensorHandle, sample_) tf.add_input(desc, alpha_) tf.add_input(desc, sample_) desc["T"] = tf.data_type(alpha_) @@ -55620,6 +58155,12 @@ begin end function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) + indices_ = convert(tf.TensorHandle, indices_) + momentum_ = convert(tf.TensorHandle, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -55675,6 +58216,9 @@ begin end function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") + quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + num_streams_ = convert(tf.TensorHandle, num_streams_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, epsilon_) tf.add_input(desc, num_streams_) @@ -55726,6 +58270,9 @@ begin end function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu6") + features_ = convert(tf.TensorHandle, features_) + min_features_ = convert(tf.TensorHandle, min_features_) + max_features_ = convert(tf.TensorHandle, max_features_) tf.add_input(desc, features_) tf.add_input(desc, min_features_) tf.add_input(desc, max_features_) @@ -55781,6 +58328,12 @@ begin end function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMaximum") + a_indices_ = convert(tf.TensorHandle, a_indices_) + a_values_ = convert(tf.TensorHandle, a_values_) + a_shape_ = convert(tf.TensorHandle, a_shape_) + b_indices_ = convert(tf.TensorHandle, b_indices_) + b_values_ = convert(tf.TensorHandle, b_values_) + b_shape_ = convert(tf.TensorHandle, b_shape_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -55836,6 +58389,11 @@ begin end function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalization") + t_ = convert(tf.TensorHandle, t_) + m_ = convert(tf.TensorHandle, m_) + v_ = convert(tf.TensorHandle, v_) + beta_ = convert(tf.TensorHandle, beta_) + gamma_ = convert(tf.TensorHandle, gamma_) tf.add_input(desc, t_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -55889,6 +58447,9 @@ begin end function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) desc = tf.EagerOp("InTopKV2") + predictions_ = convert(tf.TensorHandle, predictions_) + targets_ = convert(tf.TensorHandle, targets_) + k_ = convert(tf.TensorHandle, k_) tf.add_input(desc, predictions_) tf.add_input(desc, targets_) tf.add_input(desc, k_) @@ -55927,6 +58488,7 @@ begin end function cholesky_eager(input_; name=nothing) desc = tf.EagerOp("Cholesky") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) 
@@ -55981,6 +58543,15 @@ begin end function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyCenteredRMSProp") + var_ = convert(tf.TensorHandle, var_) + mg_ = convert(tf.TensorHandle, mg_) + ms_ = convert(tf.TensorHandle, ms_) + mom_ = convert(tf.TensorHandle, mom_) + lr_ = convert(tf.TensorHandle, lr_) + rho_ = convert(tf.TensorHandle, rho_) + momentum_ = convert(tf.TensorHandle, momentum_) + epsilon_ = convert(tf.TensorHandle, epsilon_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -56043,6 +58614,10 @@ begin end function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceApplyAdagrad") + var_ = convert(tf.TensorHandle, var_) + accum_ = convert(tf.TensorHandle, accum_) + lr_ = convert(tf.TensorHandle, lr_) + grad_ = convert(tf.TensorHandle, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -56111,6 +58686,13 @@ begin end function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") + input_dataset_ = convert(tf.TensorHandle, input_dataset_) + other_arguments_ = convert(tf.TensorHandle, other_arguments_) + cycle_length_ = convert(tf.TensorHandle, cycle_length_) + block_length_ = convert(tf.TensorHandle, block_length_) + sloppy_ = convert(tf.TensorHandle, sloppy_) + buffer_output_elements_ = convert(tf.TensorHandle, buffer_output_elements_) + prefetch_input_elements_ = convert(tf.TensorHandle, prefetch_input_elements_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, cycle_length_) @@ -56168,6 +58750,8 @@ begin end function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubicGrad") + grads_ = convert(tf.TensorHandle, grads_) + original_image_ = convert(tf.TensorHandle, original_image_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) if align_corners !== nothing @@ -56207,6 +58791,7 @@ begin end function batch_self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("BatchSelfAdjointEig") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -56246,6 +58831,9 @@ begin end function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) desc = tf.EagerOp("SparseSoftmax") + sp_indices_ = convert(tf.TensorHandle, sp_indices_) + sp_values_ = convert(tf.TensorHandle, sp_values_) + sp_shape_ = convert(tf.TensorHandle, sp_shape_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -56283,6 +58871,7 @@ begin end function asinh_eager(x_; name=nothing) desc = tf.EagerOp("Asinh") + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -56346,6 +58935,12 @@ begin end function quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndRelu") + input_ = 
convert(tf.TensorHandle, input_) + filter_ = convert(tf.TensorHandle, filter_) + min_input_ = convert(tf.TensorHandle, min_input_) + max_input_ = convert(tf.TensorHandle, max_input_) + min_filter_ = convert(tf.TensorHandle, min_filter_) + max_filter_ = convert(tf.TensorHandle, max_filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -56402,6 +58997,7 @@ begin end function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixInverse") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if adjoint !== nothing desc["adjoint"] = Base.Bool(adjoint) @@ -56444,6 +59040,8 @@ begin end function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcatLists") + input_a_ = convert(tf.TensorHandle, input_a_) + input_b_ = convert(tf.TensorHandle, input_b_) tf.add_input(desc, input_a_) tf.add_input(desc, input_b_) if element_dtype !== nothing @@ -56498,6 +59096,11 @@ begin end function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("Requantize") + input_ = convert(tf.TensorHandle, input_) + input_min_ = convert(tf.TensorHandle, input_min_) + input_max_ = convert(tf.TensorHandle, input_max_) + requested_output_min_ = convert(tf.TensorHandle, requested_output_min_) + requested_output_max_ = convert(tf.TensorHandle, requested_output_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -56540,6 +59143,7 @@ begin end function fft_eager(input_; name=nothing) desc = tf.EagerOp("FFT") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -56578,6 +59182,8 @@ begin end function conjugate_transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("ConjugateTranspose") + x_ = convert(tf.TensorHandle, x_) + perm_ = convert(tf.TensorHandle, perm_) tf.add_input(desc, x_) tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) @@ -56677,6 +59283,8 @@ begin end function relu6grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("Relu6Grad") + gradients_ = convert(tf.TensorHandle, gradients_) + features_ = convert(tf.TensorHandle, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -56697,7 +59305,7 @@ end """ - scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=) + scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=lanczos3) """ @@ -56723,6 +59331,10 @@ begin end function scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslateGrad") + grads_ = convert(tf.TensorHandle, grads_) + original_image_ = convert(tf.TensorHandle, original_image_) + scale_ = convert(tf.TensorHandle, scale_) + translation_ = convert(tf.TensorHandle, translation_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) tf.add_input(desc, scale_) @@ -56771,6 +59383,7 @@ begin end function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) desc = tf.EagerOp("_ArrayToList") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if N !== nothing desc["N"] = Base.Int(N) @@ -56795,7 +59408,7 @@ end """ - cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=, input_mode=, direction=, 
dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) """ @@ -56846,6 +59459,11 @@ begin end function cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV3") + input_ = convert(tf.TensorHandle, input_) + input_h_ = convert(tf.TensorHandle, input_h_) + input_c_ = convert(tf.TensorHandle, input_c_) + params_ = convert(tf.TensorHandle, params_) + sequence_lengths_ = convert(tf.TensorHandle, sequence_lengths_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -56913,6 +59531,8 @@ begin end function expand_dims_eager(input_, dim_; name=nothing) desc = tf.EagerOp("ExpandDims") + input_ = convert(tf.TensorHandle, input_) + dim_ = convert(tf.TensorHandle, dim_) tf.add_input(desc, input_) tf.add_input(desc, dim_) desc["T"] = tf.data_type(input_) @@ -56952,6 +59572,8 @@ begin end function inv_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("InvGrad") + y_ = convert(tf.TensorHandle, y_) + dy_ = convert(tf.TensorHandle, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -56995,6 +59617,9 @@ begin end function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) desc = tf.EagerOp("NonMaxSuppression") + boxes_ = convert(tf.TensorHandle, boxes_) + scores_ = convert(tf.TensorHandle, scores_) + max_output_size_ = convert(tf.TensorHandle, max_output_size_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -57034,6 +59659,7 @@ begin end function l2loss_eager(t_; name=nothing) desc = tf.EagerOp("L2Loss") + t_ = convert(tf.TensorHandle, t_) tf.add_input(desc, t_) desc["T"] = tf.data_type(t_) res = tf.execute(desc) @@ -57074,6 +59700,8 @@ begin end function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeArea") + images_ = convert(tf.TensorHandle, images_) + size_ = convert(tf.TensorHandle, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -57147,6 +59775,10 @@ begin end function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) desc = tf.EagerOp("SparseCross") + indices_ = convert(tf.TensorHandle, indices_) + values_ = convert(tf.TensorHandle, values_) + shapes_ = convert(tf.TensorHandle, shapes_) + dense_inputs_ = convert(tf.TensorHandle, dense_inputs_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shapes_) @@ -57207,6 +59839,7 @@ begin end function batch_fft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT3D") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft3d, [input_], name=nothing) @@ -57250,6 +59883,7 @@ begin end function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomStandardNormal") + shape_ = convert(tf.TensorHandle, shape_) tf.add_input(desc, shape_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -57303,6 +59937,9 @@ begin end 
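# Each eager wrapper also finishes the same way: after executing, it records
# the call on the active tape, keyed by the op's first output handle, so that
# grad() can later walk the chain of executed ops backwards. Condensed from
# the generated batch_fft3d code just above (patch 18 below additionally
# appends `res`, so the outputs are cached on the node too):
#
#     res = tf.execute(desc)
#     node = tf.TapeNode(batch_fft3d, [input_], name=nothing)
#     tf.add_node(res[1], node)
#     return res[1]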
function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMul") + resource_ = convert(tf.TensorHandle, resource_) + indices_ = convert(tf.TensorHandle, indices_) + updates_ = convert(tf.TensorHandle, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -57393,6 +60030,16 @@ begin end function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizer") + sparse_example_indices_ = convert(tf.TensorHandle, sparse_example_indices_) + sparse_feature_indices_ = convert(tf.TensorHandle, sparse_feature_indices_) + sparse_feature_values_ = convert(tf.TensorHandle, sparse_feature_values_) + dense_features_ = convert(tf.TensorHandle, dense_features_) + example_weights_ = convert(tf.TensorHandle, example_weights_) + example_labels_ = convert(tf.TensorHandle, example_labels_) + sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) + sparse_weights_ = convert(tf.TensorHandle, sparse_weights_) + dense_weights_ = convert(tf.TensorHandle, dense_weights_) + example_state_data_ = convert(tf.TensorHandle, example_state_data_) tf.add_input(desc, sparse_example_indices_) tf.add_input(desc, sparse_feature_indices_) tf.add_input(desc, sparse_feature_values_) @@ -57465,6 +60112,8 @@ begin end function zeta_eager(x_, q_; name=nothing) desc = tf.EagerOp("Zeta") + x_ = convert(tf.TensorHandle, x_) + q_ = convert(tf.TensorHandle, q_) tf.add_input(desc, x_) tf.add_input(desc, q_) desc["T"] = tf.data_type(x_) @@ -57530,6 +60179,8 @@ begin end function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBox") + image_size_ = convert(tf.TensorHandle, image_size_) + bounding_boxes_ = convert(tf.TensorHandle, bounding_boxes_) tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) if seed !== nothing @@ -57589,6 +60240,8 @@ begin end function igamma_grad_a_eager(a_, x_; name=nothing) desc = tf.EagerOp("IgammaGradA") + a_ = convert(tf.TensorHandle, a_) + x_ = convert(tf.TensorHandle, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -57630,6 +60283,8 @@ begin end function segment_max_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMax") + data_ = convert(tf.TensorHandle, data_) + segment_ids_ = convert(tf.TensorHandle, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -57671,6 +60326,9 @@ begin end function range_eager(start_, limit_, delta_; name=nothing) desc = tf.EagerOp("Range") + start_ = convert(tf.TensorHandle, start_) + limit_ = convert(tf.TensorHandle, limit_) + delta_ = convert(tf.TensorHandle, delta_) tf.add_input(desc, start_) tf.add_input(desc, limit_) tf.add_input(desc, delta_) @@ -57768,6 +60426,7 @@ begin end function flush_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("FlushSummaryWriter") + writer_ 
= convert(tf.TensorHandle, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing) @@ -57785,7 +60444,7 @@ end """ - dequantize(input, min_range, max_range; mode=) + dequantize(input, min_range, max_range; mode=MIN_COMBINED) """ @@ -57809,6 +60468,9 @@ begin end function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) desc = tf.EagerOp("Dequantize") + input_ = convert(tf.TensorHandle, input_) + min_range_ = convert(tf.TensorHandle, min_range_) + max_range_ = convert(tf.TensorHandle, max_range_) tf.add_input(desc, input_) tf.add_input(desc, min_range_) tf.add_input(desc, max_range_) @@ -57856,6 +60518,8 @@ begin end function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRowsGrad") + reverse_index_map_ = convert(tf.TensorHandle, reverse_index_map_) + grad_values_ = convert(tf.TensorHandle, grad_values_) tf.add_input(desc, reverse_index_map_) tf.add_input(desc, grad_values_) desc["T"] = tf.data_type(grad_values_) @@ -57897,6 +60561,7 @@ begin end function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNext") + iterator_ = convert(tf.TensorHandle, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -57946,6 +60611,10 @@ begin end function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) desc = tf.EagerOp("SparseTensorDenseAdd") + a_indices_ = convert(tf.TensorHandle, a_indices_) + a_values_ = convert(tf.TensorHandle, a_values_) + a_shape_ = convert(tf.TensorHandle, a_shape_) + b_ = convert(tf.TensorHandle, b_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -57990,6 +60659,7 @@ begin end function prevent_gradient_eager(input_; name=nothing, message=nothing) desc = tf.EagerOp("PreventGradient") + input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) if message !== nothing desc["message"] = Base.String(message) @@ -58032,6 +60702,7 @@ begin end function lookup_table_export_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExport") + table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing) diff --git a/src/ops/op_names.txt b/src/ops/op_names.txt new file mode 100644 index 00000000..60222d70 --- /dev/null +++ b/src/ops/op_names.txt @@ -0,0 +1,1148 @@ +ReduceJoin +ReduceDataset +TensorListFromTensor +ExtractJpegShape +Svd +IteratorGetNextSync +RefEnter +Erf +LookupTableExportV2 +Round +OutfeedDequeue +TensorForestTreeIsInitializedOp +Merge +HistogramFixedWidth +Asin +Any +RsqrtGrad +TensorArrayScatter +DynamicPartition +ExperimentalPrivateThreadPoolDataset +ReaderSerializeState +RightShift +AvgPool3D +EncodePng +DebugIdentity +Imag +ResourceSparseApplyFtrlV2 +StageClear +Sign +PopulationCount +Neg +AnonymousIterator +SparseReduceSum +FilterDataset +StringLength +Conv3D +RetrieveTPUEmbeddingAdagradParameters +OptionalHasValue +ApplyAdam +CudnnRNNParamsToCanonical +IRFFT3D +Angle +TensorForestTreeResourceHandleOp +LearnedUnigramCandidateSampler +_Arg +MatrixSquareRoot +SparseDenseCwiseMul +TensorArrayConcatV3 +UnicodeScript +BatchCholeskyGrad +Mean +BatchFFT +Sin +BoostedTreesEnsembleResourceHandleOp +QuantizedMaxPool +OrderedMapStage +PartitionedCall 
+SparseApplyAdagrad +DecodeProtoV2 +Betainc +GuaranteeConst +DecodeBmp +BoostedTreesBucketize +ShutdownDistributedTPU +ExperimentalStatsAggregatorSummary +Timestamp +MatrixExponential +Size +AddN +SparseSegmentSum +BatchDataset +RecordInput +QueueDequeueUpToV2 +RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug +LoadTPUEmbeddingRMSPropParametersGradAccumDebug +SerializeTensor +Mul +SoftmaxCrossEntropyWithLogits +ResourceScatterDiv +FixedLengthRecordDatasetV2 +SkipDataset +Cosh +FusedBatchNormV2 +TensorArraySplit +CTCLoss +QuantizedReshape +FloorDiv +TensorArrayV2 +BarrierClose +ReadVariableOp +QuantizedMul +Selu +CudnnRNNBackpropV3 +LookupTableInsert +ComplexAbs +TridiagonalSolve +LookupTableImport +Abs +ResourceApplyAdam +WriteHistogramSummary +ExperimentalIndexedDatasetMaterialize +_HostSend +Greater +NcclBroadcast +TensorListPushBackBatch +ResourceScatterMin +Slice +UnicodeDecode +TakeDataset +BoostedTreesMakeStatsSummary +AllCandidateSampler +Conv2DBackpropInput +DatasetToSingleElement +CacheDataset +FakeQuantWithMinMaxVarsGradient +FusedResizeAndPadConv2D +Batch +CollectiveBcastRecv +BatchToSpaceND +LoopCond +DepthToSpace +DestroyTemporaryVariable +CudnnRNN +RefIdentity +MaxPool3DGrad +LoadTPUEmbeddingMomentumParametersGradAccumDebug +PaddingFIFOQueueV2 +Conv3DBackpropInput +RefExit +MapClear +EncodeWav +TensorSummaryV2 +QueueDequeueUpTo +MatrixBandPart +Copy +ShapeN +ExperimentalParseExampleDataset +Concat +DataFormatDimMap +IdentityReader +Softplus +ResourceSparseApplyProximalAdagrad +ParseSingleSequenceExample +MatrixDiag +Fact +ShardDataset +MaxPoolGradGrad +ResizeBilinearGrad +BatchToSpace +OptionalFromValue +Xlogy +Cross +BitwiseAnd +BroadcastTo +EluGrad +CudnnRNNBackprop +StringToHashBucketFast +MutableHashTable +Relu +NthElement +Softsign +MutableDenseHashTable +_ShutdownDistributedTPU +Polygamma +NcclReduce +ArgMax +MatrixSetDiag +SpaceToBatchND +SparseReshape +OptimizeDataset +ConcatV2 +ResourceSparseApplyAdadelta +Tile +MutexV2 +SerializeManySparse +TPUEmbeddingActivations +BatchMatrixSolveLs +NotEqual +Lgamma +TPUReplicateMetadata +ExperimentalThreadPoolHandle +SelfAdjointEig +BoostedTreesQuantileStreamResourceGetBucketBoundaries +SparseDenseCwiseDiv +Acos +All +CompareAndBitpack +VarHandleOp +ExperimentalUniqueDataset +QuantizedConv2DWithBiasSumAndRelu +ListDiff +CreateSummaryFileWriter +GenerateVocabRemapping +BatchMatrixInverse +ControlTrigger +TPUOrdinalSelector +StopGradient +Split +Unpack +ResourceScatterMax +TensorArrayWrite +Fill +QuantizedConv2DWithBiasAndRequantize +Softmax +ResizeBicubic +InfeedDequeueTuple +MultiDeviceIterator +DecodeCSV +LookupTableFind +ShuffleAndRepeatDataset +RequantizationRangePerChannel +ExperimentalUnbatchDataset +AvgPool3DGrad +PlaceholderWithDefault +InitializeTableV2 +SetSize +Assert +NonMaxSuppressionV4 +SampleDistortedBoundingBoxV2 +InitializeTableFromTextFile +LookupTableSize +SparseApplyAdagradDA +BroadcastGradientArgs +SummaryWriter +RecvTPUEmbeddingActivations +_While +InitializeTable +DebugNumericSummary +RetrieveTPUEmbeddingAdagradParametersGradAccumDebug +Tanh +SymbolicGradient +BoostedTreesUpdateEnsemble +ApplyMomentum +ReaderRead +_WaitForDistributedTPU +MutexLock +AccumulatorSetGlobalStep +QuantizedAdd +Squeeze +ExperimentalMatchingFilesDataset +ExperimentalDatasetToTFRecord +LoadTPUEmbeddingStochasticGradientDescentParameters +NoOp +ZipDataset +IdentityReaderV2 +LMDBReader +NcclAllReduce +TextLineDataset +SdcaShrinkL1 +TFRecordReaderV2 +MultiDeviceIteratorFromStringHandle +PaddedBatchDatasetV2 
+LoadTPUEmbeddingProximalAdagradParameters +TensorArraySize +OrderedMapSize +StatelessRandomUniform +SparseToSparseSetOperation +TensorSummary +RemoteFusedGraphExecute +SparseSliceGrad +Cumsum +BatchNormWithGlobalNormalizationGrad +AvgPoolGrad +RestoreV2 +Relu6 +SparseApplyRMSProp +_Recv +MaxPool +Invert +_UnaryOpsComposition +ExperimentalMapDataset +LoadTPUEmbeddingADAMParameters +ParseTensor +ExperimentalMaterializedIndexDatasetHandle +MultiDeviceIteratorGetNextFromShard +RandomUniformInt +SparseSoftmaxCrossEntropyWithLogits +TensorArrayReadV2 +ReaderReadUpTo +EncodeProto +StridedSliceGrad +_NcclReduceSend +PaddedBatchDataset +DataFormatVecPermute +StringFormat +AsString +QueueEnqueueMany +FakeParam +ApplyAdagrad +ExperimentalIteratorGetDevice +AdjustContrast +ExtractImagePatches +ScaleAndTranslate +OptionalNone +VariableV2 +Elu +ScatterUpdate +FloorMod +ExperimentalIgnoreErrorsDataset +ExperimentalSetStatsAggregatorDataset +ComputeAccidentalHits +StringToNumber +Snapshot +DeserializeIterator +Atan +MatMul +Erfc +SigmoidGrad +FixedLengthRecordReaderV2 +NonMaxSuppressionV3 +Dilation2DBackpropInput +LogicalOr +ResourceApplyAdadelta +DenseToSparseSetOperation +ReaderNumRecordsProduced +AdjustHue +BoostedTreesQuantileStreamResourceFlush +ExperimentalMapAndBatchDataset +RealDiv +RestoreSlice +StackPopV2 +Reverse +DecodePng +NonMaxSuppressionV2 +Igamma +Digamma +ResourceApplyAdaMax +SpaceToDepth +SqrtGrad +MapUnstage +Qr +BoostedTreesCalculateBestGainsPerFeature +UnbatchGrad +LogSoftmax +ResourceCountUpTo +AccumulateNV2 +ParallelMapDataset +RandomUniform +UnicodeTranscode +ReaderReset +_NcclBroadcastSend +BatchMatrixDeterminant +LessEqual +ApplyGradientDescent +SparseSegmentSqrtN +MatrixLogarithm +ScatterMul +DecodeJpeg +RandomShuffleQueueV2 +QueueEnqueueManyV2 +ResourceSparseApplyCenteredRMSProp +InterleaveDataset +StackPop +MaxPoolV2 +BoostedTreesDeserializeEnsemble +LoadAndRemapMatrix +SparseApplyProximalGradientDescent +PyFuncStateless +Where +Mfcc +CheckNumerics +TPUCompilationResult +RetrieveTPUEmbeddingStochasticGradientDescentParameters +SparseSegmentMeanGrad +TryRpc +BatchMatrixTriangularSolve +_Retval +UniqueWithCounts +Add +ExperimentalScanDataset +AssignAddVariableOp +SplitV +Assign +MaxPoolWithArgmax +QuantizedReluX +RandomShuffleQueue +FFT2D +ExperimentalThreadPoolDataset +ExperimentalDirectedInterleaveDataset +SparseSegmentSqrtNGrad +Real +OrderedMapUnstage +RFFT2D +VarIsInitializedOp +BoostedTreesQuantileStreamResourceHandleOp +Atan2 +RandomPoisson +ReverseSequence +OutfeedEnqueue +Sub +StringSplit +Cumprod +QuantizedResizeBilinear +ParseSingleExample +IsVariableInitialized +ExperimentalStatsAggregatorHandle +TensorListConcatV2 +CudnnRNNV2 +ResourceScatterSub +AssignAdd +TensorDataset +Bucketize +SparseReduceMax +RetrieveTPUEmbeddingMDLAdagradLightParameters +TensorArrayGradWithShape +TensorArrayCloseV3 +NonMaxSuppressionWithOverlaps +Pack +TensorArrayGradV2 +AssignSubVariableOp +BatchFFT2D +CloseSummaryWriter +Rank +FFT3D +ApplyFtrl +Abort +AudioSpectrogram +VariableShape +FIFOQueueV2 +Variable +TensorForestCreateTreeVariable +MaxPoolGradWithArgmax +RefSwitch +SdcaFprint +ExperimentalChooseFastestDataset +LeakyRelu +IdentityN +CudnnRNNBackpropV2 +RequantizationRange +Maximum +Reshape +MatrixSolveLs +TFRecordDataset +BoostedTreesExampleDebugOutputs +HSVToRGB +ExperimentalMaxIntraOpParallelismDataset +ScatterDiv +DecodeWav +Log +SaveV2 +DeepCopy +ModelDataset +ParseSequenceExample +Sinh +IteratorV2 +TensorArrayWriteV2 +TensorListElementShape +QueueSizeV2 +Expm1 
+BatchMatrixBandPart +ConcatenateDataset +DecodeGif +TPUReplicate +BatchSelfAdjointEigV2 +Shape +RepeatDataset +CropAndResizeGradBoxes +ReciprocalGrad +BatchMatrixSolve +MutableHashTableV2 +Exit +LRN +StatelessIf +TensorListSetItem +Rsqrt +QuantizedConv2DWithBiasSumAndReluAndRequantize +DeleteSessionTensor +OneHot +ResourceApplyFtrl +SdcaOptimizerV2 +QueueEnqueue +ConditionalAccumulator +CTCBeamSearchDecoder +WholeFileReader +ApplyRMSProp +AdjustSaturation +LookupTableRemoveV2 +QueueClose +PrefetchDataset +MapDataset +QuantizedConv2DWithBias +TensorArrayReadV3 +Identity +Print +CollectiveBcastSend +_ListToArray +NegTrain +WorkerHeartbeat +MergeV2Checkpoints +CollectivePermute +QuantizeAndDequantizeV3 +HashTable +SoftplusGrad +FixedLengthRecordReader +TensorArrayScatterV2 +DecodeJSONExample +FusedBatchNormGradV2 +_HostCast +TFRecordReader +While +StatelessMultinomial +ScatterAdd +Conj +ParallelDynamicStitch +MakeIterator +RFFT3D +SparseReduceSumSparse +CollectiveGather +CombinedNonMaxSuppression +_ScopedAllocator +LoadTPUEmbeddingAdadeltaParameters +SparseAdd +CTCGreedyDecoder +ImmutableConst +ConsumeMutexLock +GreaterEqual +InitializeTableFromTextFileV2 +QueueDequeue +Equal +IteratorFromStringHandle +TensorListSplit +FractionalMaxPool +ScatterNd +TensorListScatterIntoExistingList +Select +Min +LRNGrad +RandomPoissonV2 +FIFOQueue +ResourceSparseApplyProximalGradientDescent +ExperimentalNonSerializableDataset +ExperimentalBytesProducedStatsDataset +Dilation2DBackpropFilter +_If +BiasAddGrad +ReaderSerializeStateV2 +WrapDatasetVariant +ParallelInterleaveDatasetV2 +DepthwiseConv2dNativeBackpropInput +ResourceApplyRMSProp +SparseAccumulatorTakeGradient +ExperimentalLMDBDataset +StackCloseV2 +MapSize +ResourceApplyAdagradDA +TensorForestTreeSize +MatrixDiagPart +ReaderNumWorkUnitsCompletedV2 +TensorArraySplitV3 +SparseToDense +TPUReplicatedInput +StackClose +DeserializeManySparse +_NcclReduceRecv +MirrorPadGrad +BroadcastArgs +StatelessTruncatedNormal +RegexFullMatch +UnwrapDatasetVariant +Empty +OutfeedDequeueTuple +Div +Barrier +TruncateDiv +UnicodeEncode +MergeSummary +FakeQueue +BatchCholesky +Iterator +BesselI1e +ImportEvent +QuantizedInstanceNorm +LoadTPUEmbeddingAdagradParameters +TensorArrayWriteV3 +DenseToDenseSetOperation +EncodeJpeg +InplaceUpdate +FusedPadConv2D +QuantizedRelu +GatherNd +Placeholder +FilterByLastComponentDataset +ClipByValue +ImageSummary +RetrieveTPUEmbeddingAdadeltaParameters +StringJoin +ResourceScatterNdAdd +BoostedTreesQuantileStreamResourceDeserialize +LeftShift +RequantizePerChannel +TensorScatterAdd +_VarHandlesOp +IFFT3D +EuclideanNorm +RefSelect +SparseTensorSliceDataset +RetrieveTPUEmbeddingFTRLParametersGradAccumDebug +BatchIFFT2D +TensorArrayGather +SparseSegmentMeanWithNumSegments +EnsureShape +ApplyProximalGradientDescent +CollectiveReduce +IsNan +ApplyAdaMax +DecodeAndCropJpeg +ApplyCenteredRMSProp +Conv3DBackpropFilterV2 +MatrixTriangularSolve +ReaderNumWorkUnitsCompleted +WriteAudioSummary +ShardedFilespec +DivNoNan +SparseAccumulatorApplyGradient +RaggedTensorToSparse +ExtractVolumePatches +BarrierInsertMany +Const +SpaceToBatch +StageSize +EmptyTensorList +QuantizedConv2DAndRequantize +Lu +DecodeCompressed +GetSessionTensor +TensorArrayGatherV3 +LoadTPUEmbeddingFTRLParametersGradAccumDebug +DestroyResourceOp +TextLineReader +CreateSummaryDbWriter +TanhGrad +DecodeBase64 +MaxPoolGradGradV2 +AudioSummaryV2 +StatefulPartitionedCall +_ScopedAllocatorConcat +FakeQuantWithMinMaxArgsGradient +BatchSvd +MapStage +ResourceSparseApplyFtrl 
+ResizeNearestNeighbor +ExperimentalCSVDataset +_MklMul +BatchMatrixDiag +IsInf +FixedUnigramCandidateSampler +SparseApplyFtrlV2 +UnravelIndex +Max +IFFT2D +SparseConcat +HistogramSummary +SegmentSum +Exp +ConfigureDistributedTPU +ResourceScatterNdSub +_XlaSendFromHost +GetSessionHandleV2 +ReluGrad +UnsortedSegmentMin +ParseExample +QueueEnqueueV2 +ScatterNdAdd +ReaderNumRecordsProducedV2 +LoadTPUEmbeddingCenteredRMSPropParameters +AssignSub +UnsortedSegmentSum +FusedBatchNormGrad +MaxPoolGradV2 +QuantizedConv2DWithBiasAndRelu +BoostedTreesCreateEnsemble +OrderedMapIncompleteSize +Skipgram +ArgMin +QueueDequeueMany +BoostedTreesSerializeEnsemble +Minimum +Substr +QueueSize +ApplyFtrlV2 +LoadTPUEmbeddingMomentumParameters +SparseSegmentMean +ResourceApplyProximalAdagrad +TensorArrayGatherV2 +Less +HostConst +UpperBound +TensorListGetItem +FakeQuantWithMinMaxVars +IsBoostedTreesQuantileStreamResourceInitialized +ReaderReadUpToV2 +Complex +TensorListReserve +Bitcast +PriorityQueue +QuantizedBatchNormWithGlobalNormalization +Cos +QuantizeDownAndShrinkRange +ExperimentalRandomDataset +Rpc +QuantizedConv2DWithBiasSignedSumAndReluAndRequantize +TensorListLength +MapIncompleteSize +StatelessWhile +SparseConditionalAccumulator +SegmentMin +WriteGraphSummary +CholeskyGrad +LogUniformCandidateSampler +SerializeSparse +ScatterNdNonAliasingAdd +RefMerge +TensorListConcat +CudnnRNNCanonicalToParams +SparseApplyAdadelta +TensorArrayClose +SeluGrad +CropAndResizeGradImage +RFFT +ExperimentalSqlDataset +ResourceApplyPowerSign +MatrixDeterminant +StaticRegexReplace +AvgPool +SparseDenseCwiseAdd +BiasAddV1 +InvertPermutation +HashTableV2 +SparseApplyMomentum +InfeedEnqueue +StatelessRandomUniformInt +LoadTPUEmbeddingAdadeltaParametersGradAccumDebug +_Send +MapPeek +WriteScalarSummary +OrderedMapUnstageNoKey +SparseApplyCenteredRMSProp +TensorListScatterV2 +Conv3DBackpropInputV2 +RetrieveTPUEmbeddingProximalAdagradParameters +RandomShuffle +UniformCandidateSampler +TensorArraySplitV2 +MutableDenseHashTableV2 +DrawBoundingBoxes +SparseApplyProximalAdagrad +RangeDataset +ReaderRestoreStateV2 +TopKV2 +Atanh +DebugGradientIdentity +SparseAddGrad +ResourceScatterAdd +Ceil +Save +RetrieveTPUEmbeddingCenteredRMSPropParameters +QuantizedConcat +ZerosLike +FractionalAvgPool +EditDistance +UniqueV2 +QuantizeAndDequantizeV2 +QuantizeAndDequantize +TensorListPopBack +DebugNanCount +ApplyAdagradDA +DepthwiseConv2dNative +SerializeIterator +DatasetToGraph +TopK +ResourceApplyFtrlV2 +_NcclBroadcastRecv +QueueIsClosed +ShuffleDataset +DeserializeSparse +PriorityQueueV2 +_DeviceArg +TruncatedNormal +TensorForestTreePredict +StackV2 +AccumulatorNumAccumulated +ReaderResetV2 +ApplyAddSign +RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug +Rint +RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug +ExtractGlimpse +StringToHashBucketStrong +OneShotIterator +ResourceSparseApplyMomentum +SaveSlices +ExperimentalDatasetCardinality +IsFinite +ExperimentalNumaMapAndBatchDataset +AllToAll +TakeManySparseFromTensorsMap +BatchMatrixDiagPart +FixedLengthRecordDataset +StackPush +PlaceholderV2 +MultiDeviceIteratorInit +GcsConfigureBlockCache +QueueDequeueV2 +RetrieveTPUEmbeddingRMSPropParameters +Transpose +IFFT +SparseSegmentSumWithNumSegments +QueueIsClosedV2 +ParameterizedTruncatedNormal +DiagPart +KmeansPlusPlusInitialization +RegexReplace +SparseTensorDenseMatMul +MapDefun +ThreadUnsafeUnigramCandidateSampler +RetrieveTPUEmbeddingADAMParametersGradAccumDebug +ParallelConcat +LookupTableFindV2 +TensorForestTreeDeserialize 
+RetrieveTPUEmbeddingMomentumParameters +FakeQuantWithMinMaxArgs +ResourceApplyGradientDescent +ExperimentalSlidingWindowDataset +DecodeRaw +FakeQuantWithMinMaxVarsPerChannelGradient +UniqueWithCountsV2 +ExperimentalSleepDataset +TPUReplicatedOutput +LowerBound +Tan +Enter +InfeedEnqueueTuple +_SetGlobalTPUArray +Square +DebugGradientRefIdentity +ApplyAdadelta +ExperimentalGroupByWindowDataset +AudioSummary +SquaredDifference +ExperimentalTakeWhileDataset +ScatterNdUpdate +DynamicStitch +OnesLike +FractionalMaxPoolGrad +RemoteCall +Gather +QuantizedMatMul +UnicodeDecodeWithOffsets +EnqueueTPUEmbeddingSparseTensorBatch +AccumulatorApplyGradient +WriteSummary +QuantizedConv2D +ResourceApplyMomentum +Log1p +OrderedMapClear +ResourceScatterUpdate +BarrierTakeMany +ResourceApplyKerasMomentum +GenerateBigQueryReaderPartitions +_XlaRecvAtHost +QuantizedAvgPool +ResourceApplyAdamWithAmsgrad +TensorListResize +_HostRecv +BoostedTreesCenterBias +LookupTableSizeV2 +IRFFT +InplaceAdd +BiasAdd +LoadTPUEmbeddingADAMParametersGradAccumDebug +_DisconnectHostFromDistributedTPUSystem +RaggedRange +WindowDataset +Diag +InfeedDequeue +ExperimentalLatencyStatsDataset +AddSparseToTensorsMap +RaggedGather +RGBToHSV +MultiDeviceIteratorToStringHandle +For +SparseReduceMaxSparse +ConcatOffset +Stage +Switch +QueueDequeueManyV2 +SegmentProd +ApproximateEqual +Conv2D +CrossReplicaSum +SparseMatMul +_ScopedAllocatorSplit +Igammac +BatchMatMul +EnqueueTPUEmbeddingSparseBatch +QueueCloseV2 +TensorArrayPack +ReaderRestoreState +_FusedConv2D +_ReadVariablesOp +MutableHashTableOfTensors +ReadFile +LoadTPUEmbeddingMDLAdagradLightParameters +FractionalAvgPoolGrad +LoadTPUEmbeddingAdagradParametersGradAccumDebug +StatefulStandardNormalV2 +Bincount +Inv +ApplyProximalAdagrad +GatherV2 +WriteFile +BoostedTreesGetEnsembleStates +ResourceGather +ResourceApplyProximalGradientDescent +TruncateMod +LogMatrixDeterminant +IRFFT2D +BoostedTreesTrainingPredict +NearestNeighbors +Floor +LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug +WriteImageSummary +TileGrad +TensorArrayGradV3 +EnqueueTPUEmbeddingIntegerBatch +FusedBatchNorm +LogicalAnd +TensorScatterUpdate +TextLineReaderV2 +TensorSliceDataset +TensorArrayScatterV3 +ResizeNearestNeighborGrad +ApplyPowerSign +ExperimentalRebatchDataset +MirrorPad +LogicalNot +BatchIFFT +TensorArrayConcatV2 +Sum +BoostedTreesPredict +QuantizedConv2DWithBiasAndReluAndRequantize +ResourceSparseApplyAdagrad +LeakyReluGrad +_DeviceRetval +Pad +AddManySparseToTensorsMap +SparseReorder +BitwiseXor +BatchMatrixSetDiag +LookupTableInsertV2 +ExperimentalDenseToSparseBatchDataset +ResourceSparseApplyRMSProp +RandomCrop +LookupTableImportV2 +ResourceScatterNdUpdate +StaticRegexFullMatch +GcsConfigureCredentials +TensorArraySizeV3 +SparseSegmentSqrtNWithNumSegments +ExperimentalGroupByReducerDataset +Conv2DBackpropFilter +MaxPoolGrad +_InitializeHostForDistributedTPU +StagePeek +PadV2 +_ParallelConcatStart +PrintV2 +OptionalGetValue +LoadTPUEmbeddingFTRLParameters +SparseSlice +BoostedTreesMakeQuantileSummaries +MatrixSolve +_ConfigureDistributedTPU +AdjustContrastv2 +_MklMaximum +CudnnRNNParamsSize +BoostedTreesQuantileStreamResourceAddSummaries +BatchIFFT3D +Sigmoid +SegmentMean +IsBoostedTreesEnsembleInitialized +TensorArraySizeV2 +_MklSub +SendTPUEmbeddingGradients +MaxPool3D +Prod +ExperimentalIdentityIndexedDataset +TensorListPushBack +BatchFunction +SparseFillEmptyRows +SelfAdjointEigV2 +RetrieveTPUEmbeddingFTRLParameters +ResourceSparseApplyAdagradDA +TemporaryVariable +ResourceApplyAddSign +Roll 
+Xdivy +MaxPool3DGradGrad +CropAndResize +QuantizedBiasAdd +KMC2ChainInitialization +MapUnstageNoKey +ScatterNdSub +ResizeBilinear +OrderedMapPeek +TensorArray +InplaceSub +Pow +StatefulStandardNormal +RefNextIteration +ScalarSummary +StringSplitV2 +BesselI0e +Unique +LoadTPUEmbeddingRMSPropParameters +WholeFileReaderV2 +EagerPyFunc +NextIteration +Case +TensorScatterSub +ScatterMax +Sqrt +AccumulatorTakeGradient +_MklAdd +Reciprocal +OutfeedEnqueueTuple +StringStrip +FakeQuantWithMinMaxVarsPerChannel +BarrierReadySize +StringToHashBucket +TensorArrayConcat +ShardedFilename +PyFunc +UnsortedSegmentProd +CountUpTo +RandomGamma +TensorArrayGrad +Dilation2D +Unbatch +GetSessionHandle +RetrieveTPUEmbeddingADAMParameters +MutableHashTableOfTensorsV2 +SparseApplyFtrl +BatchDatasetV2 +SparseSparseMinimum +ReverseV2 +StridedSlice +MatchingFiles +EncodeBase64 +IteratorGetNextAsOptional +PaddingFIFOQueue +IteratorToStringHandle +MaxPoolGradGradWithArgmax +TensorListGather +Multinomial +TensorArrayRead +ExperimentalIndexedDatasetGet +TPUPartitionedCall +QuantizedConv2DAndReluAndRequantize +IteratorFromStringHandleV2 +BitwiseOr +UnsortedSegmentMax +_MklSquaredDifference +Conv3DBackpropFilter +If +FlatMapDataset +TensorListScatter +SoftsignGrad +CopyHost +LinSpace +_ParallelConcatUpdate +Stack +StackPushV2 +AssignVariableOp +SparseSplit +TensorArrayUnpack +TensorListStack +BarrierIncompleteSize +Restore +TensorArrayV3 +ExperimentalAssertNextDataset +InTopK +ScatterSub +Acosh +DepthwiseConv2dNativeBackpropFilter +Cast +QuantizeV2 +GeneratorDataset +TensorForestTreeSerialize +NextAfter +TensorArrayCloseV2 +BigQueryReader +ReaderReadV2 +Mod +AddV2 +StatelessRandomNormal +StridedSliceAssign +ScatterMin +ResourceStridedSliceAssign +RandomGammaGrad +ResourceSparseApplyKerasMomentum +BoostedTreesCreateQuantileStreamResource +QuantizedRelu6 +SparseSparseMaximum +BatchNormWithGlobalNormalization +InTopKV2 +Cholesky +ResourceApplyCenteredRMSProp +ResourceApplyAdagrad +ExperimentalParallelInterleaveDataset +ResizeBicubicGrad +BatchSelfAdjointEig +SparseSoftmax +Asinh +QuantizedConv2DAndRelu +MatrixInverse +TensorListConcatLists +Requantize +FFT +ConjugateTranspose +Unstage +Relu6Grad +ScaleAndTranslateGrad +_ArrayToList +CudnnRNNV3 +ExpandDims +InvGrad +NonMaxSuppression +L2Loss +ResizeArea +SparseCross +BatchFFT3D +RandomStandardNormal +ResourceScatterMul +SdcaOptimizer +Zeta +SampleDistortedBoundingBox +IgammaGradA +SegmentMax +Range +RetrieveTPUEmbeddingMomentumParametersGradAccumDebug +FlushSummaryWriter +Dequantize +SparseFillEmptyRowsGrad +IteratorGetNext +SparseTensorDenseAdd +PreventGradient +LookupTableExport diff --git a/src/ops/transformations.jl b/src/ops/transformations.jl index 2d17705c..b2f457b1 100644 --- a/src/ops/transformations.jl +++ b/src/ops/transformations.jl @@ -381,13 +381,14 @@ Returns: with_op_name(name, "Transpose") do if perm === nothing r = range(constant(0), LinearAlgebra.rank(n)-1) - perm = reverse(r, [true]) + perm = reverse(r, [0]) end result = Ops.transpose(n, perm) end result end + @op function Base.permutedims(n::AbstractTensor, perm; name=nothing) transpose(n, perm .- 1; name=name) end diff --git a/src/tape.jl b/src/tape.jl index 27e7e2d9..15579c30 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -44,16 +44,16 @@ macro back_for(target, fn) end end -@back_for(Ops.add, function f(x, y; kwargs...) - return [constant(1.0), constant(1.0)] +@back_for(Ops.add, function f(grad, x, y; kwargs...) 
+    return [constant(1.0), constant(1.0)] .*grad
 end)
 
-@back_for(Ops.sub, function f(x, y; kwargs...)
-    return [constant(1.0), constant(-1.0)]
+@back_for(Ops.sub, function f(grad, x, y; kwargs...)
+    return [constant(1.0), constant(-1.0)] .*grad
 end)
 
-@back_for(Ops.neg, function f(x; kwargs...)
-    return constant(-1.0)
+@back_for(Ops.neg, function f(grad, x; kwargs...)
+    return constant(-1.0) .* grad
 end)
 
 function with_no_grad(f)
@@ -64,45 +64,59 @@ function with_no_grad(f)
     return res
 end
 
-@back_for(Ops.exp, function f(x; kwargs...)
-    Ops.exp(x)
+@back_for(Ops.exp, function f(grad, x; kwargs...)
+    Ops.exp(x) .* grad
 end)
 
-@back_for(Ops.mean, function f(x, reduction_indices; keep_dims=nothing, kwargs...)
+@back_for(Ops.mean, function f(grad, x, reduction_indices; keep_dims=nothing, kwargs...)
     # assume reduction_indices is everything for now
     n_elem = float(num_elements(x))
-    [Ops.fill(size(x), 1/constant(n_elem)), nothing]
+    [grad .* Ops.fill(size(x), 1/constant(n_elem)), nothing]
 end)
 
-@back_for(Ops.sum, function f(x, reduction_indices; keep_dims=nothing, kwargs...)
+@back_for(Ops.sum, function f(grad, x, reduction_indices; keep_dims=nothing, kwargs...)
     # assume reduction_indices is everything for now
-    [Ops.fill(size(x), constant(1.0)), nothing]
+    [grad .* Ops.fill(size(x), constant(1.0)), nothing]
 end)
 
+@back_for(Ops.mul, function f(grad, x, y; kwargs...)
+    return [grad.*y, grad.*x]
+end)
+
+@back_for(Ops.cast, function f(grad, x; kwargs...)
+    return grad
+end)
 
-@back_for(Ops.mul, function f(x, y; kwargs...)
-    return [y, x]
+@back_for(Ops.log, function f(grad, x; kwargs...)
+    return 1/x .* grad
 end)
 
-@back_for(Ops.cast, function f(x; kwargs...)
-    return constant(1.0)
+@back_for(Ops.sin, function f(grad, x; kwargs...)
+    return cos(x) .* grad
 end)
 
+@back_for(Ops.cos, function f(grad, x; kwargs...)
+    return -sin(x) .* grad
+end)
 
-@back_for(Ops.log, function f(x; kwargs...)
-    return 1/x
+@back_for(Ops.relu, function f(grad, x; kwargs...)
+    # todo use relu grad
+    (x > 0) .* grad
 end)
 
-@back_for(Ops.sin, function f(x; kwargs...)
-    return cos(x)
+@back_for(Ops.mat_mul, function f(grad, x, y; transpose_a=nothing, transpose_b=nothing, kwargs...)
+    # todo pay attention to transpose arguments
+    grad_x = Ops.mat_mul(grad, y, transpose_b=true)
+    grad_y = Ops.mat_mul(x, grad, transpose_a=true)
+    return [grad_x, grad_y]
 end)
 
-@back_for(Ops.cos, function f(x; kwargs...)
-    return sin(x)
+@back_for(Ops.tanh, function f(grad, x; kwargs...)
+    Ops.tanh_grad(x, grad)
 end)
 
-@back_for(Ops.relu, function f(x; kwarg...)
-    (x > 0) .* x
+@back_for(Ops.sigmoid, function f(grad, x; kwargs...)
+    Ops.sigmoid_grad(x, grad)
 end)
 
@@ -117,24 +131,24 @@ function _grad(tape::Tape, tensor, out_grad, grads)
     node = tape.nodes[tensor]
     back_op = grad_fns[node.op]
     arg_grads = with_no_grad() do
-        back_op(node.args...; node.kwargs...)
+        back_op(out_grad, node.args...; node.kwargs...)
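# At this call site the chain rule is applied inside each backward rule:
# back_op receives the upstream gradient (out_grad) as its first argument and
# returns one vector-Jacobian product per positional input, or `nothing`
# where no gradient flows. A small usage sketch built on the rules above
# (assuming eager mode; set_tape and item appear later in this series):
#
#     tape = set_tape()
#     x = constant(3.0)
#     y = constant(4.0)
#     z = x .* y                       # recorded as an Ops.mul tape node
#     dx, dy = grad(tape, z, [x, y])   # mul rule: [out_grad .* y, out_grad .* x]
#     # item(dx) == 4.0, item(dy) == 3.0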
end arg_grads = ensure_vector(arg_grads) for (i, arg) in enumerate(node.args) arg_grads[i] === nothing && continue - grads[arg] = arg_grads[i].*out_grad + grads[arg] = arg_grads[i] _grad(tape, arg, grads[arg], grads) end return end -function grad(tape, tensor, in_tensors::AbstractArray, out_grad=1.0) +function grad(tape, tensor, in_tensors::AbstractArray, out_grad=constant(1.0)) grads = Dict() _grad(tape, tensor, out_grad, grads) return [grads[tensor] for tensor in in_tensors] end -function grad(tape, tensor, in_tensor, out_grad=1.0) +function grad(tape, tensor, in_tensor, out_grad=constant(1.0)) grad(tape, tensor, [in_tensor], out_grad)[1] end From 7d1d07184bf38e14574831490c3e3d18e8f79315 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Sun, 24 Feb 2019 19:05:54 -0500 Subject: [PATCH 16/49] Diffeq functionality --- Project.toml | 1 + examples/diffeq.jl | 8 ++++++++ src/eager.jl | 23 +++++++++++++++++++++++ src/tape.jl | 6 ++++++ 4 files changed, 38 insertions(+) create mode 100644 examples/diffeq.jl diff --git a/Project.toml b/Project.toml index 99d06800..5962271e 100644 --- a/Project.toml +++ b/Project.toml @@ -7,6 +7,7 @@ version = "0.12.0" AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f" Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d" +Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" FileIO = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" diff --git a/examples/diffeq.jl b/examples/diffeq.jl new file mode 100644 index 00000000..c7309f64 --- /dev/null +++ b/examples/diffeq.jl @@ -0,0 +1,8 @@ +using DifferentialEquations + +f(u,p,t)=1.01 .* u + +u0=constant(0.5) +tspan=(0.0,1.0) +prob=ODEProblem(f, u0, tspan) +s=solve(prob) diff --git a/src/eager.jl b/src/eager.jl index de3400a9..bdb5c0b8 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -303,3 +303,26 @@ end Base.convert(::Type{TensorHandle}, h::TensorHandle) = h Base.convert(::Type{TensorHandle}, h) = constant(h) + +function item(t::TensorHandle) + x = Array(t) + if length(x) != 1 + throw(ErrorException("item can only be called on scalar tensors")) + end + return x[1] +end + +Base.length(t::TensorHandle) = item(Ops.size(t)) + +Base.eltype(::Type{TensorHandle}) = Float64 # temp hack +Base.collect(t::TensorHandle) = Array(t) +Base.iterate(t::TensorHandle, args...) = iterate(Array(t), args...) +Base.zero(t::AbstractTensor) = Ops.zeros_like(t) +Base.ones(t::AbstractTensor) = Ops.ones_like(t) +function Base.:*(t1::TensorHandle, t2::Number) + return t1 .* t2 +end + +function Base.:*(t1::Number, t2::TensorHandle) + return t1 .* t2 +end diff --git a/src/tape.jl b/src/tape.jl index 15579c30..186a8445 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -111,6 +111,8 @@ end) return [grad_x, grad_y] end) +# These are all wrong. the _grad methods expect the OUTPUT, not the input. +# need to cache the output for them @back_for(Ops.tanh, function f(grad, x; kwargs...) Ops.tanh_grad(x, grad) end) @@ -119,6 +121,10 @@ end) Ops.sigmoid_grad(x, grad) end) +@back_for(Ops.sqrt, function f(grad, x; kwargs...) 
+    Ops.sqrt_grad(x, grad)
+end)
+
 ensure_vector(x::AbstractArray) = x
 ensure_vector(x) = [x]

From 50d0659a2055d946e37a7eeaadd144be818079d0 Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Sun, 24 Feb 2019 20:46:45 -0500
Subject: [PATCH 17/49] Keras demo

---
 examples/keras.jl |  8 ++++++
 src/TensorFlow.jl |  1 +
 src/eager.jl      |  5 ++++
 src/keras.jl      | 64 +++++++++++++++++++++++++++++++++++++++++++++++
 src/tape.jl       | 11 +++++---
 src/train.jl      |  2 +-
 6 files changed, 87 insertions(+), 4 deletions(-)
 create mode 100644 examples/keras.jl
 create mode 100644 src/keras.jl

diff --git a/examples/keras.jl b/examples/keras.jl
new file mode 100644
index 00000000..8e5fde27
--- /dev/null
+++ b/examples/keras.jl
@@ -0,0 +1,8 @@
+m = tf.Model()
+layer = tf.dense(3,3)
+tf.add(m, layer)
+
+x=constant(randn(5,3))
+y=3x
+tf.compile(m, optimizer=.01, loss=tf.mse)
+tf.fit(m, x, y, n_epochs=100)
diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl
index 4bc3052d..fdc166ee 100644
--- a/src/TensorFlow.jl
+++ b/src/TensorFlow.jl
@@ -213,5 +213,6 @@ include("deprecated.jl")
 include("show.jl")
 include("generate_ops.jl")
 include("tape.jl")
+include("keras.jl")
 
 end
diff --git a/src/eager.jl b/src/eager.jl
index bdb5c0b8..39a966e5 100644
--- a/src/eager.jl
+++ b/src/eager.jl
@@ -326,3 +326,8 @@ end
 function Base.:*(t1::Number, t2::TensorHandle)
     return t1 .* t2
 end
+
+function inplace_sub(x, y)
+    i = cast(constant(0:(item(size(x,0))-1)), Int32)
+    Ops.inplace_sub(x, i, y)
+end
diff --git a/src/keras.jl b/src/keras.jl
new file mode 100644
index 00000000..6f1f2050
--- /dev/null
+++ b/src/keras.jl
@@ -0,0 +1,64 @@
+using Statistics
+
+mutable struct Model
+    attrs::Dict
+end
+
+mutable struct Dense
+    weights
+    bias
+end
+
+function dense(in_size, out_size)
+    layer = Dense(constant(randn(in_size, out_size)), constant(zeros(out_size)))
+    return layer
+end
+
+function Model()
+    d = Dict()
+    d["trainable"] = Set()
+    d["layers"] = []
+    Model(d)
+end
+
+
+function add(m::Model, d::Dense)
+    set_trainable(m, d.weights)
+    set_trainable(m, d.bias)
+    push!(m.attrs["layers"], d)
+end
+
+function forward(d::Dense, x)
+    x*d.weights #+ d.bias
+end
+
+function mse(y, y_target)
+    return mean((y .- y_target) .^ 2)
+end
+
+function set_trainable(m::Model, tensor)
+    push!(m.attrs["trainable"], tensor)
+end
+
+function compile(m::Model; optimizer=nothing, loss=nothing)
+    m.attrs["optimizer"] = optimizer
+    m.attrs["loss"] = loss
+end
+
+function fit(m::Model, x, y; n_epochs=1, batch_size=nothing)
+    lr = constant(m.attrs["optimizer"])
+    for epoch in 1:n_epochs
+        tape = set_tape()
+        y_predicted = forward(m.attrs["layers"][1], x)
+        loss = m.attrs["loss"](y, y_predicted)
+        println("Epoch $epoch: Loss is $(item(loss))")
+        values = collect(m.attrs["trainable"])
+        grads = grad(tape, loss, values)
+        for (value, g) in zip(values, grads)
+            if g === nothing
+                continue
+            end
+            inplace_sub(value, lr.*g)
+        end
+    end
+end
diff --git a/src/tape.jl b/src/tape.jl
index 186a8445..715e9e70 100644
--- a/src/tape.jl
+++ b/src/tape.jl
@@ -45,17 +45,22 @@ macro back_for(target, fn)
     end
 end
 
 @back_for(Ops.add, function f(grad, x, y; kwargs...)
-    return [constant(1.0), constant(1.0)] .*grad
+    println("Add got $grad, $x, $y")
+    return [constant(1.0).*grad, constant(1.0).*grad]
 end)
 
 @back_for(Ops.sub, function f(grad, x, y; kwargs...)
-    return [constant(1.0), constant(-1.0)] .*grad
+    return [constant(1.0).*grad, constant(-1.0).*grad]
 end)
 
 @back_for(Ops.neg, function f(grad, x; kwargs...)
     return constant(-1.0) .* grad
 end)
 
+@back_for(Ops.pow, function f(grad, x, y; kwargs...)
+ [y.* (x.^(y.-1)), nothing] +end) + function with_no_grad(f) old_tape = tape global tape = nothing @@ -152,7 +157,7 @@ end function grad(tape, tensor, in_tensors::AbstractArray, out_grad=constant(1.0)) grads = Dict() _grad(tape, tensor, out_grad, grads) - return [grads[tensor] for tensor in in_tensors] + return [get(grads, tensor, nothing) for tensor in in_tensors] end function grad(tape, tensor, in_tensor, out_grad=constant(1.0)) diff --git a/src/train.jl b/src/train.jl index 0514ce99..f760f313 100644 --- a/src/train.jl +++ b/src/train.jl @@ -64,7 +64,7 @@ mutable struct GradientDescentOptimizer <: Optimizer name::String end -GradientDescentOptimizer(learning_rate; name="descent") = GradientDescentOptimizer(Tensor(learning_rate), name) +GradientDescentOptimizer(learning_rate; name="descent") = GradientDescentOptimizer(constant(learning_rate), name) function GradientDescentOptimizer(; α=.01, kwargs...) GradientDescentOptimizer(α; kwargs...) From 42ce8025371a3c18de7693ef5c84aef40ec11fae Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Sun, 24 Feb 2019 21:57:11 -0500 Subject: [PATCH 18/49] Better grads --- src/generate_ops.jl | 2 +- src/ops/imported_ops.jl | 4594 +++++++++++++++++++-------------------- src/tape.jl | 28 +- 3 files changed, 2311 insertions(+), 2313 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 493ebeb9..bf770c85 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -255,7 +255,7 @@ function to_function(op::tensorflow.OpDef) $attr_block $(t_block...) res = tf.execute(desc) - node = tf.TapeNode($jl_name, [$(inputs[2:end]...)], $(inputs[1].args...)) + node = tf.TapeNode($jl_name, [$(inputs[2:end]...)], $(inputs[1].args...), res) tf.add_node(res[1], node) $eager_output_block end diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 84f113ca..2418eb9a 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-02-22T14:43:13.001 +# Autogenerated on 2019-02-24T21:47:31.178 module Ops import TensorFlow @@ -9,7 +9,7 @@ const tf = TensorFlow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) local desc tf.with_op_name(name, "ReduceJoin") do desc = tf.NodeDescription("ReduceJoin") @@ -39,7 +39,7 @@ begin desc["separator"] = Base.String(separator) end res = tf.execute(desc) - node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing) + node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59,7 +59,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) local desc 
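# Note the two variants generated for every op: the *_graph form being built
# here wraps its NodeDescription in tf.with_op_name, so the node picks up any
# enclosing name scope before it is added to the graph, while the matching
# *_eager form skips graph construction and drives a tf.EagerOp immediately.
# That is why each op below appears twice with near-identical bodies.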
tf.with_op_name(name, "ReduceDataset") do desc = tf.NodeDescription("ReduceDataset") @@ -117,7 +117,7 @@ begin desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) end res = tf.execute(desc) - node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -137,7 +137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListFromTensor") do desc = tf.NodeDescription("TensorListFromTensor") @@ -171,7 +171,7 @@ begin desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -191,7 +191,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ExtractJpegShape") do desc = tf.NodeDescription("ExtractJpegShape") @@ -211,7 +211,7 @@ begin desc["output_type"] = Base.identity(output_type) end res = tf.execute(desc) - node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing) + node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -231,7 +231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Svd") do desc = tf.NodeDescription("Svd") @@ -264,7 +264,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing) + node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) tf.add_node(res[1], node) return res end @@ -284,7 +284,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; 
name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextSync") do desc = tf.NodeDescription("IteratorGetNextSync") @@ -310,7 +310,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -330,7 +330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "RefEnter") do desc = tf.NodeDescription("RefEnter") @@ -364,7 +364,7 @@ begin end desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -384,7 +384,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erf") do desc = tf.NodeDescription("Erf") @@ -400,7 +400,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(erf, [x_], name=nothing) + node = tf.TapeNode(erf, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -420,7 +420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExportV2") do desc = tf.NodeDescription("LookupTableExportV2") @@ -439,7 +439,7 @@ begin table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing) + node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -459,7 +459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) local desc tf.with_op_name(name, "Round") do desc = tf.NodeDescription("Round") @@ -475,7 +475,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(round, [x_], name=nothing) + node = tf.TapeNode(round, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -495,7 +495,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, 
device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeue") do desc = tf.NodeDescription("OutfeedDequeue") @@ -523,7 +523,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -543,7 +543,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") @@ -557,7 +557,7 @@ begin tree_handle_ = convert(tf.TensorHandle, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing) + node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -577,7 +577,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Merge") do desc = tf.NodeDescription("Merge") @@ -604,7 +604,7 @@ begin end desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing) + node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -624,7 +624,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "HistogramFixedWidth") do desc = tf.NodeDescription("HistogramFixedWidth") @@ -655,7 +655,7 @@ begin desc["T"] = tf.data_type(values_) desc["T"] = tf.data_type(value_range_) res = tf.execute(desc) - node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing) + node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -675,7 +675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asin") do desc = tf.NodeDescription("Asin") @@ -691,7 +691,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(asin, [x_], name=nothing) + node = tf.TapeNode(asin, [x_], name=nothing, res) tf.add_node(res[1], 
node) return res[1] end @@ -711,7 +711,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Any") do desc = tf.NodeDescription("Any") @@ -738,7 +738,7 @@ begin end desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) - node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -758,7 +758,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "RsqrtGrad") do desc = tf.NodeDescription("RsqrtGrad") @@ -779,7 +779,7 @@ begin desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) res = tf.execute(desc) - node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing) + node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -799,7 +799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatter") do desc = tf.NodeDescription("TensorArrayScatter") @@ -827,7 +827,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -847,7 +847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) local desc tf.with_op_name(name, "DynamicPartition") do desc = tf.NodeDescription("DynamicPartition") @@ -878,7 +878,7 @@ begin end desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing) + node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing, res) tf.add_node(res[1], node) return res end @@ -898,7 +898,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, 
"ExperimentalPrivateThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") @@ -928,7 +928,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -948,7 +948,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeState") do desc = tf.NodeDescription("ReaderSerializeState") @@ -962,7 +962,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -982,7 +982,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RightShift") do desc = tf.NodeDescription("RightShift") @@ -1003,7 +1003,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(right_shift, [x_, y_], name=nothing) + node = tf.TapeNode(right_shift, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1023,7 +1023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3D") do desc = tf.NodeDescription("AvgPool3D") @@ -1063,7 +1063,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1083,7 +1083,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) local desc tf.with_op_name(name, "EncodePng") do desc = tf.NodeDescription("EncodePng") @@ -1105,7 +1105,7 @@ begin end desc["T"] = tf.data_type(image_) res = tf.execute(desc) - node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing) + node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing, res) 
tf.add_node(res[1], node) return res[1] end @@ -1125,7 +1125,7 @@ end Debug Identity Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugIdentity") do desc = tf.NodeDescription("DebugIdentity") @@ -1165,7 +1165,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1185,7 +1185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) local desc tf.with_op_name(name, "Imag") do desc = tf.NodeDescription("Imag") @@ -1201,7 +1201,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(imag, [input_], name=nothing) + node = tf.TapeNode(imag, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1221,7 +1221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") @@ -1287,7 +1287,7 @@ begin desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1307,7 +1307,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageClear") do desc = tf.NodeDescription("StageClear") @@ -1347,7 +1347,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, 
container=nothing, shared_name=nothing) + node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1367,7 +1367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sign") do desc = tf.NodeDescription("Sign") @@ -1383,7 +1383,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(sign, [x_], name=nothing) + node = tf.TapeNode(sign, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1403,7 +1403,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) local desc tf.with_op_name(name, "PopulationCount") do desc = tf.NodeDescription("PopulationCount") @@ -1419,7 +1419,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(population_count, [x_], name=nothing) + node = tf.TapeNode(population_count, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1439,7 +1439,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) local desc tf.with_op_name(name, "Neg") do desc = tf.NodeDescription("Neg") @@ -1455,7 +1455,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(neg, [x_], name=nothing) + node = tf.TapeNode(neg, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1475,7 +1475,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "AnonymousIterator") do desc = tf.NodeDescription("AnonymousIterator") @@ -1497,7 +1497,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1517,7 +1517,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSum") do desc = tf.NodeDescription("SparseReduceSum") @@ -1551,7 +1551,7 @@ begin end desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) - node = 
tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1571,7 +1571,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterDataset") do desc = tf.NodeDescription("FilterDataset") @@ -1613,7 +1613,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1633,7 +1633,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "StringLength") do desc = tf.NodeDescription("StringLength") @@ -1653,7 +1653,7 @@ begin desc["unit"] = Base.String(unit) end res = tf.execute(desc) - node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing) + node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1673,7 +1673,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3D") do desc = tf.NodeDescription("Conv3D") @@ -1718,7 +1718,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1738,7 +1738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local 
desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") @@ -1777,7 +1777,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -1797,7 +1797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) local desc tf.with_op_name(name, "OptionalHasValue") do desc = tf.NodeDescription("OptionalHasValue") @@ -1811,7 +1811,7 @@ begin optional_ = convert(tf.TensorHandle, optional_) tf.add_input(desc, optional_) res = tf.execute(desc) - node = tf.TapeNode(optional_has_value, [optional_], name=nothing) + node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1831,7 +1831,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyAdam") do desc = tf.NodeDescription("ApplyAdam") @@ -1904,7 +1904,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -1924,7 +1924,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsToCanonical") do desc = tf.NodeDescription("CudnnRNNParamsToCanonical") @@ -1999,7 +1999,7 @@ begin end desc["T"] = tf.data_type(params_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(cudnn_rnn_params_to_canonical, 
[num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -2019,7 +2019,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT3D") do desc = tf.NodeDescription("IRFFT3D") @@ -2037,7 +2037,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing) + node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2057,7 +2057,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) local desc tf.with_op_name(name, "Angle") do desc = tf.NodeDescription("Angle") @@ -2073,7 +2073,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(angle, [input_], name=nothing) + node = tf.TapeNode(angle, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2093,7 +2093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") @@ -2115,7 +2115,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2135,7 +2135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LearnedUnigramCandidateSampler") do desc = tf.NodeDescription("LearnedUnigramCandidateSampler") @@ -2190,7 +2190,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, 
seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -2210,7 +2210,7 @@ end A graph node which represents an argument to a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Arg") do desc = tf.NodeDescription("_Arg") @@ -2226,7 +2226,7 @@ begin desc["index"] = Base.Int(index) end res = tf.execute(desc) - node = tf.TapeNode(_arg, [], name=nothing, index=nothing) + node = tf.TapeNode(_arg, [], name=nothing, index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2246,7 +2246,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixSquareRoot") do desc = tf.NodeDescription("MatrixSquareRoot") @@ -2262,7 +2262,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(matrix_square_root, [input_], name=nothing) + node = tf.TapeNode(matrix_square_root, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2282,7 +2282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseMul") do desc = tf.NodeDescription("SparseDenseCwiseMul") @@ -2311,7 +2311,7 @@ begin desc["T"] = tf.data_type(sp_values_) desc["T"] = tf.data_type(dense_) res = tf.execute(desc) - node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing) + node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2331,7 +2331,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV3") do desc = tf.NodeDescription("TensorArrayConcatV3") @@ -2366,7 +2366,7 @@ begin desc["element_shape_except0"] = Base.identity(element_shape_except0) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing) + node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) tf.add_node(res[1], node) return res end @@ -2386,7 +2386,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_script_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_script_graph(input_; name=nothing) local desc 
tf.with_op_name(name, "UnicodeScript") do desc = tf.NodeDescription("UnicodeScript") @@ -2400,7 +2400,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(unicode_script, [input_], name=nothing) + node = tf.TapeNode(unicode_script, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2420,7 +2420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "BatchCholeskyGrad") do desc = tf.NodeDescription("BatchCholeskyGrad") @@ -2441,7 +2441,7 @@ begin desc["T"] = tf.data_type(l_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing) + node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2461,7 +2461,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Mean") do desc = tf.NodeDescription("Mean") @@ -2490,7 +2490,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) - node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2510,7 +2510,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT") do desc = tf.NodeDescription("BatchFFT") @@ -2524,7 +2524,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(batch_fft, [input_], name=nothing) + node = tf.TapeNode(batch_fft, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2544,7 +2544,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sin") do desc = tf.NodeDescription("Sin") @@ -2560,7 +2560,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(sin, [x_], name=nothing) + node = tf.TapeNode(sin, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2580,7 +2580,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, 
"BoostedTreesEnsembleResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") @@ -2602,7 +2602,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2622,7 +2622,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedMaxPool") do desc = tf.NodeDescription("QuantizedMaxPool") @@ -2669,7 +2669,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res end @@ -2689,7 +2689,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapStage") do desc = tf.NodeDescription("OrderedMapStage") @@ -2747,7 +2747,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2767,7 +2767,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "PartitionedCall") do desc = tf.NodeDescription("PartitionedCall") @@ -2817,7 +2817,7 @@ begin desc["executor_type"] = Base.String(executor_type) end res = tf.execute(desc) - node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + node = tf.TapeNode(partitioned_call, 
[args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2837,7 +2837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "SparseApplyAdagrad") do desc = tf.NodeDescription("SparseApplyAdagrad") @@ -2887,7 +2887,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing) + node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -2907,7 +2907,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) local desc tf.with_op_name(name, "DecodeProtoV2") do desc = tf.NodeDescription("DecodeProtoV2") @@ -2962,7 +2962,7 @@ begin desc["sanitize"] = Base.Bool(sanitize) end res = tf.execute(desc) - node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing, res) tf.add_node(res[1], node) return res end @@ -2982,7 +2982,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) local desc tf.with_op_name(name, "Betainc") do desc = tf.NodeDescription("Betainc") @@ -3008,7 +3008,7 @@ begin desc["T"] = tf.data_type(b_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing) + node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3028,7 +3028,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) local desc tf.with_op_name(name, "GuaranteeConst") do desc = tf.NodeDescription("GuaranteeConst") @@ -3044,7 +3044,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(guarantee_const, [input_], name=nothing) + node 
= tf.TapeNode(guarantee_const, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3064,7 +3064,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) local desc tf.with_op_name(name, "DecodeBmp") do desc = tf.NodeDescription("DecodeBmp") @@ -3084,7 +3084,7 @@ begin desc["channels"] = Base.Int(channels) end res = tf.execute(desc) - node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing) + node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3104,7 +3104,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesBucketize") do desc = tf.NodeDescription("BoostedTreesBucketize") @@ -3133,7 +3133,7 @@ begin desc["num_features"] = Base.Int(num_features) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing) + node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing, res) tf.add_node(res[1], node) return res end @@ -3153,7 +3153,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "ShutdownDistributedTPU") do desc @@ -3164,7 +3164,7 @@ begin function shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("ShutdownDistributedTPU") res = tf.execute(desc) - node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing) + node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3184,7 +3184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") @@ -3198,7 +3198,7 @@ begin iterator_ = convert(tf.TensorHandle, iterator_) tf.add_input(desc, iterator_) res = tf.execute(desc) - node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing) + node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3218,7 +3218,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) local desc tf.with_op_name(name, "Timestamp") do desc @@ 
-3229,7 +3229,7 @@ begin function timestamp_eager(; name=nothing) desc = tf.EagerOp("Timestamp") res = tf.execute(desc) - node = tf.TapeNode(timestamp, [], name=nothing) + node = tf.TapeNode(timestamp, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3249,7 +3249,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixExponential") do desc = tf.NodeDescription("MatrixExponential") @@ -3265,7 +3265,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(matrix_exponential, [input_], name=nothing) + node = tf.TapeNode(matrix_exponential, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3285,7 +3285,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Size") do desc = tf.NodeDescription("Size") @@ -3307,7 +3307,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing) + node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3327,7 +3327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "AddN") do desc = tf.NodeDescription("AddN") @@ -3349,7 +3349,7 @@ begin end desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing) + node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3369,7 +3369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSum") do desc = tf.NodeDescription("SparseSegmentSum") @@ -3396,7 +3396,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing) + node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3416,7 +3416,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDataset") do desc = tf.NodeDescription("BatchDataset") @@ 
-3446,7 +3446,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3466,7 +3466,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) local desc tf.with_op_name(name, "RecordInput") do desc = tf.NodeDescription("RecordInput") @@ -3518,7 +3518,7 @@ begin desc["compression_type"] = Base.String(compression_type) end res = tf.execute(desc) - node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3538,7 +3538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpToV2") do desc = tf.NodeDescription("QueueDequeueUpToV2") @@ -3568,7 +3568,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3588,7 +3588,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -3627,7 +3627,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, 
[], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -3647,7 +3647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -3697,7 +3697,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3717,7 +3717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) local desc tf.with_op_name(name, "SerializeTensor") do desc = tf.NodeDescription("SerializeTensor") @@ -3733,7 +3733,7 @@ begin tf.add_input(desc, tensor_) desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing) + node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3753,7 +3753,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mul") do desc = tf.NodeDescription("Mul") @@ -3774,7 +3774,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(mul, [x_, y_], name=nothing) + node = tf.TapeNode(mul, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3794,7 +3794,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") @@ -3820,7 +3820,7 @@ begin desc["T"] = tf.data_type(features_) desc["T"] = tf.data_type(labels_) res = tf.execute(desc) - node = 
tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing) + node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -3840,7 +3840,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterDiv") do desc = tf.NodeDescription("ResourceScatterDiv") @@ -3873,7 +3873,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3893,7 +3893,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDatasetV2") do desc = tf.NodeDescription("FixedLengthRecordDatasetV2") @@ -3927,7 +3927,7 @@ begin tf.add_input(desc, buffer_size_) tf.add_input(desc, compression_type_) res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing) + node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3947,7 +3947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "SkipDataset") do desc = tf.NodeDescription("SkipDataset") @@ -3977,7 +3977,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -3997,7 +3997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cosh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cosh") do desc = tf.NodeDescription("Cosh") @@ -4013,7 +4013,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(cosh, [x_], 
name=nothing) + node = tf.TapeNode(cosh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4033,7 +4033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormV2") do desc = tf.NodeDescription("FusedBatchNormV2") @@ -4099,7 +4099,7 @@ begin desc["U"] = tf.data_type(mean_) desc["U"] = tf.data_type(variance_) res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -4119,7 +4119,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplit") do desc = tf.NodeDescription("TensorArraySplit") @@ -4147,7 +4147,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4167,7 +4167,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) local desc tf.with_op_name(name, "CTCLoss") do desc = tf.NodeDescription("CTCLoss") @@ -4216,7 +4216,7 @@ begin desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) end res = tf.execute(desc) - node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing, res) tf.add_node(res[1], node) return res end @@ -4236,7 +4236,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "QuantizedReshape") do desc = tf.NodeDescription("QuantizedReshape") @@ -4271,7 +4271,7 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tshape"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing) + node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -4291,7 +4291,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorDiv") do desc = tf.NodeDescription("FloorDiv") @@ -4312,7 +4312,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(floor_div, [x_, y_], name=nothing) + node = tf.TapeNode(floor_div, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4332,7 +4332,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV2") do desc = tf.NodeDescription("TensorArrayV2") @@ -4376,7 +4376,7 @@ begin desc["tensor_array_name"] = Base.String(tensor_array_name) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4396,7 +4396,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "BarrierClose") do desc = tf.NodeDescription("BarrierClose") @@ -4416,7 +4416,7 @@ begin desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end res = tf.execute(desc) - node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing) + node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4436,7 +4436,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ReadVariableOp") 
do desc = tf.NodeDescription("ReadVariableOp") @@ -4456,7 +4456,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing) + node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4476,7 +4476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedMul") do desc = tf.NodeDescription("QuantizedMul") @@ -4519,7 +4519,7 @@ begin desc["T1"] = tf.data_type(x_) desc["T2"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing) + node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -4539,7 +4539,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Selu") do desc = tf.NodeDescription("Selu") @@ -4555,7 +4555,7 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(selu, [features_], name=nothing) + node = tf.TapeNode(selu, [features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4575,7 +4575,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV3") do desc = tf.NodeDescription("CudnnRNNBackpropV3") @@ -4690,7 +4690,7 @@ begin desc["T"] = tf.data_type(output_c_backprop_) desc["T"] = tf.data_type(reserve_space_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_backprop_v3, [input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(cudnn_rnn_backprop_v3, [input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) 
return res end @@ -4710,7 +4710,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsert") do desc = tf.NodeDescription("LookupTableInsert") @@ -4736,7 +4736,7 @@ begin desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing) + node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4756,7 +4756,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "ComplexAbs") do desc = tf.NodeDescription("ComplexAbs") @@ -4772,7 +4772,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(complex_abs, [x_], name=nothing) + node = tf.TapeNode(complex_abs, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4792,7 +4792,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) local desc tf.with_op_name(name, "TridiagonalSolve") do desc = tf.NodeDescription("TridiagonalSolve") @@ -4813,7 +4813,7 @@ begin desc["T"] = tf.data_type(diagonals_) desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) - node = tf.TapeNode(tridiagonal_solve, [diagonals_, rhs_], name=nothing) + node = tf.TapeNode(tridiagonal_solve, [diagonals_, rhs_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4833,7 +4833,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImport") do desc = tf.NodeDescription("LookupTableImport") @@ -4859,7 +4859,7 @@ begin desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing) + node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4879,7 +4879,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "Abs") do desc = tf.NodeDescription("Abs") @@ -4895,7 +4895,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(abs, [x_], name=nothing) + node = tf.TapeNode(abs, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -4915,7 +4915,7 @@ 
end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyAdam") do desc = tf.NodeDescription("ResourceApplyAdam") @@ -4985,7 +4985,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5005,7 +5005,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) local desc tf.with_op_name(name, "WriteHistogramSummary") do desc = tf.NodeDescription("WriteHistogramSummary") @@ -5033,7 +5033,7 @@ begin tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing) + node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5053,7 +5053,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") @@ -5071,7 +5071,7 @@ begin tf.add_input(desc, dataset_) tf.add_input(desc, materialized_) res = tf.execute(desc) - node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing) + node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5091,7 +5091,7 @@ end Sends the named tensor from send_device to recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostSend") do desc = tf.NodeDescription("_HostSend") @@ -5137,7 +5137,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5157,7 +5157,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Greater") do desc = tf.NodeDescription("Greater") @@ -5178,7 +5178,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(greater, [x_, y_], name=nothing) + node = tf.TapeNode(greater, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5198,7 +5198,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "NcclBroadcast") do desc = tf.NodeDescription("NcclBroadcast") @@ -5220,7 +5220,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing) + node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5240,7 +5240,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBackBatch") do desc = tf.NodeDescription("TensorListPushBackBatch") @@ -5266,7 +5266,7 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5286,7 +5286,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMin") do desc = tf.NodeDescription("ResourceScatterMin") @@ -5319,7 +5319,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5339,7 +5339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) local desc tf.with_op_name(name, "Slice") do desc = tf.NodeDescription("Slice") @@ -5373,7 +5373,7 @@ begin desc["Index"] = tf.data_type(begin_) desc["Index"] = tf.data_type(size_) res = tf.execute(desc) - node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing) + node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5393,7 +5393,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecode") do desc = tf.NodeDescription("UnicodeDecode") @@ -5436,7 +5436,7 @@ begin desc["replace_control_characters"] = Base.Bool(replace_control_characters) end res = tf.execute(desc) - node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) tf.add_node(res[1], node) return res end @@ -5456,7 +5456,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TakeDataset") do desc = tf.NodeDescription("TakeDataset") @@ -5486,7 +5486,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5506,7 +5506,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") @@ -5550,7 +5550,7 @@ begin desc["num_features"] = Base.Int(num_features) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5570,7 +5570,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "AllCandidateSampler") do desc = tf.NodeDescription("AllCandidateSampler") @@ -5619,7 +5619,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -5639,7 +5639,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropInput") do desc = tf.NodeDescription("Conv2DBackpropInput") @@ -5700,7 +5700,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5720,7 +5720,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "DatasetToSingleElement") do desc = tf.NodeDescription("DatasetToSingleElement") @@ -5746,7 +5746,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5766,7 +5766,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "CacheDataset") do desc = tf.NodeDescription("CacheDataset") @@ -5796,7 +5796,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5816,7 +5816,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") @@ -5859,7 +5859,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res end @@ -5879,7 +5879,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedResizeAndPadConv2D") do desc = tf.NodeDescription("FusedResizeAndPadConv2D") @@ -5932,7 +5932,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = 
tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -5952,7 +5952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) local desc tf.with_op_name(name, "Batch") do desc = tf.NodeDescription("Batch") @@ -6031,7 +6031,7 @@ begin desc["T"] = map(Base.identity, T) end res = tf.execute(desc) - node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing, res) tf.add_node(res[1], node) return res end @@ -6051,7 +6051,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastRecv") do desc = tf.NodeDescription("CollectiveBcastRecv") @@ -6085,7 +6085,7 @@ begin desc["shape"] = Base.identity(shape) end res = tf.execute(desc) - node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6105,7 +6105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) local desc tf.with_op_name(name, "BatchToSpaceND") do desc = tf.NodeDescription("BatchToSpaceND") @@ -6133,7 +6133,7 @@ begin desc["Tblock_shape"] = tf.data_type(block_shape_) desc["Tcrops"] = tf.data_type(crops_) res 
= tf.execute(desc) - node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing) + node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6153,7 +6153,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) local desc tf.with_op_name(name, "LoopCond") do desc = tf.NodeDescription("LoopCond") @@ -6167,7 +6167,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(loop_cond, [input_], name=nothing) + node = tf.TapeNode(loop_cond, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6187,7 +6187,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "DepthToSpace") do desc = tf.NodeDescription("DepthToSpace") @@ -6215,7 +6215,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing) + node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6235,7 +6235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) local desc tf.with_op_name(name, "DestroyTemporaryVariable") do desc = tf.NodeDescription("DestroyTemporaryVariable") @@ -6257,7 +6257,7 @@ begin end desc["T"] = tf.data_type(ref_) res = tf.execute(desc) - node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing) + node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6277,7 +6277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNN") do desc = tf.NodeDescription("CudnnRNN") @@ -6355,7 +6355,7 @@ begin desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, 
rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -6375,7 +6375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "RefIdentity") do desc = tf.NodeDescription("RefIdentity") @@ -6391,7 +6391,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(ref_identity, [input_], name=nothing) + node = tf.TapeNode(ref_identity, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6411,7 +6411,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGrad") do desc = tf.NodeDescription("MaxPool3DGrad") @@ -6462,7 +6462,7 @@ begin desc["TInput"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6482,7 +6482,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") @@ -6528,7 +6528,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6548,7 +6548,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueueV2") do desc = tf.NodeDescription("PaddingFIFOQueueV2") @@ -6588,7 +6588,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6608,7 +6608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInput") do desc = tf.NodeDescription("Conv3DBackpropInput") @@ -6652,7 +6652,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6672,7 +6672,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefExit") do desc = tf.NodeDescription("RefExit") @@ -6688,7 +6688,7 @@ begin tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(ref_exit, [data_], name=nothing) + node = tf.TapeNode(ref_exit, [data_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6708,7 +6708,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapClear") do desc = tf.NodeDescription("MapClear") @@ -6748,7 +6748,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6768,7 +6768,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) local desc tf.with_op_name(name, "EncodeWav") do desc = tf.NodeDescription("EncodeWav") @@ -6786,7 +6786,7 @@ begin tf.add_input(desc, audio_) tf.add_input(desc, sample_rate_) res = tf.execute(desc) - node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing) + node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6806,7 +6806,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) local desc tf.with_op_name(name, "TensorSummaryV2") do desc = tf.NodeDescription("TensorSummaryV2") @@ -6830,7 +6830,7 @@ begin tf.add_input(desc, serialized_summary_metadata_) desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing) + node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6850,7 +6850,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpTo") do desc = tf.NodeDescription("QueueDequeueUpTo") @@ -6880,7 +6880,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6900,7 +6900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "MatrixBandPart") do desc = tf.NodeDescription("MatrixBandPart") @@ -6927,7 +6927,7 @@ begin desc["Tindex"] = tf.data_type(num_lower_) desc["Tindex"] = tf.data_type(num_upper_) res = tf.execute(desc) - node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing) + node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6947,7 +6947,7 @@ end Copy Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "Copy") do desc = tf.NodeDescription("Copy") @@ -6975,7 +6975,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -6995,7 +6995,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) local desc tf.with_op_name(name, "ShapeN") do desc = tf.NodeDescription("ShapeN") @@ -7028,7 +7028,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing) + node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -7048,7 +7048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ExperimentalParseExampleDataset") do desc = tf.NodeDescription("ExperimentalParseExampleDataset") @@ -7118,7 +7118,7 @@ begin desc["sloppy"] = Base.Bool(sloppy) end res = tf.execute(desc) - node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7138,7 +7138,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Concat") do desc = tf.NodeDescription("Concat") @@ -7164,7 +7164,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = 
tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing) + node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7184,7 +7184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatDimMap") do desc = tf.NodeDescription("DataFormatDimMap") @@ -7212,7 +7212,7 @@ begin end desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing) + node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7232,7 +7232,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReader") do desc = tf.NodeDescription("IdentityReader") @@ -7254,7 +7254,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7274,7 +7274,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softplus") do desc = tf.NodeDescription("Softplus") @@ -7290,7 +7290,7 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(softplus, [features_], name=nothing) + node = tf.TapeNode(softplus, [features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7310,7 +7310,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") @@ -7362,7 +7362,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7382,7 +7382,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleSequenceExample") do desc = tf.NodeDescription("ParseSingleSequenceExample") @@ -7489,7 +7489,7 @@ begin desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) end res = tf.execute(desc) - node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -7509,7 +7509,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixDiag") do desc = tf.NodeDescription("MatrixDiag") @@ -7525,7 +7525,7 @@ begin tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) - node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing) + node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -7545,7 +7545,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; 
name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing)
         local desc
         tf.with_op_name(name, "Fact") do
             desc
@@ -7556,7 +7556,7 @@ begin
     function fact_eager(; name=nothing)
         desc = tf.EagerOp("Fact")
         res = tf.execute(desc)
-        node = tf.TapeNode(fact, [], name=nothing)
+        node = tf.TapeNode(fact, [], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7576,7 +7576,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ShardDataset") do
             desc = tf.NodeDescription("ShardDataset")
@@ -7610,7 +7610,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(shard_dataset, [input_dataset_, num_shards_, index_], name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(shard_dataset, [input_dataset_, num_shards_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7630,7 +7630,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPoolGradGrad") do
             desc = tf.NodeDescription("MaxPoolGradGrad")
@@ -7680,7 +7680,7 @@ begin
         desc["T"] = tf.data_type(orig_output_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7700,7 +7700,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing)
         local desc
         tf.with_op_name(name, "ResizeBilinearGrad") do
             desc = tf.NodeDescription("ResizeBilinearGrad")
@@ -7726,7 +7726,7 @@ begin
         end
         desc["T"] = tf.data_type(original_image_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing)
+        node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7746,7 +7746,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing)
         local desc
         tf.with_op_name(name, "BatchToSpace") do
             desc = tf.NodeDescription("BatchToSpace")
@@ -7775,7 +7775,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["Tidx"] = tf.data_type(crops_)
         res = tf.execute(desc)
-        node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing)
+        node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7795,7 +7795,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing)
         local desc
         tf.with_op_name(name, "OptionalFromValue") do
             desc = tf.NodeDescription("OptionalFromValue")
@@ -7815,7 +7815,7 @@ begin
             desc["Toutput_types"] = map(Base.identity, Toutput_types)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing)
+        node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7835,7 +7835,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "Xlogy") do
             desc = tf.NodeDescription("Xlogy")
@@ -7856,7 +7856,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(xlogy, [x_, y_], name=nothing)
+        node = tf.TapeNode(xlogy, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7876,7 +7876,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing)
         local desc
         tf.with_op_name(name, "Cross") do
             desc = tf.NodeDescription("Cross")
@@ -7897,7 +7897,7 @@ begin
         desc["T"] = tf.data_type(a_)
         desc["T"] = tf.data_type(b_)
        res = tf.execute(desc)
-        node = tf.TapeNode(cross, [a_, b_], name=nothing)
+        node = tf.TapeNode(cross, [a_, b_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7917,7 +7917,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "BitwiseAnd") do
             desc = tf.NodeDescription("BitwiseAnd")
@@ -7938,7 +7938,7 @@ begin
         desc["T"] = tf.data_type(x_)
        desc["T"] = tf.data_type(y_)
        res = tf.execute(desc)
-        node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing)
+        node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -7958,7 +7958,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing)
         local desc
         tf.with_op_name(name, "BroadcastTo") do
             desc = tf.NodeDescription("BroadcastTo")
@@ -7980,7 +7980,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["Tidx"] = tf.data_type(shape_)
         res = tf.execute(desc)
-        node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing)
+        node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -8000,7 +8000,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing)
         local desc
         tf.with_op_name(name, "EluGrad") do
             desc = tf.NodeDescription("EluGrad")
@@ -8021,7 +8021,7 @@ begin
         desc["T"] = tf.data_type(gradients_)
         desc["T"] = tf.data_type(outputs_)
         res = tf.execute(desc)
-        node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing)
+        node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -8041,7 +8041,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "CudnnRNNBackprop") do
             desc = tf.NodeDescription("CudnnRNNBackprop")
@@ -8148,7 +8148,7 @@ begin
         desc["T"] = tf.data_type(output_c_backprop_)
         desc["T"] = tf.data_type(reserve_space_)
         res = tf.execute(desc)
-        node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+        node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -8168,7 +8168,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing)
         local desc
         tf.with_op_name(name, "StringToHashBucketFast") do
             desc = tf.NodeDescription("StringToHashBucketFast")
@@ -8188,7 +8188,7 @@ begin
             desc["num_buckets"] = Base.Int(num_buckets)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing)
+        node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -8208,7 +8208,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; 
name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTable") do desc = tf.NodeDescription("MutableHashTable") @@ -8248,7 +8248,7 @@ begin desc["value_dtype"] = Base.identity(value_dtype) end res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8268,7 +8268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu") do desc = tf.NodeDescription("Relu") @@ -8284,7 +8284,7 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(relu, [features_], name=nothing) + node = tf.TapeNode(relu, [features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8304,7 +8304,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) local desc tf.with_op_name(name, "NthElement") do desc = tf.NodeDescription("NthElement") @@ -8330,7 +8330,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing) + node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8350,7 +8350,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softsign") do desc = tf.NodeDescription("Softsign") @@ -8366,7 +8366,7 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(softsign, [features_], name=nothing) + node = tf.TapeNode(softsign, [features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8386,7 +8386,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, 
value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTable") do desc = tf.NodeDescription("MutableDenseHashTable") @@ -8450,7 +8450,7 @@ begin end desc["key_dtype"] = tf.data_type(empty_key_) res = tf.execute(desc) - node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8470,7 +8470,7 @@ end An op that shuts down a running distributed TPU system. The Op returns """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "_ShutdownDistributedTPU") do desc @@ -8481,7 +8481,7 @@ begin function _shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("_ShutdownDistributedTPU") res = tf.execute(desc) - node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing) + node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8501,7 +8501,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Polygamma") do desc = tf.NodeDescription("Polygamma") @@ -8522,7 +8522,7 @@ begin desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(polygamma, [a_, x_], name=nothing) + node = tf.TapeNode(polygamma, [a_, x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8542,7 +8542,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) local desc tf.with_op_name(name, "NcclReduce") do desc = tf.NodeDescription("NcclReduce") @@ -8570,7 +8570,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing) + node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8590,7 +8590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMax") do desc = tf.NodeDescription("ArgMax") @@ -8619,7 +8619,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = 
tf.data_type(dimension_) res = tf.execute(desc) - node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing) + node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8639,7 +8639,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixSetDiag") do desc = tf.NodeDescription("MatrixSetDiag") @@ -8660,7 +8660,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) - node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing) + node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8680,7 +8680,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) local desc tf.with_op_name(name, "SpaceToBatchND") do desc = tf.NodeDescription("SpaceToBatchND") @@ -8708,7 +8708,7 @@ begin desc["Tblock_shape"] = tf.data_type(block_shape_) desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) - node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing) + node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -8728,7 +8728,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReshape") do desc = tf.NodeDescription("SparseReshape") @@ -8755,7 +8755,7 @@ begin tf.add_input(desc, input_shape_) tf.add_input(desc, new_shape_) res = tf.execute(desc) - node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing) + node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -8775,7 +8775,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptimizeDataset") do desc = tf.NodeDescription("OptimizeDataset") @@ -8805,7 +8805,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] 
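Every hunk in this regenerated file has the same shape: each op keeps a graph-mode builder (the *_graph function, which assembles a tf.NodeDescription) and an eager wrapper (the *_eager function, which drives a tf.EagerOp), and the regeneration appends the execution results res to the tf.TapeNode call, evidently so the tape holds the op's outputs when gradients are later computed. The following hand-assembled sketch shows the full eager wrapper implied by the matrix_diag hunk above; it is illustrative rather than part of the patch, and the convert(tf.TensorHandle, ...) line is an assumption copied from the pattern visible in other hunks (e.g. lookup_table_size):

    function matrix_diag_eager(diagonal_; name=nothing)
        # Build an eager op instead of a graph NodeDescription.
        desc = tf.EagerOp("MatrixDiag")
        # Eager inputs are TensorHandles rather than graph tensors
        # (assumed conversion, following the other generated wrappers).
        diagonal_ = convert(tf.TensorHandle, diagonal_)
        tf.add_input(desc, diagonal_)
        # Attributes come from the runtime dtype of the handle.
        desc["T"] = tf.data_type(diagonal_)
        res = tf.execute(desc)
        # New in this patch: res rides along on the tape node so the tape
        # can replay the op's outputs during a backward pass.
        node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res)
        tf.add_node(res[1], node)
        return res[1]
    end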
[Identical hunks for concat_v2, resource_sparse_apply_adadelta, tile, mutex_v2, serialize_many_sparse, tpu_embedding_activations, batch_matrix_solve_ls, not_equal, lgamma, tpu_replicate_metadata, experimental_thread_pool_handle, self_adjoint_eig, boosted_trees_quantile_stream_resource_get_bucket_boundaries, sparse_dense_cwise_div, acos, all, compare_and_bitpack, var_handle_op, experimental_unique_dataset, quantized_conv2d_with_bias_sum_and_relu, list_diff, create_summary_file_writer, generate_vocab_remapping, batch_matrix_inverse, control_trigger, tpu_ordinal_selector, stop_gradient, split, unpack, resource_scatter_max, tensor_array_write, fill, quantized_conv2d_with_bias_and_requantize, softmax, resize_bicubic, infeed_dequeue_tuple, multi_device_iterator, decode_csv, lookup_table_find, shuffle_and_repeat_dataset, requantization_range_per_channel, experimental_unbatch_dataset, avg_pool3d_grad, placeholder_with_default, initialize_table_v2, set_size, assert, non_max_suppression_v4, sample_distorted_bounding_box_v2, initialize_table_from_text_file, lookup_table_size, sparse_apply_adagrad_da, broadcast_gradient_args, summary_writer, and recv_tpu_embedding_activations elided: each makes the same two substitutions shown above.]
@@ -11701,7 +11701,7 @@ end
 output = input; While (Cond(output)) { output = Body(output) }
     """
 begin
-    #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "_While") do desc = tf.NodeDescription("_While") @@ -11733,7 +11733,7 @@ begin desc["body"] = Base.identity(body) end res = tf.execute(desc) - node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing) + node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -11753,7 +11753,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTable") do desc = tf.NodeDescription("InitializeTable") @@ -11779,7 +11779,7 @@ begin desc["Tkey"] = tf.data_type(keys_) desc["Tval"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing) + node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -11799,7 +11799,7 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNumericSummary") do desc = tf.NodeDescription("DebugNumericSummary") @@ -11857,7 +11857,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -11877,7 +11877,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") @@ -11916,7 +11916,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, 
table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -11936,7 +11936,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tanh") do desc = tf.NodeDescription("Tanh") @@ -11952,7 +11952,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(tanh, [x_], name=nothing) + node = tf.TapeNode(tanh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -11972,7 +11972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "SymbolicGradient") do desc = tf.NodeDescription("SymbolicGradient") @@ -12004,7 +12004,7 @@ begin desc["f"] = Base.identity(f) end res = tf.execute(desc) - node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12024,7 +12024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") @@ -12082,7 +12082,7 @@ begin desc["num_features"] = Base.Int(num_features) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing) + node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12102,7 +12102,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, 
use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyMomentum") do desc = tf.NodeDescription("ApplyMomentum") @@ -12150,7 +12150,7 @@ begin desc["T"] = tf.data_type(grad_) desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) - node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12170,7 +12170,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderRead") do desc = tf.NodeDescription("ReaderRead") @@ -12193,7 +12193,7 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing) + node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -12213,7 +12213,7 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) local desc tf.with_op_name(name, "_WaitForDistributedTPU") do desc = tf.NodeDescription("_WaitForDistributedTPU") @@ -12239,7 +12239,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing) + node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12259,7 +12259,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) local desc tf.with_op_name(name, "MutexLock") do desc = tf.NodeDescription("MutexLock") @@ -12273,7 +12273,7 @@ begin mutex_ = convert(tf.TensorHandle, mutex_) tf.add_input(desc, mutex_) res = tf.execute(desc) - node = tf.TapeNode(mutex_lock, [mutex_], name=nothing) + node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12293,7 +12293,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) local desc tf.with_op_name(name, "AccumulatorSetGlobalStep") do desc = tf.NodeDescription("AccumulatorSetGlobalStep") @@ -12311,7 +12311,7 @@ begin tf.add_input(desc, handle_) tf.add_input(desc, new_global_step_) res = tf.execute(desc) - node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], 
name=nothing) + node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12331,7 +12331,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedAdd") do desc = tf.NodeDescription("QuantizedAdd") @@ -12374,7 +12374,7 @@ begin desc["T1"] = tf.data_type(x_) desc["T2"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing) + node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -12394,7 +12394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) local desc tf.with_op_name(name, "Squeeze") do desc = tf.NodeDescription("Squeeze") @@ -12416,7 +12416,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing) + node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12436,7 +12436,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) local desc tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") @@ -12450,7 +12450,7 @@ begin patterns_ = convert(tf.TensorHandle, patterns_) tf.add_input(desc, patterns_) res = tf.execute(desc) - node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing) + node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12470,7 +12470,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") @@ -12492,7 +12492,7 @@ begin tf.add_input(desc, filename_) tf.add_input(desc, compression_type_) res = tf.execute(desc) - node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing) + node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12512,7 +12512,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") @@ -12550,7 +12550,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12570,7 +12570,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) local desc tf.with_op_name(name, "NoOp") do desc @@ -12581,7 +12581,7 @@ begin function no_op_eager(; name=nothing) desc = tf.EagerOp("NoOp") res = tf.execute(desc) - node = tf.TapeNode(no_op, [], name=nothing) + node = tf.TapeNode(no_op, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12601,7 +12601,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ZipDataset") do desc = tf.NodeDescription("ZipDataset") @@ -12633,7 +12633,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12653,7 +12653,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReaderV2") do desc = tf.NodeDescription("IdentityReaderV2") @@ -12675,7 +12675,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12695,7 +12695,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) 
+ #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "LMDBReader") do desc = tf.NodeDescription("LMDBReader") @@ -12717,7 +12717,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12737,7 +12737,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "NcclAllReduce") do desc = tf.NodeDescription("NcclAllReduce") @@ -12771,7 +12771,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12791,7 +12791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TextLineDataset") do desc = tf.NodeDescription("TextLineDataset") @@ -12813,7 +12813,7 @@ begin tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) res = tf.execute(desc) - node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing) + node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12833,7 +12833,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) local desc tf.with_op_name(name, "SdcaShrinkL1") do desc = tf.NodeDescription("SdcaShrinkL1") @@ -12865,7 +12865,7 @@ begin desc["l2"] = Base.identity(l2) end res = tf.execute(desc) - node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing) + node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12885,7 +12885,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, 
compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReaderV2") do desc = tf.NodeDescription("TFRecordReaderV2") @@ -12913,7 +12913,7 @@ begin desc["compression_type"] = Base.String(compression_type) end res = tf.execute(desc) - node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12933,7 +12933,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") @@ -12959,7 +12959,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -12979,7 +12979,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDatasetV2") do desc = tf.NodeDescription("PaddedBatchDatasetV2") @@ -13027,7 +13027,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13047,7 +13047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") @@ -13089,7 +13089,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = 
tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13109,7 +13109,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySize") do desc = tf.NodeDescription("TensorArraySize") @@ -13127,7 +13127,7 @@ begin tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13147,7 +13147,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapSize") do desc = tf.NodeDescription("OrderedMapSize") @@ -13187,7 +13187,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13207,7 +13207,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniform") do desc = tf.NodeDescription("StatelessRandomUniform") @@ -13235,7 +13235,7 @@ begin desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) - node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing) + node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13255,7 +13255,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, 
set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToSparseSetOperation") do desc = tf.NodeDescription("SparseToSparseSetOperation") @@ -13309,7 +13309,7 @@ begin desc["T"] = tf.data_type(set1_values_) desc["T"] = tf.data_type(set2_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing) + node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) tf.add_node(res[1], node) return res end @@ -13329,7 +13329,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) local desc tf.with_op_name(name, "TensorSummary") do desc = tf.NodeDescription("TensorSummary") @@ -13363,7 +13363,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing) + node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13383,7 +13383,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) local desc tf.with_op_name(name, "RemoteFusedGraphExecute") do desc = tf.NodeDescription("RemoteFusedGraphExecute") @@ -13415,7 +13415,7 @@ begin desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) end res = tf.execute(desc) - node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13435,7 +13435,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) local desc tf.with_op_name(name, "SparseSliceGrad") do desc = tf.NodeDescription("SparseSliceGrad") @@ -13463,7 +13463,7 @@ begin tf.add_input(desc, output_indices_) desc["T"] = tf.data_type(backprop_val_grad_) res = tf.execute(desc) - node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing) + node 
= tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13483,7 +13483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumsum") do desc = tf.NodeDescription("Cumsum") @@ -13518,7 +13518,7 @@ begin desc["T"] = tf.data_type(x_) desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing) + node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13538,7 +13538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") @@ -13591,7 +13591,7 @@ begin desc["T"] = tf.data_type(gamma_) desc["T"] = tf.data_type(backprop_) res = tf.execute(desc) - node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) tf.add_node(res[1], node) return res end @@ -13611,7 +13611,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPoolGrad") do desc = tf.NodeDescription("AvgPoolGrad") @@ -13655,7 +13655,7 @@ begin end desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13675,7 +13675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "RestoreV2") do desc = 
tf.NodeDescription("RestoreV2") @@ -13703,7 +13703,7 @@ begin desc["dtypes"] = map(Base.identity, dtypes) end res = tf.execute(desc) - node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing) + node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13723,7 +13723,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu6") do desc = tf.NodeDescription("Relu6") @@ -13739,7 +13739,7 @@ begin tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(relu6, [features_], name=nothing) + node = tf.TapeNode(relu6, [features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13759,7 +13759,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyRMSProp") do desc = tf.NodeDescription("SparseApplyRMSProp") @@ -13823,7 +13823,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13843,7 +13843,7 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Recv") do desc = tf.NodeDescription("_Recv") @@ -13889,7 +13889,7 @@ begin desc["client_terminated"] = Base.Bool(client_terminated) end res = tf.execute(desc) - node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13909,7 +13909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool") do desc = tf.NodeDescription("MaxPool") @@ -13949,7 +13949,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -13969,7 +13969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) local desc tf.with_op_name(name, "Invert") do desc = tf.NodeDescription("Invert") @@ -13985,7 +13985,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(invert, [x_], name=nothing) + node = tf.TapeNode(invert, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -14005,7 +14005,7 @@ end *NOTE*: Do not invoke this operator directly in Python. 
Graph rewrite pass is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) local desc tf.with_op_name(name, "_UnaryOpsComposition") do desc = tf.NodeDescription("_UnaryOpsComposition") @@ -14027,7 +14027,7 @@ begin end desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing) + node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -14047,7 +14047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapDataset") do desc = tf.NodeDescription("ExperimentalMapDataset") @@ -14101,7 +14101,7 @@ begin desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end res = tf.execute(desc) - node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -14121,7 +14121,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") @@ -14167,7 +14167,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -14187,7 +14187,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing)
         local desc
         tf.with_op_name(name, "ParseTensor") do
             desc = tf.NodeDescription("ParseTensor")
@@ -14207,7 +14207,7 @@ begin
             desc["out_type"] = Base.identity(out_type)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing)
+        node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14227,7 +14227,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do
             desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle")
@@ -14261,7 +14261,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14281,7 +14281,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do
             desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard")
@@ -14315,7 +14315,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14335,7 +14335,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "RandomUniformInt") do
             desc = tf.NodeDescription("RandomUniformInt")
@@ -14374,7 +14374,7 @@ begin
         desc["Tout"] = tf.data_type(minval_)
         desc["Tout"] = tf.data_type(maxval_)
         res = tf.execute(desc)
-        node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing)
+        node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14394,7 +14394,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do
             desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits")
@@ -14421,7 +14421,7 @@ begin
         desc["T"] = tf.data_type(features_)
         desc["Tlabels"] = tf.data_type(labels_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing)
+        node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -14441,7 +14441,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "TensorArrayReadV2") do
             desc = tf.NodeDescription("TensorArrayReadV2")
@@ -14469,7 +14469,7 @@ begin
             desc["dtype"] = Base.identity(dtype)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing)
+        node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14489,7 +14489,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderReadUpTo") do
             desc = tf.NodeDescription("ReaderReadUpTo")
@@ -14516,7 +14516,7 @@ begin
         tf.add_input(desc, queue_handle_)
         tf.add_input(desc, num_records_)
         res = tf.execute(desc)
-        node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing)
+        node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -14536,7 +14536,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
         local desc
         tf.with_op_name(name, "EncodeProto") do
             desc = tf.NodeDescription("EncodeProto")
@@ -14578,7 +14578,7 @@ begin
             desc["Tinput_types"] = map(Base.identity, Tinput_types)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing)
+        node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14598,7 +14598,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
         local desc
         tf.with_op_name(name, "StridedSliceGrad") do
             desc = tf.NodeDescription("StridedSliceGrad")
@@ -14704,7 +14704,7 @@ begin
         desc["Index"] = tf.data_type(strides_)
         desc["T"] = tf.data_type(dy_)
         res = tf.execute(desc)
-        node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing)
+        node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14724,7 +14724,7 @@ end
 Replacement node for NcclReduce.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "_NcclReduceSend") do
             desc = tf.NodeDescription("_NcclReduceSend")
@@ -14758,7 +14758,7 @@ begin
         end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
+        node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14778,7 +14778,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
         local desc
         tf.with_op_name(name, "PaddedBatchDataset") do
             desc = tf.NodeDescription("PaddedBatchDataset")
@@ -14822,7 +14822,7 @@ begin
             desc["N"] = Base.Int(N)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing)
+        node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14842,7 +14842,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing)
         local desc
         tf.with_op_name(name, "DataFormatVecPermute") do
             desc = tf.NodeDescription("DataFormatVecPermute")
@@ -14870,7 +14870,7 @@ begin
         end
         desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing)
+        node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14890,7 +14890,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
         local desc
         tf.with_op_name(name, "StringFormat") do
             desc = tf.NodeDescription("StringFormat")
@@ -14928,7 +14928,7 @@ begin
             desc["summarize"] = Base.Int(summarize)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing)
+        node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -14948,7 +14948,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
         local desc
         tf.with_op_name(name, "AsString") do
             desc = tf.NodeDescription("AsString")
@@ -14994,7 +14994,7 @@ begin
         end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing)
+        node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15014,7 +15014,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
         local desc
         tf.with_op_name(name, "QueueEnqueueMany") do
             desc = tf.NodeDescription("QueueEnqueueMany")
@@ -15044,7 +15044,7 @@ begin
             desc["timeout_ms"] = Base.Int(timeout_ms)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+        node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15064,7 +15064,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing)
         local desc
         tf.with_op_name(name, "FakeParam") do
             desc = tf.NodeDescription("FakeParam")
@@ -15086,7 +15086,7 @@ begin
             desc["shape"] = Base.identity(shape)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing)
+        node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15106,7 +15106,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing)
         local desc
         tf.with_op_name(name, "ApplyAdagrad") do
             desc = tf.NodeDescription("ApplyAdagrad")
@@ -15149,7 +15149,7 @@ begin
         desc["T"] = tf.data_type(lr_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing)
+        node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15169,7 +15169,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalIteratorGetDevice") do
             desc = tf.NodeDescription("ExperimentalIteratorGetDevice")
@@ -15183,7 +15183,7 @@ begin
         resource_ = convert(tf.TensorHandle, resource_)
         tf.add_input(desc, resource_)
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing)
+        node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15203,7 +15203,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing)
         local desc
         tf.with_op_name(name, "AdjustContrast") do
             desc = tf.NodeDescription("AdjustContrast")
@@ -15231,7 +15231,7 @@ begin
         tf.add_input(desc, max_value_)
         desc["T"] = tf.data_type(images_)
         res = tf.execute(desc)
-        node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing)
+        node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15251,7 +15251,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
         local desc
         tf.with_op_name(name, "ExtractImagePatches") do
             desc = tf.NodeDescription("ExtractImagePatches")
@@ -15291,7 +15291,7 @@ begin
         end
         desc["T"] = tf.data_type(images_)
         res = tf.execute(desc)
-        node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+        node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15311,7 +15311,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing)
         local desc
         tf.with_op_name(name, "ScaleAndTranslate") do
             desc = tf.NodeDescription("ScaleAndTranslate")
@@ -15345,7 +15345,7 @@ begin
         end
         desc["T"] = tf.data_type(images_)
         res = tf.execute(desc)
-        node = tf.TapeNode(scale_and_translate, [images_, size_, scale_, translation_], name=nothing, kernel_type=nothing)
+        node = tf.TapeNode(scale_and_translate, [images_, size_, scale_, translation_], name=nothing, kernel_type=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15365,7 +15365,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing)
         local desc
         tf.with_op_name(name, "OptionalNone") do
             desc
@@ -15376,7 +15376,7 @@ begin
     function optional_none_eager(; name=nothing)
         desc = tf.EagerOp("OptionalNone")
         res = tf.execute(desc)
-        node = tf.TapeNode(optional_none, [], name=nothing)
+        node = tf.TapeNode(optional_none, [], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15396,7 +15396,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "VariableV2") do
             desc = tf.NodeDescription("VariableV2")
@@ -15430,7 +15430,7 @@ begin
             desc["shared_name"] = Base.String(shared_name)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing)
+        node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15450,7 +15450,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing)
         local desc
         tf.with_op_name(name, "Elu") do
             desc = tf.NodeDescription("Elu")
@@ -15466,7 +15466,7 @@ begin
         tf.add_input(desc, features_)
         desc["T"] = tf.data_type(features_)
         res = tf.execute(desc)
-        node = tf.TapeNode(elu, [features_], name=nothing)
+        node = tf.TapeNode(elu, [features_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15486,7 +15486,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ScatterUpdate") do
             desc = tf.NodeDescription("ScatterUpdate")
@@ -15520,7 +15520,7 @@ begin
         desc["Tindices"] = tf.data_type(indices_)
         desc["T"] = tf.data_type(updates_)
         res = tf.execute(desc)
-        node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15540,7 +15540,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "FloorMod") do
             desc = tf.NodeDescription("FloorMod")
@@ -15561,7 +15561,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(floor_mod, [x_, y_], name=nothing)
+        node = tf.TapeNode(floor_mod, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15581,7 +15581,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do
             desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset")
@@ -15607,7 +15607,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15627,7 +15627,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do
             desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset")
@@ -15665,7 +15665,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15685,7 +15685,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "ComputeAccidentalHits") do
             desc = tf.NodeDescription("ComputeAccidentalHits")
@@ -15726,7 +15726,7 @@ begin
             desc["seed2"] = Base.Int(seed2)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing)
+        node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -15746,7 +15746,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing)
         local desc
         tf.with_op_name(name, "StringToNumber") do
             desc = tf.NodeDescription("StringToNumber")
@@ -15766,7 +15766,7 @@ begin
             desc["out_type"] = Base.identity(out_type)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing)
+        node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15786,7 +15786,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "Snapshot") do
             desc = tf.NodeDescription("Snapshot")
@@ -15802,7 +15802,7 @@ begin
         tf.add_input(desc, input_)
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(snapshot, [input_], name=nothing)
+        node = tf.TapeNode(snapshot, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15822,7 +15822,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing)
         local desc
         tf.with_op_name(name, "DeserializeIterator") do
             desc = tf.NodeDescription("DeserializeIterator")
@@ -15840,7 +15840,7 @@ begin
         tf.add_input(desc, resource_handle_)
         tf.add_input(desc, serialized_)
         res = tf.execute(desc)
-        node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing)
+        node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15860,7 +15860,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Atan") do
             desc = tf.NodeDescription("Atan")
@@ -15876,7 +15876,7 @@ begin
         tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(atan, [x_], name=nothing)
+        node = tf.TapeNode(atan, [x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15896,7 +15896,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
         local desc
         tf.with_op_name(name, "MatMul") do
             desc = tf.NodeDescription("MatMul")
@@ -15929,7 +15929,7 @@ begin
         desc["T"] = tf.data_type(a_)
         desc["T"] = tf.data_type(b_)
         res = tf.execute(desc)
-        node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing)
+        node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15949,7 +15949,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Erfc") do
             desc = tf.NodeDescription("Erfc")
@@ -15965,7 +15965,7 @@ begin
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(erfc, [x_], name=nothing)
+        node = tf.TapeNode(erfc, [x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -15985,7 +15985,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing)
         local desc
         tf.with_op_name(name, "SigmoidGrad") do
             desc = tf.NodeDescription("SigmoidGrad")
@@ -16006,7 +16006,7 @@ begin
         desc["T"] = tf.data_type(y_)
         desc["T"] = tf.data_type(dy_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing)
+        node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16026,7 +16026,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
         local desc
         tf.with_op_name(name, "FixedLengthRecordReaderV2") do
             desc = tf.NodeDescription("FixedLengthRecordReaderV2")
@@ -16078,7 +16078,7 @@ begin
             desc["encoding"] = Base.String(encoding)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing)
+        node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16098,7 +16098,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing)
         local desc
         tf.with_op_name(name, "NonMaxSuppressionV3") do
             desc = tf.NodeDescription("NonMaxSuppressionV3")
@@ -16131,7 +16131,7 @@ begin
         desc["T"] = tf.data_type(boxes_)
         desc["T"] = tf.data_type(scores_)
         res = tf.execute(desc)
-        node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing)
+        node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16151,7 +16151,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
         local desc
         tf.with_op_name(name, "Dilation2DBackpropInput") do
             desc = tf.NodeDescription("Dilation2DBackpropInput")
@@ -16195,7 +16195,7 @@ begin
         desc["T"] = tf.data_type(filter_)
         desc["T"] = tf.data_type(out_backprop_)
         res = tf.execute(desc)
-        node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing)
+        node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16215,7 +16215,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "LogicalOr") do
             desc = tf.NodeDescription("LogicalOr")
@@ -16233,7 +16233,7 @@ begin
         tf.add_input(desc, x_)
         tf.add_input(desc, y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(logical_or, [x_, y_], name=nothing)
+        node = tf.TapeNode(logical_or, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16253,7 +16253,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceApplyAdadelta") do
             desc = tf.NodeDescription("ResourceApplyAdadelta")
@@ -16302,7 +16302,7 @@ begin
         desc["T"] = tf.data_type(epsilon_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16322,7 +16322,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing)
         local desc
         tf.with_op_name(name, "DenseToSparseSetOperation") do
             desc = tf.NodeDescription("DenseToSparseSetOperation")
@@ -16368,7 +16368,7 @@ begin
         desc["T"] = tf.data_type(set1_)
         desc["T"] = tf.data_type(set2_values_)
         res = tf.execute(desc)
-        node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing)
+        node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -16388,7 +16388,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderNumRecordsProduced") do
             desc = tf.NodeDescription("ReaderNumRecordsProduced")
@@ -16402,7 +16402,7 @@ begin
         reader_handle_ = convert(tf.TensorHandle, reader_handle_)
         tf.add_input(desc, reader_handle_)
         res = tf.execute(desc)
-        node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing)
+        node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16422,7 +16422,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing)
         local desc
         tf.with_op_name(name, "AdjustHue") do
             desc = tf.NodeDescription("AdjustHue")
@@ -16442,7 +16442,7 @@ begin
         tf.add_input(desc, delta_)
         desc["T"] = tf.data_type(images_)
         res = tf.execute(desc)
-        node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing)
+        node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16462,7 +16462,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do
             desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush")
@@ -16486,7 +16486,7 @@ begin
             desc["generate_quantiles"] = Base.Bool(generate_quantiles)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing)
+        node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16506,7 +16506,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do
             desc = tf.NodeDescription("ExperimentalMapAndBatchDataset")
@@ -16566,7 +16566,7 @@ begin
             desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing)
+        node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16586,7 +16586,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "RealDiv") do
             desc = tf.NodeDescription("RealDiv")
@@ -16607,7 +16607,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(real_div, [x_, y_], name=nothing)
+        node = tf.TapeNode(real_div, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16627,7 +16627,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing)
         local desc
         tf.with_op_name(name, "RestoreSlice") do
             desc = tf.NodeDescription("RestoreSlice")
@@ -16661,7 +16661,7 @@ begin
             desc["preferred_shard"] = Base.Int(preferred_shard)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing)
+        node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16681,7 +16681,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing)
         local desc
         tf.with_op_name(name, "StackPopV2") do
             desc = tf.NodeDescription("StackPopV2")
@@ -16701,7 +16701,7 @@ begin
             desc["elem_type"] = Base.identity(elem_type)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing)
+        node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16721,7 +16721,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing)
         local desc
         tf.with_op_name(name, "Reverse") do
             desc = tf.NodeDescription("Reverse")
@@ -16741,7 +16741,7 @@ begin
         tf.add_input(desc, dims_)
         desc["T"] = tf.data_type(tensor_)
         res = tf.execute(desc)
-        node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing)
+        node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16761,7 +16761,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "DecodePng") do
             desc = tf.NodeDescription("DecodePng")
@@ -16787,7 +16787,7 @@ begin
             desc["dtype"] = Base.identity(dtype)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing)
+        node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16807,7 +16807,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing)
         local desc
         tf.with_op_name(name, "NonMaxSuppressionV2") do
             desc = tf.NodeDescription("NonMaxSuppressionV2")
@@ -16836,7 +16836,7 @@ begin
         desc["T"] = tf.data_type(boxes_)
         desc["T"] = tf.data_type(scores_)
         res = tf.execute(desc)
-        node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing)
+        node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16856,7 +16856,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing)
         local desc
         tf.with_op_name(name, "Igamma") do
             desc = tf.NodeDescription("Igamma")
@@ -16877,7 +16877,7 @@ begin
         desc["T"] = tf.data_type(a_)
         desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(igamma, [a_, x_], name=nothing)
+        node = tf.TapeNode(igamma, [a_, x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16897,7 +16897,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Digamma") do
             desc = tf.NodeDescription("Digamma")
@@ -16913,7 +16913,7 @@ begin
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(digamma, [x_], name=nothing)
+        node = tf.TapeNode(digamma, [x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -16933,7 +16933,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceApplyAdaMax") do
             desc = tf.NodeDescription("ResourceApplyAdaMax")
@@ -16992,7 +16992,7 @@ begin
         desc["T"] = tf.data_type(epsilon_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17012,7 +17012,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "SpaceToDepth") do
             desc = tf.NodeDescription("SpaceToDepth")
@@ -17040,7 +17040,7 @@ begin
        end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing)
+        node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17060,7 +17060,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing)
         local desc
         tf.with_op_name(name, "SqrtGrad") do
             desc = tf.NodeDescription("SqrtGrad")
@@ -17081,7 +17081,7 @@ begin
         desc["T"] = tf.data_type(y_)
         desc["T"] = tf.data_type(dy_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing)
+        node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17101,7 +17101,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "MapUnstage") do
             desc = tf.NodeDescription("MapUnstage")
@@ -17149,7 +17149,7 @@ begin
             desc["shared_name"] = Base.String(shared_name)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+        node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17169,7 +17169,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing)
         local desc
         tf.with_op_name(name, "Qr") do
             desc = tf.NodeDescription("Qr")
@@ -17196,7 +17196,7 @@ begin
        end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing)
+        node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -17216,7 +17216,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do
             desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature")
@@ -17267,7 +17267,7 @@ begin
             desc["num_features"] = Base.Int(num_features)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing)
+        node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -17287,7 +17287,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "UnbatchGrad") do
             desc = tf.NodeDescription("UnbatchGrad")
@@ -17328,7 +17328,7 @@ begin
         desc["T"] = tf.data_type(original_input_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing)
+        node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17348,7 +17348,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing)
         local desc
         tf.with_op_name(name, "LogSoftmax") do
             desc = tf.NodeDescription("LogSoftmax")
@@ -17364,7 +17364,7 @@ begin
         tf.add_input(desc, logits_)
         desc["T"] = tf.data_type(logits_)
         res = tf.execute(desc)
-        node = tf.TapeNode(log_softmax, [logits_], name=nothing)
+        node = tf.TapeNode(log_softmax, [logits_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17384,7 +17384,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing)
         local desc
         tf.with_op_name(name, "ResourceCountUpTo") do
             desc = tf.NodeDescription("ResourceCountUpTo")
@@ -17404,7 +17404,7 @@ begin
             desc["limit"] = Base.Int(limit)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing)
+        node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17424,7 +17424,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing)
         local desc
         tf.with_op_name(name, "AccumulateNV2") do
             desc = tf.NodeDescription("AccumulateNV2")
@@ -17452,7 +17452,7 @@ begin
        end
         desc["T"] = tf.data_type(inputs_)
         res = tf.execute(desc)
-        node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing)
+        node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17472,7 +17472,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
         local desc
         tf.with_op_name(name, "ParallelMapDataset") do
             desc = tf.NodeDescription("ParallelMapDataset")
@@ -17536,7 +17536,7 @@ begin
             desc["preserve_cardinality"] = Base.Bool(preserve_cardinality)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing)
+        node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17556,7 +17556,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
         local desc
         tf.with_op_name(name, "RandomUniform") do
             desc = tf.NodeDescription("RandomUniform")
@@ -17590,7 +17590,7 @@ begin
        end
         desc["T"] = tf.data_type(shape_)
         res = tf.execute(desc)
-        node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+        node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17610,7 +17610,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
         local desc
         tf.with_op_name(name, "UnicodeTranscode") do
             desc = tf.NodeDescription("UnicodeTranscode")
@@ -17654,7 +17654,7 @@ begin
             desc["replace_control_characters"] = Base.Bool(replace_control_characters)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+        node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17674,7 +17674,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "ReaderReset") do
             desc = tf.NodeDescription("ReaderReset")
@@ -17688,7 +17688,7 @@ begin
         reader_handle_ = convert(tf.TensorHandle, reader_handle_)
         tf.add_input(desc, reader_handle_)
         res = tf.execute(desc)
-        node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing)
+        node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17708,7 +17708,7 @@ end
 Replacement node for NcclBroadcast.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "_NcclBroadcastSend") do
             desc = tf.NodeDescription("_NcclBroadcastSend")
@@ -17736,7 +17736,7 @@ begin
        end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing)
+        node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17756,7 +17756,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "BatchMatrixDeterminant") do
             desc = tf.NodeDescription("BatchMatrixDeterminant")
@@ -17772,7 +17772,7 @@ begin
         tf.add_input(desc, input_)
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing)
+        node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17792,7 +17792,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "LessEqual") do
             desc = tf.NodeDescription("LessEqual")
@@ -17813,7 +17813,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(less_equal, [x_, y_], name=nothing)
+        node = tf.TapeNode(less_equal, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17833,7 +17833,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ApplyGradientDescent") do
             desc = tf.NodeDescription("ApplyGradientDescent")
@@ -17865,7 +17865,7 @@ begin
         desc["T"] = tf.data_type(alpha_)
         desc["T"] = tf.data_type(delta_)
         res = tf.execute(desc)
-        node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17885,7 +17885,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseSegmentSqrtN") do
             desc = tf.NodeDescription("SparseSegmentSqrtN")
@@ -17912,7 +17912,7 @@ begin
         desc["T"] = tf.data_type(data_)
         desc["Tidx"] = tf.data_type(indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing)
+        node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17932,7 +17932,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "MatrixLogarithm") do
             desc = tf.NodeDescription("MatrixLogarithm")
@@ -17948,7 +17948,7 @@ begin
         tf.add_input(desc, input_)
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(matrix_logarithm, [input_], name=nothing)
+        node = tf.TapeNode(matrix_logarithm, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -17968,7 +17968,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ScatterMul") do
             desc = tf.NodeDescription("ScatterMul")
@@ -18002,7 +18002,7 @@ begin
         desc["Tindices"] = tf.data_type(indices_)
         desc["T"] = tf.data_type(updates_)
         res = tf.execute(desc)
-        node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18022,7 +18022,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
         local desc
         tf.with_op_name(name, "DecodeJpeg") do
             desc = tf.NodeDescription("DecodeJpeg")
@@ -18072,7 +18072,7 @@ begin
             desc["dct_method"] = Base.String(dct_method)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
+        node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18092,7 +18092,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "RandomShuffleQueueV2") do
             desc = tf.NodeDescription("RandomShuffleQueueV2")
@@ -18150,7 +18150,7 @@ begin
             desc["shared_name"] = Base.String(shared_name)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing)
+        node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18170,7 +18170,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
         local desc
         tf.with_op_name(name, "QueueEnqueueManyV2") do
             desc = tf.NodeDescription("QueueEnqueueManyV2")
@@ -18200,7 +18200,7 @@ begin
             desc["timeout_ms"] = Base.Int(timeout_ms)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing)
+        node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18220,7 +18220,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do
             desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp")
@@ -18285,7 +18285,7 @@ begin
         desc["T"] = tf.data_type(grad_)
         desc["Tindices"] = tf.data_type(indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18305,7 +18305,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "InterleaveDataset") do
             desc = tf.NodeDescription("InterleaveDataset")
@@ -18355,7 +18355,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18375,7 +18375,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing)
         local desc
         tf.with_op_name(name, "StackPop") do
             desc = tf.NodeDescription("StackPop")
@@ -18395,7 +18395,7 @@ begin
             desc["elem_type"] = Base.identity(elem_type)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing)
+        node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18415,7 +18415,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPoolV2") do
             desc = tf.NodeDescription("MaxPoolV2")
@@ -18451,7 +18451,7 @@ begin
        end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing)
+        node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18471,7 +18471,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do
             desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble")
@@ -18493,7 +18493,7 @@ begin
         tf.add_input(desc, stamp_token_)
         tf.add_input(desc, tree_ensemble_serialized_)
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing)
+        node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -18513,7 +18513,7 @@ end

 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_,
old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) local desc tf.with_op_name(name, "LoadAndRemapMatrix") do desc = tf.NodeDescription("LoadAndRemapMatrix") @@ -18561,7 +18561,7 @@ begin desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) end res = tf.execute(desc) - node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18581,7 +18581,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalGradientDescent") do desc = tf.NodeDescription("SparseApplyProximalGradientDescent") @@ -18630,7 +18630,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18650,7 +18650,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFuncStateless") do desc = tf.NodeDescription("PyFuncStateless") @@ -18682,7 +18682,7 @@ begin desc["Tout"] = map(Base.identity, Tout) end res = tf.execute(desc) - node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18702,7 +18702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) local desc tf.with_op_name(name, "Where") do desc = tf.NodeDescription("Where") @@ -18718,7 +18718,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(where, [input_], name=nothing) + node = tf.TapeNode(where, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18738,7 +18738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, 
lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) local desc tf.with_op_name(name, "Mfcc") do desc = tf.NodeDescription("Mfcc") @@ -18780,7 +18780,7 @@ begin desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) end res = tf.execute(desc) - node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18800,7 +18800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) local desc tf.with_op_name(name, "CheckNumerics") do desc = tf.NodeDescription("CheckNumerics") @@ -18822,7 +18822,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing) + node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18842,7 +18842,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) local desc tf.with_op_name(name, "TPUCompilationResult") do desc @@ -18853,7 +18853,7 @@ begin function tpu_compilation_result_eager(; name=nothing) desc = tf.EagerOp("TPUCompilationResult") res = tf.execute(desc) - node = tf.TapeNode(tpu_compilation_result, [], name=nothing) + node = tf.TapeNode(tpu_compilation_result, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18873,7 +18873,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") @@ -18907,7 +18907,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, 
res) tf.add_node(res[1], node) return res[1] end @@ -18927,7 +18927,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanGrad") do desc = tf.NodeDescription("SparseSegmentMeanGrad") @@ -18958,7 +18958,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing) + node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -18978,7 +18978,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "TryRpc") do desc = tf.NodeDescription("TryRpc") @@ -19023,7 +19023,7 @@ begin desc["timeout_in_ms"] = Base.Int(timeout_in_ms) end res = tf.execute(desc) - node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) tf.add_node(res[1], node) return res end @@ -19043,7 +19043,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixTriangularSolve") do desc = tf.NodeDescription("BatchMatrixTriangularSolve") @@ -19076,7 +19076,7 @@ begin desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing) + node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19096,7 +19096,7 @@ end A graph node which represents a return value of a function. 
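The index attribute gives this value's position in the function's output list.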
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Retval") do desc = tf.NodeDescription("_Retval") @@ -19118,7 +19118,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing) + node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19138,7 +19138,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCounts") do desc = tf.NodeDescription("UniqueWithCounts") @@ -19165,7 +19165,7 @@ begin end desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing) + node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing, res) tf.add_node(res[1], node) return res end @@ -19185,7 +19185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Add") do desc = tf.NodeDescription("Add") @@ -19206,7 +19206,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(add, [x_, y_], name=nothing) + node = tf.TapeNode(add, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19226,7 +19226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalScanDataset") do desc = tf.NodeDescription("ExperimentalScanDataset") @@ -19284,7 +19284,7 @@ begin desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end res = tf.execute(desc) - node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19304,7 +19304,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignAddVariableOp") do desc = tf.NodeDescription("AssignAddVariableOp") @@ -19330,7 +19330,7 @@ begin end desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing) + node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19350,7 +19350,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SplitV") do desc = tf.NodeDescription("SplitV") @@ -19388,7 +19388,7 @@ begin desc["T"] = tf.data_type(value_) desc["Tlen"] = tf.data_type(size_splits_) res = tf.execute(desc) - node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing) + node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing, res) tf.add_node(res[1], node) return res end @@ -19408,7 +19408,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) local desc tf.with_op_name(name, "Assign") do desc = tf.NodeDescription("Assign") @@ -19441,7 +19441,7 @@ begin desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing) + node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19461,7 +19461,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolWithArgmax") do desc = tf.NodeDescription("MaxPoolWithArgmax") @@ -19500,7 +19500,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res end @@ -19520,7 +19520,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) local desc 
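# From this point each generated graph wrapper fills in `desc`: tf.with_op_name
# opens the named scope, a tf.NodeDescription is created for the op, converted
# Tensor inputs are attached with tf.add_input, and type attributes are read off
# the inputs. The paired *_eager function that follows builds a tf.EagerOp
# instead, executes it immediately, and (the change every hunk in this patch
# repeats) now also hands the execution results `res` to tf.TapeNode so the tape
# keeps the forward outputs for later gradient computation. A minimal sketch of
# that recorded eager pattern, using only the tf.EagerOp / tf.execute /
# tf.TapeNode calls already present in this file (the op and input names here
# are illustrative, and most wrappers attach several inputs and attributes):
#
#     desc = tf.EagerOp("QuantizedReluX")
#     features_ = convert(tf.TensorHandle, features_)
#     tf.add_input(desc, features_)
#     desc["Tinput"] = tf.data_type(features_)
#     res = tf.execute(desc)         # run eagerly; returns the op's outputs
#     node = tf.TapeNode(quantized_relu_x, [features_], name=nothing, res)
#     tf.add_node(res[1], node)      # record the op and its results on the tape
#     return res                     # multi-output ops return every result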
tf.with_op_name(name, "QuantizedReluX") do desc = tf.NodeDescription("QuantizedReluX") @@ -19559,7 +19559,7 @@ begin end desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing) + node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -19579,7 +19579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueue") do desc = tf.NodeDescription("RandomShuffleQueue") @@ -19637,7 +19637,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19657,7 +19657,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT2D") do desc = tf.NodeDescription("FFT2D") @@ -19673,7 +19673,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(fft2d, [input_], name=nothing) + node = tf.TapeNode(fft2d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19693,7 +19693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalThreadPoolDataset") @@ -19723,7 +19723,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19743,7 +19743,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") @@ -19779,7 +19779,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19799,7 +19799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNGrad") do desc = tf.NodeDescription("SparseSegmentSqrtNGrad") @@ -19830,7 +19830,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing) + node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19850,7 +19850,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) local desc tf.with_op_name(name, "Real") do desc = tf.NodeDescription("Real") @@ -19866,7 +19866,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(real, [input_], name=nothing) + node = tf.TapeNode(real, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19886,7 +19886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstage") do desc = tf.NodeDescription("OrderedMapUnstage") @@ -19934,7 +19934,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, 
dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19954,7 +19954,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT2D") do desc = tf.NodeDescription("RFFT2D") @@ -19972,7 +19972,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing) + node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -19992,7 +19992,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) local desc tf.with_op_name(name, "VarIsInitializedOp") do desc = tf.NodeDescription("VarIsInitializedOp") @@ -20006,7 +20006,7 @@ begin resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) - node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing) + node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20026,7 +20026,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") @@ -20048,7 +20048,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20068,7 +20068,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) local desc tf.with_op_name(name, "Atan2") do desc = tf.NodeDescription("Atan2") @@ -20089,7 +20089,7 @@ begin desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(atan2, [y_, x_], name=nothing) + node = tf.TapeNode(atan2, [y_, x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20109,7 +20109,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) local desc 
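# Eager wrappers recover type attributes from the inputs themselves: each input
# is converted to a tf.TensorHandle, then tf.data_type reads the handle's
# element type back and stores it on the op (for RandomPoisson below,
# desc["S"] from shape_ and desc["dtype"] from rate_), mirroring the T/Tidx
# inference the graph versions perform. Return conventions track the op's
# arity: single-output wrappers return res[1] after tf.add_node(res[1], node),
# while multi-output ops such as unique_with_counts or try_rpc above return
# the whole res collection.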
tf.with_op_name(name, "RandomPoisson") do desc = tf.NodeDescription("RandomPoisson") @@ -20155,7 +20155,7 @@ begin desc["S"] = tf.data_type(shape_) desc["dtype"] = tf.data_type(rate_) res = tf.execute(desc) - node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20175,7 +20175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) local desc tf.with_op_name(name, "ReverseSequence") do desc = tf.NodeDescription("ReverseSequence") @@ -20209,7 +20209,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tlen"] = tf.data_type(seq_lengths_) res = tf.execute(desc) - node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing) + node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20229,7 +20229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "OutfeedEnqueue") do desc = tf.NodeDescription("OutfeedEnqueue") @@ -20251,7 +20251,7 @@ begin end desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing) + node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20271,7 +20271,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Sub") do desc = tf.NodeDescription("Sub") @@ -20292,7 +20292,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(sub, [x_, y_], name=nothing) + node = tf.TapeNode(sub, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20312,7 +20312,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) local desc tf.with_op_name(name, "StringSplit") do desc = tf.NodeDescription("StringSplit") @@ -20341,7 +20341,7 @@ begin desc["skip_empty"] = Base.Bool(skip_empty) end res = tf.execute(desc) - node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing) + node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing, res) tf.add_node(res[1], node) return res end @@ -20361,7 +20361,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumprod") do desc = tf.NodeDescription("Cumprod") @@ -20396,7 +20396,7 @@ begin desc["T"] = tf.data_type(x_) desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing) + node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20416,7 +20416,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "QuantizedResizeBilinear") do desc = tf.NodeDescription("QuantizedResizeBilinear") @@ -20455,7 +20455,7 @@ begin end desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing) + node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing, res) tf.add_node(res[1], node) return res end @@ -20475,7 +20475,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleExample") do desc = tf.NodeDescription("ParseSingleExample") @@ -20534,7 +20534,7 @@ begin desc["dense_shapes"] = map(Base.identity, dense_shapes) end res = tf.execute(desc) - node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -20554,7 +20554,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "IsVariableInitialized") do desc = tf.NodeDescription("IsVariableInitialized") @@ -20576,7 +20576,7 @@ begin end desc["dtype"] = tf.data_type(ref_) res = tf.execute(desc) - node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing) + node = 
tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20596,7 +20596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") @@ -20618,7 +20618,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20638,7 +20638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListConcatV2") do desc = tf.NodeDescription("TensorListConcatV2") @@ -20679,7 +20679,7 @@ begin end desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat_v2, [input_handle_, element_shape_, leading_dims_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_concat_v2, [input_handle_, element_shape_, leading_dims_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res end @@ -20699,7 +20699,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV2") do desc = tf.NodeDescription("CudnnRNNV2") @@ -20777,7 +20777,7 @@ begin desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -20797,7 +20797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterSub") do desc = tf.NodeDescription("ResourceScatterSub") @@ -20830,7 +20830,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20850,7 +20850,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignAdd") do desc = tf.NodeDescription("AssignAdd") @@ -20877,7 +20877,7 @@ begin desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing) + node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20897,7 +20897,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorDataset") do desc = tf.NodeDescription("TensorDataset") @@ -20923,7 +20923,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing) + node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20943,7 +20943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) local desc tf.with_op_name(name, "Bucketize") do desc = tf.NodeDescription("Bucketize") @@ -20965,7 +20965,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing) + node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -20985,7 +20985,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMax") do desc = tf.NodeDescription("SparseReduceMax") @@ -21019,7 +21019,7 @@ 
begin end desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21039,7 +21039,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") @@ -21078,7 +21078,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -21098,7 +21098,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradWithShape") do desc = tf.NodeDescription("TensorArrayGradWithShape") @@ -21131,7 +21131,7 @@ begin desc["source"] = Base.String(source) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing) + node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) tf.add_node(res[1], node) return res end @@ -21151,7 +21151,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV3") do desc = tf.NodeDescription("TensorArrayCloseV3") @@ -21165,7 +21165,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing) + node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21185,7 +21185,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, 
scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") @@ -21215,7 +21215,7 @@ begin tf.add_input(desc, overlap_threshold_) tf.add_input(desc, score_threshold_) res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing) + node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21235,7 +21235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) local desc tf.with_op_name(name, "Pack") do desc = tf.NodeDescription("Pack") @@ -21269,7 +21269,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing) + node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21289,7 +21289,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV2") do desc = tf.NodeDescription("TensorArrayGradV2") @@ -21313,7 +21313,7 @@ begin desc["source"] = Base.String(source) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing) + node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21333,7 +21333,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignSubVariableOp") do desc = tf.NodeDescription("AssignSubVariableOp") @@ -21359,7 +21359,7 @@ begin end desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing) + node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21379,7 +21379,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT2D") do desc = tf.NodeDescription("BatchFFT2D") @@ -21393,7 +21393,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(batch_fft2d, [input_], name=nothing) + node = tf.TapeNode(batch_fft2d, [input_], 
name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21413,7 +21413,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "CloseSummaryWriter") do desc = tf.NodeDescription("CloseSummaryWriter") @@ -21427,7 +21427,7 @@ begin writer_ = convert(tf.TensorHandle, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) - node = tf.TapeNode(close_summary_writer, [writer_], name=nothing) + node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21447,7 +21447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) local desc tf.with_op_name(name, "Rank") do desc = tf.NodeDescription("Rank") @@ -21463,7 +21463,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(rank, [input_], name=nothing) + node = tf.TapeNode(rank, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21483,7 +21483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT3D") do desc = tf.NodeDescription("FFT3D") @@ -21499,7 +21499,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(fft3d, [input_], name=nothing) + node = tf.TapeNode(fft3d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21519,7 +21519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrl") do desc = tf.NodeDescription("ApplyFtrl") @@ -21576,7 +21576,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21596,7 +21596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) local desc tf.with_op_name(name, "Abort") do desc = tf.NodeDescription("Abort") @@ -21618,7 +21618,7 @@ begin desc["exit_without_error"] = Base.Bool(exit_without_error) end res = tf.execute(desc) - node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, 
exit_without_error=nothing) + node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21638,7 +21638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) local desc tf.with_op_name(name, "AudioSpectrogram") do desc = tf.NodeDescription("AudioSpectrogram") @@ -21670,7 +21670,7 @@ begin desc["magnitude_squared"] = Base.Bool(magnitude_squared) end res = tf.execute(desc) - node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21690,7 +21690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "VariableShape") do desc = tf.NodeDescription("VariableShape") @@ -21710,7 +21710,7 @@ begin desc["out_type"] = Base.identity(out_type) end res = tf.execute(desc) - node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing) + node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21730,7 +21730,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueueV2") do desc = tf.NodeDescription("FIFOQueueV2") @@ -21770,7 +21770,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21790,7 +21790,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Variable") do desc = tf.NodeDescription("Variable") @@ -21824,7 +21824,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, 
shared_name=nothing) + node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21844,7 +21844,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestCreateTreeVariable") do desc = tf.NodeDescription("TensorForestCreateTreeVariable") @@ -21862,7 +21862,7 @@ begin tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing) + node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21882,7 +21882,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradWithArgmax") @@ -21927,7 +21927,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Targmax"] = tf.data_type(argmax_) res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -21947,7 +21947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "RefSwitch") do desc = tf.NodeDescription("RefSwitch") @@ -21972,7 +21972,7 @@ begin tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing) + node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -21992,7 +21992,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) local desc tf.with_op_name(name, "SdcaFprint") do desc = tf.NodeDescription("SdcaFprint") @@ -22006,7 +22006,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(sdca_fprint, [input_], name=nothing) + node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22026,7 +22026,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalChooseFastestDataset") do desc = tf.NodeDescription("ExperimentalChooseFastestDataset") @@ -22064,7 +22064,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_choose_fastest_dataset, [input_datasets_], name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_choose_fastest_dataset, [input_datasets_], name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22084,7 +22084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyRelu") do desc = tf.NodeDescription("LeakyRelu") @@ -22106,7 +22106,7 @@ begin end desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing) + node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22126,7 +22126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) local desc tf.with_op_name(name, "IdentityN") do desc = tf.NodeDescription("IdentityN") @@ -22146,7 +22146,7 @@ begin desc["T"] = map(Base.identity, T) end res = tf.execute(desc) - node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing) + node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22166,7 +22166,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV2") do desc = tf.NodeDescription("CudnnRNNBackpropV2") @@ -22277,7 +22277,7 @@ begin desc["T"] = tf.data_type(output_c_backprop_) desc["T"] = tf.data_type(reserve_space_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, 
output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -22297,7 +22297,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "RequantizationRange") do desc = tf.NodeDescription("RequantizationRange") @@ -22326,7 +22326,7 @@ begin tf.add_input(desc, input_max_) desc["Tinput"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing) + node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -22346,7 +22346,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Maximum") do desc = tf.NodeDescription("Maximum") @@ -22367,7 +22367,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(maximum, [x_, y_], name=nothing) + node = tf.TapeNode(maximum, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22387,7 +22387,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) local desc tf.with_op_name(name, "Reshape") do desc = tf.NodeDescription("Reshape") @@ -22409,7 +22409,7 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tshape"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing) + node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22429,7 +22429,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "MatrixSolveLs") do desc = tf.NodeDescription("MatrixSolveLs") @@ -22460,7 +22460,7 @@ begin desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) - node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing) + node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22480,7 +22480,7 @@ end """ 
begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TFRecordDataset") do desc = tf.NodeDescription("TFRecordDataset") @@ -22502,7 +22502,7 @@ begin tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) res = tf.execute(desc) - node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing) + node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22522,7 +22522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") @@ -22552,7 +22552,7 @@ begin desc["logits_dimension"] = Base.Int(logits_dimension) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22572,7 +22572,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) local desc tf.with_op_name(name, "HSVToRGB") do desc = tf.NodeDescription("HSVToRGB") @@ -22588,7 +22588,7 @@ begin tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing) + node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22608,7 +22608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") @@ -22638,7 +22638,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, 
output_shapes=nothing) + node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22658,7 +22658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterDiv") do desc = tf.NodeDescription("ScatterDiv") @@ -22692,7 +22692,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22712,7 +22712,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) local desc tf.with_op_name(name, "DecodeWav") do desc = tf.NodeDescription("DecodeWav") @@ -22743,7 +22743,7 @@ begin desc["desired_samples"] = Base.Int(desired_samples) end res = tf.execute(desc) - node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing) + node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing, res) tf.add_node(res[1], node) return res end @@ -22763,7 +22763,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log") do desc = tf.NodeDescription("Log") @@ -22779,7 +22779,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(log, [x_], name=nothing) + node = tf.TapeNode(log, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22799,7 +22799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "SaveV2") do desc = tf.NodeDescription("SaveV2") @@ -22831,7 +22831,7 @@ begin desc["dtypes"] = map(Base.identity, dtypes) end res = tf.execute(desc) - node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing) + node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22851,7 +22851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) local desc tf.with_op_name(name, "DeepCopy") do desc = tf.NodeDescription("DeepCopy") @@ -22867,7 +22867,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(deep_copy, [x_], name=nothing) + node = tf.TapeNode(deep_copy, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22887,7 +22887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ModelDataset") do desc = tf.NodeDescription("ModelDataset") @@ -22913,7 +22913,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -22933,7 +22933,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSequenceExample") do desc = tf.NodeDescription("ParseSequenceExample") @@ -23050,7 +23050,7 @@ begin desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) end res = tf.execute(desc) - node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + node = tf.TapeNode(parse_sequence_example, [serialized_, 
debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -23070,7 +23070,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sinh") do desc = tf.NodeDescription("Sinh") @@ -23086,7 +23086,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(sinh, [x_], name=nothing) + node = tf.TapeNode(sinh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23106,7 +23106,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorV2") do desc = tf.NodeDescription("IteratorV2") @@ -23140,7 +23140,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23160,7 +23160,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV2") do desc = tf.NodeDescription("TensorArrayWriteV2") @@ -23188,7 +23188,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23208,7 +23208,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListElementShape") do desc = tf.NodeDescription("TensorListElementShape") @@ -23228,7 +23228,7 @@ begin desc["shape_type"] = Base.identity(shape_type) end res = tf.execute(desc) - node = 
tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23248,7 +23248,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSizeV2") do desc = tf.NodeDescription("QueueSizeV2") @@ -23262,7 +23262,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(queue_size_v2, [handle_], name=nothing) + node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23282,7 +23282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) local desc tf.with_op_name(name, "Expm1") do desc = tf.NodeDescription("Expm1") @@ -23298,7 +23298,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(expm1, [x_], name=nothing) + node = tf.TapeNode(expm1, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23318,7 +23318,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixBandPart") do desc = tf.NodeDescription("BatchMatrixBandPart") @@ -23342,7 +23342,7 @@ begin tf.add_input(desc, num_upper_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing) + node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23362,7 +23362,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ConcatenateDataset") do desc = tf.NodeDescription("ConcatenateDataset") @@ -23392,7 +23392,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23412,7 +23412,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_gif_graph(contents_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
decode_gif_graph(contents_; name=nothing) local desc tf.with_op_name(name, "DecodeGif") do desc = tf.NodeDescription("DecodeGif") @@ -23426,7 +23426,7 @@ begin contents_ = convert(tf.TensorHandle, contents_) tf.add_input(desc, contents_) res = tf.execute(desc) - node = tf.TapeNode(decode_gif, [contents_], name=nothing) + node = tf.TapeNode(decode_gif, [contents_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23446,7 +23446,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicate") do desc = tf.NodeDescription("TPUReplicate") @@ -23556,7 +23556,7 @@ begin desc["step_marker_location"] = Base.String(step_marker_location) end res = tf.execute(desc) - node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23576,7 +23576,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEigV2") do desc = tf.NodeDescription("BatchSelfAdjointEigV2") @@ -23603,7 +23603,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing) + node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) tf.add_node(res[1], node) return res end @@ -23623,7 +23623,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Shape") do desc = tf.NodeDescription("Shape") @@ -23645,7 +23645,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing) + node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23665,7 +23665,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RepeatDataset") do desc = tf.NodeDescription("RepeatDataset") @@ -23695,7 +23695,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23715,7 +23715,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradBoxes") do desc = tf.NodeDescription("CropAndResizeGradBoxes") @@ -23749,7 +23749,7 @@ begin end desc["T"] = tf.data_type(image_) res = tf.execute(desc) - node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing) + node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23769,7 +23769,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "ReciprocalGrad") do desc = tf.NodeDescription("ReciprocalGrad") @@ -23790,7 +23790,7 @@ begin desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) res = tf.execute(desc) - node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing) + node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23810,7 +23810,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixSolve") do desc = tf.NodeDescription("BatchMatrixSolve") @@ -23837,7 +23837,7 @@ begin desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) 
res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing) + node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23857,7 +23857,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTableV2") do desc = tf.NodeDescription("MutableHashTableV2") @@ -23897,7 +23897,7 @@ begin desc["value_dtype"] = Base.identity(value_dtype) end res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23917,7 +23917,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "Exit") do desc = tf.NodeDescription("Exit") @@ -23933,7 +23933,7 @@ begin tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(exit, [data_], name=nothing) + node = tf.TapeNode(exit, [data_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -23953,7 +23953,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRN") do desc = tf.NodeDescription("LRN") @@ -23993,7 +23993,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24013,7 +24013,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "StatelessIf") do desc = tf.NodeDescription("StatelessIf") @@ -24057,7 +24057,7 @@ begin end desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) - node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, 
else_branch=nothing) + node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24077,7 +24077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListSetItem") do desc = tf.NodeDescription("TensorListSetItem") @@ -24107,7 +24107,7 @@ begin end desc["element_dtype"] = tf.data_type(item_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24127,7 +24127,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rsqrt") do desc = tf.NodeDescription("Rsqrt") @@ -24143,7 +24143,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(rsqrt, [x_], name=nothing) + node = tf.TapeNode(rsqrt, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24163,7 +24163,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") @@ -24258,7 +24258,7 @@ begin desc["Tbias"] = tf.data_type(bias_) desc["Tsummand"] = tf.data_type(summand_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -24278,7 +24278,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function delete_session_tensor_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) local desc tf.with_op_name(name, "DeleteSessionTensor") do desc = tf.NodeDescription("DeleteSessionTensor") @@ -24292,7 +24292,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing) + node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24312,7 +24312,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) local desc tf.with_op_name(name, "OneHot") do desc = tf.NodeDescription("OneHot") @@ -24356,7 +24356,7 @@ begin desc["T"] = tf.data_type(on_value_) desc["T"] = tf.data_type(off_value_) res = tf.execute(desc) - node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing) + node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24376,7 +24376,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrl") do desc = tf.NodeDescription("ResourceApplyFtrl") @@ -24430,7 +24430,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24450,7 +24450,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc 
tf.with_op_name(name, "SdcaOptimizerV2") do desc = tf.NodeDescription("SdcaOptimizerV2") @@ -24559,7 +24559,7 @@ begin desc["num_inner_iterations"] = Base.Int(num_inner_iterations) end res = tf.execute(desc) - node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) tf.add_node(res[1], node) return res end @@ -24579,7 +24579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueue") do desc = tf.NodeDescription("QueueEnqueue") @@ -24609,7 +24609,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24629,7 +24629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "ConditionalAccumulator") do desc = tf.NodeDescription("ConditionalAccumulator") @@ -24669,7 +24669,7 @@ begin desc["reduction_type"] = Base.String(reduction_type) end res = tf.execute(desc) - node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24689,7 +24689,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, 
beam_width=nothing, top_paths=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCBeamSearchDecoder") do desc = tf.NodeDescription("CTCBeamSearchDecoder") @@ -24730,7 +24730,7 @@ begin desc["merge_repeated"] = Base.Bool(merge_repeated) end res = tf.execute(desc) - node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing, res) tf.add_node(res[1], node) return res end @@ -24750,7 +24750,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReader") do desc = tf.NodeDescription("WholeFileReader") @@ -24772,7 +24772,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24792,7 +24792,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyRMSProp") do desc = tf.NodeDescription("ApplyRMSProp") @@ -24849,7 +24849,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24869,7 +24869,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) local desc tf.with_op_name(name, "AdjustSaturation") do desc = tf.NodeDescription("AdjustSaturation") @@ -24889,7 +24889,7 @@ begin tf.add_input(desc, scale_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing) + node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24909,7 +24909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) local desc tf.with_op_name(name, "LookupTableRemoveV2") do desc = tf.NodeDescription("LookupTableRemoveV2") 
@@ -24929,7 +24929,7 @@ begin tf.add_input(desc, keys_) desc["Tin"] = tf.data_type(keys_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing) + node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24949,7 +24949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueClose") do desc = tf.NodeDescription("QueueClose") @@ -24969,7 +24969,7 @@ begin desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end res = tf.execute(desc) - node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing) + node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -24989,7 +24989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "PrefetchDataset") do desc = tf.NodeDescription("PrefetchDataset") @@ -25019,7 +25019,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25039,7 +25039,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "MapDataset") do desc = tf.NodeDescription("MapDataset") @@ -25093,7 +25093,7 @@ begin desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end res = tf.execute(desc) - node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25113,7 +25113,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBias") do desc = tf.NodeDescription("QuantizedConv2DWithBias") @@ -25184,7 +25184,7 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_with_bias, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -25204,7 +25204,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV3") do desc = tf.NodeDescription("TensorArrayReadV3") @@ -25232,7 +25232,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing) + node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25252,7 +25252,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "Identity") do desc = tf.NodeDescription("Identity") @@ -25268,7 +25268,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(identity, [input_], name=nothing) + node = tf.TapeNode(identity, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25288,7 +25288,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) local desc tf.with_op_name(name, "Print") do desc = tf.NodeDescription("Print") @@ -25332,7 +25332,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25352,7 +25352,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastSend") do desc = tf.NodeDescription("CollectiveBcastSend") @@ -25392,7 +25392,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25412,7 +25412,7 @@ end Converts a list of tensors to an array of tensors. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) local desc tf.with_op_name(name, "_ListToArray") do desc = tf.NodeDescription("_ListToArray") @@ -25443,7 +25443,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing) + node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -25463,7 +25463,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) local desc tf.with_op_name(name, "NegTrain") do desc = tf.NodeDescription("NegTrain") @@ -25505,7 +25505,7 @@ begin desc["num_negative_samples"] = Base.Int(num_negative_samples) end res = tf.execute(desc) - node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing) + node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25525,7 +25525,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) local desc tf.with_op_name(name, "WorkerHeartbeat") do desc = tf.NodeDescription("WorkerHeartbeat") @@ -25539,7 +25539,7 @@ begin request_ = convert(tf.TensorHandle, request_) tf.add_input(desc, request_) res = tf.execute(desc) - node = tf.TapeNode(worker_heartbeat, [request_], name=nothing) + node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25559,7 +25559,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) local desc tf.with_op_name(name, "MergeV2Checkpoints") do desc = tf.NodeDescription("MergeV2Checkpoints") @@ -25583,7 +25583,7 @@ begin desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) end res = tf.execute(desc) - node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing) + node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25603,7 +25603,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) local desc tf.with_op_name(name, "CollectivePermute") do desc = tf.NodeDescription("CollectivePermute") @@ -25623,7 +25623,7 @@ begin tf.add_input(desc, source_target_pairs_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing) + node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25643,7 +25643,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV3") do desc = tf.NodeDescription("QuantizeAndDequantizeV3") @@ -25685,7 +25685,7 @@ begin desc["T"] = tf.data_type(input_min_) desc["T"] = tf.data_type(input_max_) res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing) + node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25705,7 +25705,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTable") do desc = tf.NodeDescription("HashTable") @@ -25745,7 +25745,7 @@ begin desc["value_dtype"] = Base.identity(value_dtype) end res = tf.execute(desc) - node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + node = tf.TapeNode(hash_table, [], 
name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25765,7 +25765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftplusGrad") do desc = tf.NodeDescription("SoftplusGrad") @@ -25786,7 +25786,7 @@ begin desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing) + node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25806,7 +25806,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReader") do desc = tf.NodeDescription("FixedLengthRecordReader") @@ -25852,7 +25852,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25872,7 +25872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV2") do desc = tf.NodeDescription("TensorArrayScatterV2") @@ -25900,7 +25900,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25920,7 +25920,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) local desc tf.with_op_name(name, "DecodeJSONExample") do desc = tf.NodeDescription("DecodeJSONExample") @@ -25934,7 +25934,7 @@ begin json_examples_ = convert(tf.TensorHandle, json_examples_) tf.add_input(desc, json_examples_) res = tf.execute(desc) - node = tf.TapeNode(decode_json_example, 
[json_examples_], name=nothing) + node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -25954,7 +25954,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGradV2") do desc = tf.NodeDescription("FusedBatchNormGradV2") @@ -26019,7 +26019,7 @@ begin desc["U"] = tf.data_type(reserve_space_1_) desc["U"] = tf.data_type(reserve_space_2_) res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -26039,7 +26039,7 @@ end Cast x of type SrcT to y of DstT. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "_HostCast") do desc = tf.NodeDescription("_HostCast") @@ -26073,7 +26073,7 @@ begin end desc["SrcT"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26093,7 +26093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReader") do desc = tf.NodeDescription("TFRecordReader") @@ -26121,7 +26121,7 @@ begin desc["compression_type"] = Base.String(compression_type) end res = tf.execute(desc) - node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26141,7 +26141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, 
output_shapes=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "While") do desc = tf.NodeDescription("While") @@ -26185,7 +26185,7 @@ begin desc["parallel_iterations"] = Base.Int(parallel_iterations) end res = tf.execute(desc) - node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26205,7 +26205,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "StatelessMultinomial") do desc = tf.NodeDescription("StatelessMultinomial") @@ -26237,7 +26237,7 @@ begin desc["T"] = tf.data_type(logits_) desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) - node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing) + node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26257,7 +26257,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterAdd") do desc = tf.NodeDescription("ScatterAdd") @@ -26291,7 +26291,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26311,7 +26311,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) local desc tf.with_op_name(name, "Conj") do desc = tf.NodeDescription("Conj") @@ -26327,7 +26327,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(conj, [input_], name=nothing) + node = tf.TapeNode(conj, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26347,7 +26347,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ParallelDynamicStitch") do desc = tf.NodeDescription("ParallelDynamicStitch") @@ -26373,7 +26373,7 @@ begin end desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing) + 
node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26393,7 +26393,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) local desc tf.with_op_name(name, "MakeIterator") do desc = tf.NodeDescription("MakeIterator") @@ -26411,7 +26411,7 @@ begin tf.add_input(desc, dataset_) tf.add_input(desc, iterator_) res = tf.execute(desc) - node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing) + node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26431,7 +26431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT3D") do desc = tf.NodeDescription("RFFT3D") @@ -26449,7 +26449,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing) + node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26469,7 +26469,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSumSparse") do desc = tf.NodeDescription("SparseReduceSumSparse") @@ -26508,7 +26508,7 @@ begin end desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res end @@ -26528,7 +26528,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveGather") do desc = tf.NodeDescription("CollectiveGather") @@ -26568,7 +26568,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(collective_gather, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + node = tf.TapeNode(collective_gather, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26588,7 +26588,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) local desc tf.with_op_name(name, "CombinedNonMaxSuppression") do desc = tf.NodeDescription("CombinedNonMaxSuppression") @@ -26633,7 +26633,7 @@ begin desc["pad_per_class"] = Base.Bool(pad_per_class) end res = tf.execute(desc) - node = tf.TapeNode(combined_non_max_suppression, [boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_], name=nothing, pad_per_class=nothing) + node = tf.TapeNode(combined_non_max_suppression, [boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_], name=nothing, pad_per_class=nothing, res) tf.add_node(res[1], node) return res end @@ -26653,7 +26653,7 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) local desc tf.with_op_name(name, "_ScopedAllocator") do desc = tf.NodeDescription("_ScopedAllocator") @@ -26693,7 +26693,7 @@ begin desc["expected_call_count"] = Base.Int(expected_call_count) end res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26713,7 +26713,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") @@ -26759,7 +26759,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26779,7 +26779,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) local desc tf.with_op_name(name, "SparseAdd") do desc = tf.NodeDescription("SparseAdd") @@ -26827,7 +26827,7 @@ begin desc["T"] = tf.data_type(b_values_) desc["Treal"] = tf.data_type(thresh_) res = tf.execute(desc) - node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing) + node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -26847,7 +26847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCGreedyDecoder") do desc = tf.NodeDescription("CTCGreedyDecoder") @@ -26876,7 +26876,7 @@ begin desc["merge_repeated"] = Base.Bool(merge_repeated) end res = tf.execute(desc) - node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing) + node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing, res) tf.add_node(res[1], node) return res end @@ -26896,7 +26896,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) local desc tf.with_op_name(name, "ImmutableConst") do desc = tf.NodeDescription("ImmutableConst") @@ -26924,7 +26924,7 @@ begin desc["memory_region_name"] = Base.String(memory_region_name) end res = tf.execute(desc) - node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26944,7 +26944,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) local desc tf.with_op_name(name, "ConsumeMutexLock") do desc = tf.NodeDescription("ConsumeMutexLock") @@ -26958,7 +26958,7 @@ begin mutex_lock_ = convert(tf.TensorHandle, mutex_lock_) tf.add_input(desc, mutex_lock_) res = tf.execute(desc) - node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing) + node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -26978,7 +26978,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
 greater_equal_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "GreaterEqual") do
             desc = tf.NodeDescription("GreaterEqual")
@@ -26999,7 +26999,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(greater_equal, [x_, y_], name=nothing)
+        node = tf.TapeNode(greater_equal, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -27019,7 +27019,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
         local desc
         tf.with_op_name(name, "InitializeTableFromTextFileV2") do
             desc = tf.NodeDescription("InitializeTableFromTextFileV2")
@@ -27061,7 +27061,7 @@ begin
            desc["delimiter"] = Base.String(delimiter)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+        node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -27081,7 +27081,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
         local desc
         tf.with_op_name(name, "QueueDequeue") do
             desc = tf.NodeDescription("QueueDequeue")
@@ -27107,7 +27107,7 @@ begin
            desc["timeout_ms"] = Base.Int(timeout_ms)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing)
+        node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -27127,7 +27127,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function equal_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function equal_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "Equal") do
             desc = tf.NodeDescription("Equal")
@@ -27148,7 +27148,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(equal, [x_, y_], name=nothing)
+        node = tf.TapeNode(equal, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -27168,7 +27168,7 @@ end
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "IteratorFromStringHandle") do
             desc = tf.NodeDescription("IteratorFromStringHandle")
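# Every eager-mode wrapper in these hunks follows one skeleton: convert each
# input with convert(tf.TensorHandle, x), register it with tf.add_input, infer
# dtype attributes from the inputs via tf.data_type, then run tf.execute. A
# condensed sketch of that flow with stand-in containers (a Dict plays
# tf.NodeDescription; none of the names below are the real tf API):
function eager_call_sketch(op_name, inputs...; attrs...)
    desc = Dict{String,Any}("op" => op_name)             # plays tf.NodeDescription(op_name)
    desc["inputs"] = collect(Any, inputs)                # plays convert + tf.add_input per input
    isempty(inputs) || (desc["T"] = eltype(first(inputs)))  # plays desc["T"] = tf.data_type(x_)
    for (k, v) in pairs(attrs)
        v === nothing || (desc[String(k)] = v)           # optional attrs only when supplied
    end
    return Any[desc["inputs"]]                           # plays res = tf.execute(desc)
end
eager_call_sketch("GreaterEqual", [1, 2], [2, 2]; name = "ge")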
@@ -27194,7 +27194,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27214,7 +27214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListSplit") do desc = tf.NodeDescription("TensorListSplit") @@ -27252,7 +27252,7 @@ begin desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27272,7 +27272,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalMaxPool") do desc = tf.NodeDescription("FractionalMaxPool") @@ -27329,7 +27329,7 @@ begin end desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -27349,7 +27349,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) local desc tf.with_op_name(name, "ScatterNd") do desc = tf.NodeDescription("ScatterNd") @@ -27377,7 +27377,7 @@ begin desc["T"] = tf.data_type(updates_) desc["Tindices"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing) + node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27397,7 +27397,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListScatterIntoExistingList") do desc = tf.NodeDescription("TensorListScatterIntoExistingList") @@ -27427,7 +27427,7 @@ begin end desc["element_dtype"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter_into_existing_list, [input_handle_, tensor_, indices_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_scatter_into_existing_list, [input_handle_, tensor_, indices_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27447,7 +27447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) local desc tf.with_op_name(name, "Select") do desc = tf.NodeDescription("Select") @@ -27472,7 +27472,7 @@ begin desc["T"] = tf.data_type(t_) desc["T"] = tf.data_type(e_) res = tf.execute(desc) - node = tf.TapeNode(select, [condition_, t_, e_], name=nothing) + node = tf.TapeNode(select, [condition_, t_, e_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27492,7 +27492,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Min") do desc = tf.NodeDescription("Min") @@ -27521,7 +27521,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) - node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27541,7 +27541,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRNGrad") do desc = tf.NodeDescription("LRNGrad") @@ -27591,7 +27591,7 @@ begin desc["T"] = tf.data_type(input_image_) desc["T"] = tf.data_type(output_image_) res = tf.execute(desc) - node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27611,7 +27611,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# 
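# Two return conventions appear throughout: single-output ops (Select, Min,
# ScatterNd above) return res[1], while multi-output ops (FractionalMaxPool
# above, SparseAdd earlier) return the whole res. The generated code
# hard-codes the choice per op; the length-based dispatch below is only a toy
# illustration of the same convention:
unwrap(res) = length(res) == 1 ? res[1] : res
unwrap(([1.0, 2.0],))                       # one output   -> the tensor itself
unwrap(([0 0; 1 1], [3.0, 4.0], [2, 2]))    # three outputs -> the tuple unchanged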
tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoissonV2") do desc = tf.NodeDescription("RandomPoissonV2") @@ -27663,7 +27663,7 @@ begin desc["S"] = tf.data_type(shape_) desc["R"] = tf.data_type(rate_) res = tf.execute(desc) - node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27683,7 +27683,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueue") do desc = tf.NodeDescription("FIFOQueue") @@ -27723,7 +27723,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27743,7 +27743,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") @@ -27791,7 +27791,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27811,7 +27811,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalNonSerializableDataset") do desc = tf.NodeDescription("ExperimentalNonSerializableDataset") @@ -27837,7 +27837,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], 
name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27857,7 +27857,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") @@ -27887,7 +27887,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27907,7 +27907,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropFilter") do desc = tf.NodeDescription("Dilation2DBackpropFilter") @@ -27951,7 +27951,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing) + node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -27971,7 +27971,7 @@ end output = cond ? 
then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "_If") do desc = tf.NodeDescription("_If") @@ -28015,7 +28015,7 @@ begin end desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) - node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28035,7 +28035,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAddGrad") do desc = tf.NodeDescription("BiasAddGrad") @@ -28057,7 +28057,7 @@ begin end desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing) + node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28077,7 +28077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeStateV2") do desc = tf.NodeDescription("ReaderSerializeStateV2") @@ -28091,7 +28091,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28111,7 +28111,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "WrapDatasetVariant") do desc = tf.NodeDescription("WrapDatasetVariant") @@ -28125,7 +28125,7 @@ begin input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) - node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing) + node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28145,7 +28145,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, 
sloppy=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ParallelInterleaveDatasetV2") do desc = tf.NodeDescription("ParallelInterleaveDatasetV2") @@ -28205,7 +28205,7 @@ begin desc["sloppy"] = Base.Bool(sloppy) end res = tf.execute(desc) - node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28225,7 +28225,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") @@ -28274,7 +28274,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28294,7 +28294,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyRMSProp") do desc = tf.NodeDescription("ResourceApplyRMSProp") @@ -28348,7 +28348,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28368,7 +28368,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "SparseAccumulatorTakeGradient") do desc = tf.NodeDescription("SparseAccumulatorTakeGradient") @@ -28397,7 +28397,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing) + node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res end @@ -28417,7 +28417,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLMDBDataset") do desc = tf.NodeDescription("ExperimentalLMDBDataset") @@ -28443,7 +28443,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28463,7 +28463,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackCloseV2") do desc = tf.NodeDescription("StackCloseV2") @@ -28477,7 +28477,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(stack_close_v2, [handle_], name=nothing) + node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28497,7 +28497,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapSize") do desc = tf.NodeDescription("MapSize") @@ -28537,7 +28537,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28557,7 +28557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagradDA") do desc = tf.NodeDescription("ResourceApplyAdagradDA") @@ -28610,7 +28610,7 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28630,7 +28630,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSize") do desc = tf.NodeDescription("TensorForestTreeSize") @@ -28644,7 +28644,7 @@ begin tree_handle_ = convert(tf.TensorHandle, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing) + node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28664,7 +28664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDiagPart") do desc = tf.NodeDescription("MatrixDiagPart") @@ -28680,7 +28680,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(matrix_diag_part, [input_], name=nothing) + node = tf.TapeNode(matrix_diag_part, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28700,7 +28700,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") @@ -28714,7 +28714,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28734,7 +28734,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc 
tf.with_op_name(name, "TensorArraySplitV3") do desc = tf.NodeDescription("TensorArraySplitV3") @@ -28762,7 +28762,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28782,7 +28782,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToDense") do desc = tf.NodeDescription("SparseToDense") @@ -28822,7 +28822,7 @@ begin desc["T"] = tf.data_type(sparse_values_) desc["T"] = tf.data_type(default_value_) res = tf.execute(desc) - node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing) + node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28842,7 +28842,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "TPUReplicatedInput") do desc = tf.NodeDescription("TPUReplicatedInput") @@ -28864,7 +28864,7 @@ begin end desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing) + node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28884,7 +28884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackClose") do desc = tf.NodeDescription("StackClose") @@ -28898,7 +28898,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(stack_close, [handle_], name=nothing) + node = tf.TapeNode(stack_close, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -28918,7 +28918,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeManySparse") do desc = tf.NodeDescription("DeserializeManySparse") @@ -28943,7 +28943,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing) + node = 
tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res end @@ -28963,7 +28963,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceRecv") do desc = tf.NodeDescription("_NcclReduceRecv") @@ -28997,7 +28997,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29017,7 +29017,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPadGrad") do desc = tf.NodeDescription("MirrorPadGrad") @@ -29045,7 +29045,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) - node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing) + node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29065,7 +29065,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastArgs") do desc = tf.NodeDescription("BroadcastArgs") @@ -29086,7 +29086,7 @@ begin desc["T"] = tf.data_type(s0_) desc["T"] = tf.data_type(s1_) res = tf.execute(desc) - node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing) + node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29106,7 +29106,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessTruncatedNormal") do desc = tf.NodeDescription("StatelessTruncatedNormal") @@ -29134,7 +29134,7 @@ begin desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) - node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing) + node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29154,7 +29154,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) + #= 
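
The eager bodies around here (`broadcast_args`, `stateless_truncated_normal`, `mirror_pad_grad`) all stamp type attributes from the live inputs, e.g. `desc["T"] = tf.data_type(s0_)`, rather than from graph metadata. A hedged usage sketch, assuming the un-suffixed eager method `broadcast_args` is reachable and that `convert(tf.TensorHandle, ::Array)` exists, as the generated `convert(tf.TensorHandle, s0_)` lines suggest:

# Hypothetical eager call; the op name comes from the surrounding hunks, but
# reachability of the un-suffixed method is an assumption.
s0 = convert(tf.TensorHandle, Int32[2, 3, 1])   # shape of one operand
s1 = convert(tf.TensorHandle, Int32[3, 1])      # shape of the other
shape = broadcast_args(s0, s1)  # desc["T"] is filled from tf.data_type(s0) at call time
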
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) local desc tf.with_op_name(name, "RegexFullMatch") do desc = tf.NodeDescription("RegexFullMatch") @@ -29172,7 +29172,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, pattern_) res = tf.execute(desc) - node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing) + node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29192,7 +29192,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "UnwrapDatasetVariant") do desc = tf.NodeDescription("UnwrapDatasetVariant") @@ -29206,7 +29206,7 @@ begin input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) - node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing) + node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29226,7 +29226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) local desc tf.with_op_name(name, "Empty") do desc = tf.NodeDescription("Empty") @@ -29252,7 +29252,7 @@ begin desc["init"] = Base.Bool(init) end res = tf.execute(desc) - node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing) + node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29272,7 +29272,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeueTuple") do desc = tf.NodeDescription("OutfeedDequeueTuple") @@ -29300,7 +29300,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29320,7 +29320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Div") do desc = tf.NodeDescription("Div") @@ -29341,7 +29341,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(div, [x_, y_], name=nothing) + node = tf.TapeNode(div, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29361,7 +29361,7 @@ end """ begin - 
#= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Barrier") do desc = tf.NodeDescription("Barrier") @@ -29401,7 +29401,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29421,7 +29421,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateDiv") do desc = tf.NodeDescription("TruncateDiv") @@ -29442,7 +29442,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(truncate_div, [x_, y_], name=nothing) + node = tf.TapeNode(truncate_div, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29462,7 +29462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) local desc tf.with_op_name(name, "UnicodeEncode") do desc = tf.NodeDescription("UnicodeEncode") @@ -29498,7 +29498,7 @@ begin desc["replacement_char"] = Base.Int(replacement_char) end res = tf.execute(desc) - node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29518,7 +29518,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "MergeSummary") do desc = tf.NodeDescription("MergeSummary") @@ -29538,7 +29538,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing) + node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29558,7 +29558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_queue_graph(resource_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
fake_queue_graph(resource_; name=nothing) local desc tf.with_op_name(name, "FakeQueue") do desc = tf.NodeDescription("FakeQueue") @@ -29572,7 +29572,7 @@ begin resource_ = convert(tf.TensorHandle, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) - node = tf.TapeNode(fake_queue, [resource_], name=nothing) + node = tf.TapeNode(fake_queue, [resource_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29592,7 +29592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchCholesky") do desc = tf.NodeDescription("BatchCholesky") @@ -29608,7 +29608,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_cholesky, [input_], name=nothing) + node = tf.TapeNode(batch_cholesky, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29628,7 +29628,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Iterator") do desc = tf.NodeDescription("Iterator") @@ -29662,7 +29662,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29682,7 +29682,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI1e") do desc = tf.NodeDescription("BesselI1e") @@ -29698,7 +29698,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(bessel_i1e, [x_], name=nothing) + node = tf.TapeNode(bessel_i1e, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29718,7 +29718,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) local desc tf.with_op_name(name, "ImportEvent") do desc = tf.NodeDescription("ImportEvent") @@ -29736,7 +29736,7 @@ begin tf.add_input(desc, writer_) tf.add_input(desc, event_) res = tf.execute(desc) - node = tf.TapeNode(import_event, [writer_, event_], name=nothing) + node = tf.TapeNode(import_event, [writer_, event_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29756,7 +29756,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, 
output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) local desc tf.with_op_name(name, "QuantizedInstanceNorm") do desc = tf.NodeDescription("QuantizedInstanceNorm") @@ -29815,7 +29815,7 @@ begin end desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing, res) tf.add_node(res[1], node) return res end @@ -29835,7 +29835,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") @@ -29877,7 +29877,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29897,7 +29897,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV3") do desc = tf.NodeDescription("TensorArrayWriteV3") @@ -29925,7 +29925,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -29945,7 +29945,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToDenseSetOperation") do desc = 
tf.NodeDescription("DenseToDenseSetOperation") @@ -29983,7 +29983,7 @@ begin desc["T"] = tf.data_type(set1_) desc["T"] = tf.data_type(set2_) res = tf.execute(desc) - node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing) + node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing, res) tf.add_node(res[1], node) return res end @@ -30003,7 +30003,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) local desc tf.with_op_name(name, "EncodeJpeg") do desc = tf.NodeDescription("EncodeJpeg") @@ -30071,7 +30071,7 @@ begin desc["xmp_metadata"] = Base.String(xmp_metadata) end res = tf.execute(desc) - node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30091,7 +30091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceUpdate") do desc = tf.NodeDescription("InplaceUpdate") @@ -30116,7 +30116,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(v_) res = tf.execute(desc) - node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing) + node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30136,7 +30136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedPadConv2D") do desc = tf.NodeDescription("FusedPadConv2D") @@ -30179,7 +30179,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30199,7 +30199,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu") do desc = tf.NodeDescription("QuantizedRelu") @@ -30234,7 +30234,7 @@ begin end desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing) + node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -30254,7 +30254,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) local desc tf.with_op_name(name, "GatherNd") do desc = tf.NodeDescription("GatherNd") @@ -30277,7 +30277,7 @@ begin desc["Tparams"] = tf.data_type(params_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing) + node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30297,7 +30297,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "Placeholder") do desc = tf.NodeDescription("Placeholder") @@ -30319,7 +30319,7 @@ begin desc["shape"] = Base.identity(shape) end res = tf.execute(desc) - node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing) + node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30339,7 +30339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterByLastComponentDataset") do desc = tf.NodeDescription("FilterByLastComponentDataset") @@ -30365,7 +30365,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30385,7 +30385,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, 
clip_value_max_; name=nothing) local desc tf.with_op_name(name, "ClipByValue") do desc = tf.NodeDescription("ClipByValue") @@ -30411,7 +30411,7 @@ begin desc["T"] = tf.data_type(clip_value_min_) desc["T"] = tf.data_type(clip_value_max_) res = tf.execute(desc) - node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing) + node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30431,7 +30431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) local desc tf.with_op_name(name, "ImageSummary") do desc = tf.NodeDescription("ImageSummary") @@ -30463,7 +30463,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing) + node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30483,7 +30483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") @@ -30522,7 +30522,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -30542,7 +30542,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) local desc tf.with_op_name(name, "StringJoin") do desc = tf.NodeDescription("StringJoin") @@ -30568,7 +30568,7 @@ begin desc["separator"] = Base.String(separator) end res = tf.execute(desc) - node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing) + node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30588,7 +30588,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local 
desc tf.with_op_name(name, "ResourceScatterNdAdd") do desc = tf.NodeDescription("ResourceScatterNdAdd") @@ -30621,7 +30621,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30641,7 +30641,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") @@ -30665,7 +30665,7 @@ begin desc["num_streams"] = Base.Int(num_streams) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30685,7 +30685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LeftShift") do desc = tf.NodeDescription("LeftShift") @@ -30706,7 +30706,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(left_shift, [x_, y_], name=nothing) + node = tf.TapeNode(left_shift, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30726,7 +30726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "RequantizePerChannel") do desc = tf.NodeDescription("RequantizePerChannel") @@ -30769,7 +30769,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(requantize_per_channel, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing) + node = tf.TapeNode(requantize_per_channel, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -30789,7 +30789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + #= 
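
One convention worth noting across these hunks: `tf.execute(desc)` evidently returns a vector of output handles. Single-output ops such as `left_shift` end with `return res[1]`, multi-output ops such as `requantize_per_channel` return `res` whole, and in either case `tf.add_node(res[1], node)` keys the tape entry to the first output. Folded into one hedged helper (the generated code hardcodes the single/multi choice per op; `run_and_record` is an illustrative name, not part of the package):

function run_and_record(desc, op, inputs; kwargs...)
    res = tf.execute(desc)                  # vector of output TensorHandles
    node = tf.TapeNode(op, inputs, res; kwargs...)
    tf.add_node(res[1], node)               # tape entry lives on the first output
    return length(res) == 1 ? res[1] : res  # generated code decides this statically
end
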
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterAdd") do desc = tf.NodeDescription("TensorScatterAdd") @@ -30817,7 +30817,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing) + node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30837,7 +30837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "_VarHandlesOp") do desc = tf.NodeDescription("_VarHandlesOp") @@ -30882,7 +30882,7 @@ begin desc["shapes"] = map(Base.identity, shapes) end res = tf.execute(desc) - node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -30902,7 +30902,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT3D") do desc = tf.NodeDescription("IFFT3D") @@ -30918,7 +30918,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(ifft3d, [input_], name=nothing) + node = tf.TapeNode(ifft3d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30938,7 +30938,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "EuclideanNorm") do desc = tf.NodeDescription("EuclideanNorm") @@ -30967,7 +30967,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) - node = tf.TapeNode(euclidean_norm, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(euclidean_norm, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -30987,7 +30987,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefSelect") do desc = tf.NodeDescription("RefSelect") @@ -31013,7 +31013,7 @@ begin end desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = 
tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing) + node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31033,7 +31033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) local desc tf.with_op_name(name, "SparseTensorSliceDataset") do desc = tf.NodeDescription("SparseTensorSliceDataset") @@ -31057,7 +31057,7 @@ begin tf.add_input(desc, dense_shape_) desc["Tvalues"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing) + node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31077,7 +31077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") @@ -31116,7 +31116,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -31136,7 +31136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT2D") do desc = tf.NodeDescription("BatchIFFT2D") @@ -31150,7 +31150,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(batch_ifft2d, [input_], name=nothing) + node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31170,7 +31170,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGather") do desc = tf.NodeDescription("TensorArrayGather") @@ -31204,7 +31204,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather, 
[handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31224,7 +31224,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") @@ -31257,7 +31257,7 @@ begin desc["Tidx"] = tf.data_type(indices_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31277,7 +31277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "EnsureShape") do desc = tf.NodeDescription("EnsureShape") @@ -31299,7 +31299,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing) + node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31319,7 +31319,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalGradientDescent") do desc = tf.NodeDescription("ApplyProximalGradientDescent") @@ -31361,7 +31361,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(delta_) res = tf.execute(desc) - node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31381,7 +31381,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) local desc tf.with_op_name(name, "CollectiveReduce") do desc = tf.NodeDescription("CollectiveReduce") @@ 
-31439,7 +31439,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31459,7 +31459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsNan") do desc = tf.NodeDescription("IsNan") @@ -31475,7 +31475,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(is_nan, [x_], name=nothing) + node = tf.TapeNode(is_nan, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31495,7 +31495,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdaMax") do desc = tf.NodeDescription("ApplyAdaMax") @@ -31557,7 +31557,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31577,7 +31577,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeAndCropJpeg") do desc = tf.NodeDescription("DecodeAndCropJpeg") @@ -31631,7 +31631,7 @@ begin desc["dct_method"] = Base.String(dct_method) end res = tf.execute(desc) - node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31651,7 +31651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyCenteredRMSProp") do desc = tf.NodeDescription("ApplyCenteredRMSProp") @@ -31713,7 +31713,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31733,7 +31733,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilterV2") do desc = tf.NodeDescription("Conv3DBackpropFilterV2") @@ -31782,7 +31782,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31802,7 +31802,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixTriangularSolve") do desc = tf.NodeDescription("MatrixTriangularSolve") @@ -31835,7 +31835,7 @@ begin desc["T"] = tf.data_type(matrix_) desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) - node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing) + node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31855,7 +31855,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") @@ -31869,7 +31869,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = 
tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31889,7 +31889,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "WriteAudioSummary") do desc = tf.NodeDescription("WriteAudioSummary") @@ -31925,7 +31925,7 @@ begin desc["max_outputs"] = Base.Int(max_outputs) end res = tf.execute(desc) - node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing) + node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31945,7 +31945,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilespec") do desc = tf.NodeDescription("ShardedFilespec") @@ -31963,7 +31963,7 @@ begin tf.add_input(desc, basename_) tf.add_input(desc, num_shards_) res = tf.execute(desc) - node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing) + node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -31983,7 +31983,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "DivNoNan") do desc = tf.NodeDescription("DivNoNan") @@ -32004,7 +32004,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing) + node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32024,7 +32024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) local desc tf.with_op_name(name, "SparseAccumulatorApplyGradient") do desc = tf.NodeDescription("SparseAccumulatorApplyGradient") @@ -32068,7 +32068,7 @@ begin end desc["dtype"] = tf.data_type(gradient_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing) + node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, 
local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32088,7 +32088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedTensorToSparse") do desc = tf.NodeDescription("RaggedTensorToSparse") @@ -32119,7 +32119,7 @@ begin end desc["T"] = tf.data_type(rt_dense_values_) res = tf.execute(desc) - node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing) + node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing, res) tf.add_node(res[1], node) return res end @@ -32139,7 +32139,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractVolumePatches") do desc = tf.NodeDescription("ExtractVolumePatches") @@ -32173,7 +32173,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32193,7 +32193,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) local desc tf.with_op_name(name, "BarrierInsertMany") do desc = tf.NodeDescription("BarrierInsertMany") @@ -32229,7 +32229,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing) + node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32249,7 +32249,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "Const") do desc = tf.NodeDescription("Const") @@ -32271,7 +32271,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing) + node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32291,7 +32291,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "SpaceToBatch") do desc = tf.NodeDescription("SpaceToBatch") @@ -32319,7 +32319,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) - node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing) + node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32339,7 +32339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageSize") do desc = tf.NodeDescription("StageSize") @@ -32379,7 +32379,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32399,7 +32399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "EmptyTensorList") do desc = tf.NodeDescription("EmptyTensorList") @@ -32431,7 +32431,7 @@ begin end desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32451,7 +32451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndRequantize") @@ -32526,7 +32526,7 @@ begin desc["Tinput"] 
= tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -32546,7 +32546,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) local desc tf.with_op_name(name, "Lu") do desc = tf.NodeDescription("Lu") @@ -32573,7 +32573,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing) + node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing, res) tf.add_node(res[1], node) return res end @@ -32593,7 +32593,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "DecodeCompressed") do desc = tf.NodeDescription("DecodeCompressed") @@ -32613,7 +32613,7 @@ begin desc["compression_type"] = Base.String(compression_type) end res = tf.execute(desc) - node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing) + node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32633,7 +32633,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "GetSessionTensor") do desc = tf.NodeDescription("GetSessionTensor") @@ -32653,7 +32653,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing) + node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32673,7 +32673,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV3") do desc = tf.NodeDescription("TensorArrayGatherV3") @@ -32707,7 +32707,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], 
name=nothing, dtype=nothing, element_shape=nothing) + node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32727,7 +32727,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") @@ -32777,7 +32777,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32797,7 +32797,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) local desc tf.with_op_name(name, "DestroyResourceOp") do desc = tf.NodeDescription("DestroyResourceOp") @@ -32817,7 +32817,7 @@ begin desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) end res = tf.execute(desc) - node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing) + node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32837,7 +32837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReader") do desc = tf.NodeDescription("TextLineReader") @@ -32865,7 +32865,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32885,7 +32885,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, 
user_name_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryDbWriter") do desc = tf.NodeDescription("CreateSummaryDbWriter") @@ -32915,7 +32915,7 @@ begin tf.add_input(desc, run_name_) tf.add_input(desc, user_name_) res = tf.execute(desc) - node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing) + node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32935,7 +32935,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "TanhGrad") do desc = tf.NodeDescription("TanhGrad") @@ -32956,7 +32956,7 @@ begin desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) res = tf.execute(desc) - node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing) + node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -32976,7 +32976,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) local desc tf.with_op_name(name, "DecodeBase64") do desc = tf.NodeDescription("DecodeBase64") @@ -32990,7 +32990,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(decode_base64, [input_], name=nothing) + node = tf.TapeNode(decode_base64, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33010,7 +33010,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradV2") do desc = tf.NodeDescription("MaxPoolGradGradV2") @@ -33056,7 +33056,7 @@ begin desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33076,7 +33076,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummaryV2") do desc = 
tf.NodeDescription("AudioSummaryV2") @@ -33104,7 +33104,7 @@ begin desc["max_outputs"] = Base.Int(max_outputs) end res = tf.execute(desc) - node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing) + node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33124,7 +33124,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "StatefulPartitionedCall") do desc = tf.NodeDescription("StatefulPartitionedCall") @@ -33174,7 +33174,7 @@ begin desc["executor_type"] = Base.String(executor_type) end res = tf.execute(desc) - node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33194,7 +33194,7 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorConcat") do desc = tf.NodeDescription("_ScopedAllocatorConcat") @@ -33245,7 +33245,7 @@ begin desc["T"] = tf.data_type(backing_) desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33265,7 +33265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") @@ -33307,7 +33307,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + 
node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33327,7 +33327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "BatchSvd") do desc = tf.NodeDescription("BatchSvd") @@ -33360,7 +33360,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing) + node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) tf.add_node(res[1], node) return res end @@ -33380,7 +33380,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapStage") do desc = tf.NodeDescription("MapStage") @@ -33438,7 +33438,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33458,7 +33458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrl") do desc = tf.NodeDescription("ResourceSparseApplyFtrl") @@ -33519,7 +33519,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33539,7 +33539,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; 
name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighbor") do desc = tf.NodeDescription("ResizeNearestNeighbor") @@ -33565,7 +33565,7 @@ begin end desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing) + node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33585,7 +33585,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalCSVDataset") do desc = tf.NodeDescription("ExperimentalCSVDataset") @@ -33643,7 +33643,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33663,7 +33663,7 @@ end Returns x * y element-wise. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMul") do desc = tf.NodeDescription("_MklMul") @@ -33697,7 +33697,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing) + node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -33717,7 +33717,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiag") do desc = tf.NodeDescription("BatchMatrixDiag") @@ -33733,7 +33733,7 @@ begin tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing) + node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33753,7 +33753,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsInf") do desc = tf.NodeDescription("IsInf") @@ -33769,7 +33769,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(is_inf, [x_], name=nothing) + node = tf.TapeNode(is_inf, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33789,7 +33789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FixedUnigramCandidateSampler") do desc = tf.NodeDescription("FixedUnigramCandidateSampler") @@ -33880,7 +33880,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, 
seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -33900,7 +33900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrlV2") do desc = tf.NodeDescription("SparseApplyFtrlV2") @@ -33969,7 +33969,7 @@ begin desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -33989,7 +33989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) local desc tf.with_op_name(name, "UnravelIndex") do desc = tf.NodeDescription("UnravelIndex") @@ -34012,7 +34012,7 @@ begin desc["Tidx"] = tf.data_type(indices_) desc["Tidx"] = tf.data_type(dims_) res = tf.execute(desc) - node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing) + node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34032,7 +34032,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Max") do desc = tf.NodeDescription("Max") @@ -34061,7 +34061,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) - node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34081,7 +34081,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT2D") do desc = tf.NodeDescription("IFFT2D") @@ -34097,7 +34097,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(ifft2d, [input_], name=nothing) + node = tf.TapeNode(ifft2d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34117,7 +34117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) local desc tf.with_op_name(name, "SparseConcat") do desc = tf.NodeDescription("SparseConcat") @@ -34164,7 +34164,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing) + node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -34184,7 +34184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) local desc tf.with_op_name(name, "HistogramSummary") do desc = tf.NodeDescription("HistogramSummary") @@ -34204,7 +34204,7 @@ begin tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing) + node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34224,7 +34224,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentSum") do desc = tf.NodeDescription("SegmentSum") @@ -34247,7 +34247,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) - node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing) + node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34267,7 +34267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) local desc tf.with_op_name(name, "Exp") do desc = tf.NodeDescription("Exp") @@ -34283,7 +34283,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(exp, [x_], name=nothing) + node = tf.TapeNode(exp, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34303,7 +34303,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) local desc tf.with_op_name(name, "ConfigureDistributedTPU") do desc = tf.NodeDescription("ConfigureDistributedTPU") @@ -34331,7 +34331,7 @@ begin desc["is_global_init"] = Base.Bool(is_global_init) end res = tf.execute(desc) - node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing, 
res) tf.add_node(res[1], node) return res[1] end @@ -34351,7 +34351,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdSub") do desc = tf.NodeDescription("ResourceScatterNdSub") @@ -34384,7 +34384,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34404,7 +34404,7 @@ end A placeholder op for multiple values that will be sent from TensorFlow to a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaSendFromHost") do desc = tf.NodeDescription("_XlaSendFromHost") @@ -34440,7 +34440,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34460,7 +34460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandleV2") do desc = tf.NodeDescription("GetSessionHandleV2") @@ -34476,7 +34476,7 @@ begin tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing) + node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34496,7 +34496,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "ReluGrad") do desc = tf.NodeDescription("ReluGrad") @@ -34517,7 +34517,7 @@ begin desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing) + node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34537,7 +34537,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, 
segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMin") do desc = tf.NodeDescription("UnsortedSegmentMin") @@ -34566,7 +34566,7 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34586,7 +34586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseExample") do desc = tf.NodeDescription("ParseExample") @@ -34651,7 +34651,7 @@ begin desc["dense_shapes"] = map(Base.identity, dense_shapes) end res = tf.execute(desc) - node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -34671,7 +34671,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueV2") do desc = tf.NodeDescription("QueueEnqueueV2") @@ -34701,7 +34701,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34721,7 +34721,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdAdd") do desc = tf.NodeDescription("ScatterNdAdd") @@ -34755,7 +34755,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = 
tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34775,7 +34775,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProducedV2") do desc = tf.NodeDescription("ReaderNumRecordsProducedV2") @@ -34789,7 +34789,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34809,7 +34809,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") @@ -34859,7 +34859,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34879,7 +34879,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignSub") do desc = tf.NodeDescription("AssignSub") @@ -34906,7 +34906,7 @@ begin desc["T"] = tf.data_type(ref_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing) + node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34926,7 +34926,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentSum") do desc = tf.NodeDescription("UnsortedSegmentSum") @@ -34955,7 +34955,7 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = 
tf.execute(desc) - node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -34975,7 +34975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGrad") do desc = tf.NodeDescription("FusedBatchNormGrad") @@ -35034,7 +35034,7 @@ begin desc["T"] = tf.data_type(reserve_space_1_) desc["T"] = tf.data_type(reserve_space_2_) res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -35054,7 +35054,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradV2") do desc = tf.NodeDescription("MaxPoolGradV2") @@ -35100,7 +35100,7 @@ begin desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35120,7 +35120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") @@ -35191,7 +35191,7 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = 
tf.TapeNode(quantized_conv2d_with_bias_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -35211,7 +35211,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateEnsemble") do desc = tf.NodeDescription("BoostedTreesCreateEnsemble") @@ -35233,7 +35233,7 @@ begin tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing) + node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35253,7 +35253,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapIncompleteSize") do desc = tf.NodeDescription("OrderedMapIncompleteSize") @@ -35293,7 +35293,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35313,7 +35313,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) local desc tf.with_op_name(name, "Skipgram") do desc = tf.NodeDescription("Skipgram") @@ -35358,7 +35358,7 @@ begin desc["subsample"] = Base.identity(subsample) end res = tf.execute(desc) - node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing, res) tf.add_node(res[1], node) return res end @@ -35378,7 +35378,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMin") do desc = tf.NodeDescription("ArgMin") @@ -35407,7 +35407,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tidx"] = tf.data_type(dimension_) res = tf.execute(desc) - node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing) + node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35427,7 +35427,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueMany") do desc = tf.NodeDescription("QueueDequeueMany") @@ -35457,7 +35457,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35477,7 +35477,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") @@ -35496,7 +35496,7 @@ begin tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing) + node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -35516,7 +35516,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Minimum") do desc = tf.NodeDescription("Minimum") @@ -35537,7 +35537,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(minimum, [x_, y_], name=nothing) + node = tf.TapeNode(minimum, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35557,7 +35557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "Substr") do desc = tf.NodeDescription("Substr") @@ -35588,7 +35588,7 @@ begin desc["T"] = tf.data_type(pos_) desc["T"] = tf.data_type(len_) res = tf.execute(desc) - node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, 
unit=nothing) + node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35608,7 +35608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSize") do desc = tf.NodeDescription("QueueSize") @@ -35622,7 +35622,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(queue_size, [handle_], name=nothing) + node = tf.TapeNode(queue_size, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35642,7 +35642,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrlV2") do desc = tf.NodeDescription("ApplyFtrlV2") @@ -35704,7 +35704,7 @@ begin desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35724,7 +35724,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") @@ -35766,7 +35766,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35786,7 +35786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMean") do desc = tf.NodeDescription("SparseSegmentMean") @@ -35813,7 +35813,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) - node = 
tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing) + node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35833,7 +35833,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceApplyProximalAdagrad") @@ -35878,7 +35878,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35898,7 +35898,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV2") do desc = tf.NodeDescription("TensorArrayGatherV2") @@ -35932,7 +35932,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35952,7 +35952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Less") do desc = tf.NodeDescription("Less") @@ -35973,7 +35973,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(less, [x_, y_], name=nothing) + node = tf.TapeNode(less, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -35993,7 +35993,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "HostConst") do desc = tf.NodeDescription("HostConst") @@ -36015,7 +36015,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing) + node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36035,7 +36035,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "UpperBound") do desc = tf.NodeDescription("UpperBound") @@ -36062,7 +36062,7 @@ begin desc["T"] = tf.data_type(sorted_inputs_) desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing) + node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36082,7 +36082,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGetItem") do desc = tf.NodeDescription("TensorListGetItem") @@ -36110,7 +36110,7 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_, element_shape_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_, element_shape_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36130,7 +36130,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVars") do desc = tf.NodeDescription("FakeQuantWithMinMaxVars") @@ -36164,7 +36164,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36184,7 +36184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") @@ -36198,7 +36198,7 @@ begin quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) tf.add_input(desc, quantile_stream_resource_handle_) res = tf.execute(desc) - node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing) + node = 
tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36218,7 +36218,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpToV2") do desc = tf.NodeDescription("ReaderReadUpToV2") @@ -36245,7 +36245,7 @@ begin tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) res = tf.execute(desc) - node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing) + node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -36265,7 +36265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) local desc tf.with_op_name(name, "Complex") do desc = tf.NodeDescription("Complex") @@ -36286,7 +36286,7 @@ begin desc["T"] = tf.data_type(real_) desc["T"] = tf.data_type(imag_) res = tf.execute(desc) - node = tf.TapeNode(complex, [real_, imag_], name=nothing) + node = tf.TapeNode(complex, [real_, imag_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36306,7 +36306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListReserve") do desc = tf.NodeDescription("TensorListReserve") @@ -36338,7 +36338,7 @@ begin end desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36358,7 +36358,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) local desc tf.with_op_name(name, "Bitcast") do desc = tf.NodeDescription("Bitcast") @@ -36380,7 +36380,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing) + node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36400,7 +36400,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueue") do desc = tf.NodeDescription("PriorityQueue") @@ -36440,7 +36440,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36460,7 +36460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") @@ -36559,7 +36559,7 @@ begin desc["Tinput"] = tf.data_type(beta_) desc["Tinput"] = tf.data_type(gamma_) res = tf.execute(desc) - node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) tf.add_node(res[1], node) return res end @@ -36579,7 +36579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cos_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cos") do desc = tf.NodeDescription("Cos") @@ -36595,7 +36595,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(cos, [x_], name=nothing) + node = tf.TapeNode(cos, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36615,7 +36615,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizeDownAndShrinkRange") do desc = tf.NodeDescription("QuantizeDownAndShrinkRange") @@ -36650,7 +36650,7 @@ begin end desc["Tinput"] 
= tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing) + node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -36670,7 +36670,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRandomDataset") do desc = tf.NodeDescription("ExperimentalRandomDataset") @@ -36700,7 +36700,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36720,7 +36720,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "Rpc") do desc = tf.NodeDescription("Rpc") @@ -36760,7 +36760,7 @@ begin desc["timeout_in_ms"] = Base.Int(timeout_in_ms) end res = tf.execute(desc) - node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36780,7 +36780,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") @@ -36875,7 +36875,7 @@ begin desc["Tbias"] = tf.data_type(bias_) desc["Tsummand"] = tf.data_type(summand_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, 
max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -36895,7 +36895,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "TensorListLength") do desc = tf.NodeDescription("TensorListLength") @@ -36909,7 +36909,7 @@ begin input_handle_ = convert(tf.TensorHandle, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing) + node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36929,7 +36929,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapIncompleteSize") do desc = tf.NodeDescription("MapIncompleteSize") @@ -36969,7 +36969,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -36989,7 +36989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "StatelessWhile") do desc = tf.NodeDescription("StatelessWhile") @@ -37021,7 +37021,7 @@ begin desc["body"] = Base.identity(body) end res = tf.execute(desc) - node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing) + node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37041,7 +37041,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, 
reduction_type=nothing) local desc tf.with_op_name(name, "SparseConditionalAccumulator") do desc = tf.NodeDescription("SparseConditionalAccumulator") @@ -37081,7 +37081,7 @@ begin desc["reduction_type"] = Base.String(reduction_type) end res = tf.execute(desc) - node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37101,7 +37101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMin") do desc = tf.NodeDescription("SegmentMin") @@ -37124,7 +37124,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) - node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing) + node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37144,7 +37144,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) local desc tf.with_op_name(name, "WriteGraphSummary") do desc = tf.NodeDescription("WriteGraphSummary") @@ -37166,7 +37166,7 @@ begin tf.add_input(desc, step_) tf.add_input(desc, tensor_) res = tf.execute(desc) - node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing) + node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37186,7 +37186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "CholeskyGrad") do desc = tf.NodeDescription("CholeskyGrad") @@ -37207,7 +37207,7 @@ begin desc["T"] = tf.data_type(l_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing) + node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37227,7 +37227,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LogUniformCandidateSampler") do desc = tf.NodeDescription("LogUniformCandidateSampler") @@ -37282,7 +37282,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = 
tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -37302,7 +37302,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeSparse") do desc = tf.NodeDescription("SerializeSparse") @@ -37332,7 +37332,7 @@ begin end desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) - node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing) + node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37352,7 +37352,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "ScatterNdNonAliasingAdd") do desc = tf.NodeDescription("ScatterNdNonAliasingAdd") @@ -37380,7 +37380,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing) + node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37400,7 +37400,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefMerge") do desc = tf.NodeDescription("RefMerge") @@ -37427,7 +37427,7 @@ begin end desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) - node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing) + node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -37447,7 +37447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorListConcat") do desc = tf.NodeDescription("TensorListConcat") @@ -37478,7 +37478,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, element_shape=nothing) + node = 
tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res end @@ -37498,7 +37498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNCanonicalToParams") do desc = tf.NodeDescription("CudnnRNNCanonicalToParams") @@ -37573,7 +37573,7 @@ begin desc["T"] = tf.data_type(weights_) desc["T"] = tf.data_type(biases_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37593,7 +37593,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdadelta") do desc = tf.NodeDescription("SparseApplyAdadelta") @@ -37652,7 +37652,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37672,7 +37672,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayClose") do desc = tf.NodeDescription("TensorArrayClose") @@ -37686,7 +37686,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close, [handle_], name=nothing) + node = tf.TapeNode(tensor_array_close, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37706,7 +37706,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "SeluGrad") do desc = tf.NodeDescription("SeluGrad") @@ -37727,7 +37727,7 @@ begin desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(outputs_) res = tf.execute(desc) - node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing) + node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37747,7 +37747,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradImage") do desc = tf.NodeDescription("CropAndResizeGradImage") @@ -37779,7 +37779,7 @@ begin desc["method"] = Base.String(method) end res = tf.execute(desc) - node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing) + node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37799,7 +37799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT") do desc = tf.NodeDescription("RFFT") @@ -37817,7 +37817,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing) + node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37837,7 +37837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSqlDataset") do desc = tf.NodeDescription("ExperimentalSqlDataset") @@ -37871,7 +37871,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37891,7 +37891,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, 
use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyPowerSign") do desc = tf.NodeDescription("ResourceApplyPowerSign") @@ -37941,7 +37941,7 @@ begin desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37961,7 +37961,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDeterminant") do desc = tf.NodeDescription("MatrixDeterminant") @@ -37977,7 +37977,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(matrix_determinant, [input_], name=nothing) + node = tf.TapeNode(matrix_determinant, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -37997,7 +37997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) local desc tf.with_op_name(name, "StaticRegexReplace") do desc = tf.NodeDescription("StaticRegexReplace") @@ -38029,7 +38029,7 @@ begin desc["replace_global"] = Base.Bool(replace_global) end res = tf.execute(desc) - node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38049,7 +38049,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool") do desc = tf.NodeDescription("AvgPool") @@ -38089,7 +38089,7 @@ begin end desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38109,7 +38109,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseAdd") do desc = 
tf.NodeDescription("SparseDenseCwiseAdd") @@ -38138,7 +38138,7 @@ begin desc["T"] = tf.data_type(sp_values_) desc["T"] = tf.data_type(dense_) res = tf.execute(desc) - node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing) + node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38158,7 +38158,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) local desc tf.with_op_name(name, "BiasAddV1") do desc = tf.NodeDescription("BiasAddV1") @@ -38179,7 +38179,7 @@ begin desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(bias_) res = tf.execute(desc) - node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing) + node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38199,7 +38199,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_permutation_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_permutation_graph(x_; name=nothing) local desc tf.with_op_name(name, "InvertPermutation") do desc = tf.NodeDescription("InvertPermutation") @@ -38215,7 +38215,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(invert_permutation, [x_], name=nothing) + node = tf.TapeNode(invert_permutation, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38235,7 +38235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTableV2") do desc = tf.NodeDescription("HashTableV2") @@ -38275,7 +38275,7 @@ begin desc["value_dtype"] = Base.identity(value_dtype) end res = tf.execute(desc) - node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38295,7 +38295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "SparseApplyMomentum") do desc = tf.NodeDescription("SparseApplyMomentum") @@ -38350,7 +38350,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) - 
node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38370,7 +38370,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueue") do desc = tf.NodeDescription("InfeedEnqueue") @@ -38410,7 +38410,7 @@ begin end desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38430,7 +38430,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniformInt") do desc = tf.NodeDescription("StatelessRandomUniformInt") @@ -38469,7 +38469,7 @@ begin desc["dtype"] = tf.data_type(minval_) desc["dtype"] = tf.data_type(maxval_) res = tf.execute(desc) - node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=nothing, dtype=nothing) + node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38489,7 +38489,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -38539,7 +38539,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, 
res) tf.add_node(res[1], node) return res[1] end @@ -38559,7 +38559,7 @@ end Sends the named tensor from send_device to recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Send") do desc = tf.NodeDescription("_Send") @@ -38605,7 +38605,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38625,7 +38625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapPeek") do desc = tf.NodeDescription("MapPeek") @@ -38673,7 +38673,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38693,7 +38693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) local desc tf.with_op_name(name, "WriteScalarSummary") do desc = tf.NodeDescription("WriteScalarSummary") @@ -38721,7 +38721,7 @@ begin tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing) + node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38741,7 +38741,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc 
tf.with_op_name(name, "OrderedMapUnstageNoKey") do desc = tf.NodeDescription("OrderedMapUnstageNoKey") @@ -38790,7 +38790,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res end @@ -38810,7 +38810,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyCenteredRMSProp") do desc = tf.NodeDescription("SparseApplyCenteredRMSProp") @@ -38879,7 +38879,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38899,7 +38899,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatterV2") do desc = tf.NodeDescription("TensorListScatterV2") @@ -38941,7 +38941,7 @@ begin desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter_v2, [tensor_, indices_, element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_scatter_v2, [tensor_, indices_, element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -38961,7 +38961,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInputV2") do desc = tf.NodeDescription("Conv3DBackpropInputV2") @@ -39012,7 +39012,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = 
tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39032,7 +39032,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") @@ -39071,7 +39071,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -39091,7 +39091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomShuffle") do desc = tf.NodeDescription("RandomShuffle") @@ -39119,7 +39119,7 @@ begin end desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39139,7 +39139,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "UniformCandidateSampler") do desc = tf.NodeDescription("UniformCandidateSampler") @@ -39194,7 +39194,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -39214,7 +39214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV2") do desc = tf.NodeDescription("TensorArraySplitV2") @@ -39242,7 +39242,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39262,7 +39262,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTableV2") do desc = tf.NodeDescription("MutableDenseHashTableV2") @@ -39331,7 +39331,7 @@ begin desc["key_dtype"] = tf.data_type(empty_key_) desc["key_dtype"] = tf.data_type(deleted_key_) res = tf.execute(desc) - node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39351,7 +39351,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) local desc tf.with_op_name(name, "DrawBoundingBoxes") do desc = tf.NodeDescription("DrawBoundingBoxes") @@ -39371,7 +39371,7 @@ begin tf.add_input(desc, boxes_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing) + node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39391,7 +39391,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalAdagrad") do desc = 
tf.NodeDescription("SparseApplyProximalAdagrad") @@ -39445,7 +39445,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39465,7 +39465,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RangeDataset") do desc = tf.NodeDescription("RangeDataset") @@ -39499,7 +39499,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39519,7 +39519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreStateV2") do desc = tf.NodeDescription("ReaderRestoreStateV2") @@ -39537,7 +39537,7 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) res = tf.execute(desc) - node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing) + node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39557,7 +39557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopKV2") do desc = tf.NodeDescription("TopKV2") @@ -39588,7 +39588,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing) + node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing, res) tf.add_node(res[1], node) return res end @@ -39608,7 +39608,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atanh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atanh") do desc = tf.NodeDescription("Atanh") @@ -39624,7 +39624,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(atanh, [x_], name=nothing) + node = tf.TapeNode(atanh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39644,7 +39644,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function debug_gradient_identity_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientIdentity") do desc = tf.NodeDescription("DebugGradientIdentity") @@ -39660,7 +39660,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing) + node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39680,7 +39680,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) local desc tf.with_op_name(name, "SparseAddGrad") do desc = tf.NodeDescription("SparseAddGrad") @@ -39713,7 +39713,7 @@ begin tf.add_input(desc, sum_indices_) desc["T"] = tf.data_type(backprop_val_grad_) res = tf.execute(desc) - node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing) + node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -39733,7 +39733,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterAdd") do desc = tf.NodeDescription("ResourceScatterAdd") @@ -39766,7 +39766,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39786,7 +39786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ceil_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ceil_graph(x_; name=nothing) local desc tf.with_op_name(name, "Ceil") do desc = tf.NodeDescription("Ceil") @@ -39802,7 +39802,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(ceil, [x_], name=nothing) + node = tf.TapeNode(ceil, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39822,7 +39822,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "Save") do desc = tf.NodeDescription("Save") @@ -39850,7 +39850,7 @@ begin desc["T"] = map(Base.identity, T) end res = tf.execute(desc) - node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, 
T=nothing) + node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -39870,7 +39870,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") @@ -39909,7 +39909,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -39929,7 +39929,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) local desc tf.with_op_name(name, "QuantizedConcat") do desc = tf.NodeDescription("QuantizedConcat") @@ -39968,7 +39968,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing) + node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -39988,7 +39988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeros_like_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeros_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "ZerosLike") do desc = tf.NodeDescription("ZerosLike") @@ -40004,7 +40004,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(zeros_like, [x_], name=nothing) + node = tf.TapeNode(zeros_like, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40024,7 +40024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalAvgPool") do desc = tf.NodeDescription("FractionalAvgPool") @@ -40081,7 +40081,7 @@ begin end desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(fractional_avg_pool, [value_], 
name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -40101,7 +40101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) local desc tf.with_op_name(name, "EditDistance") do desc = tf.NodeDescription("EditDistance") @@ -40144,7 +40144,7 @@ begin desc["T"] = tf.data_type(hypothesis_values_) desc["T"] = tf.data_type(truth_values_) res = tf.execute(desc) - node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing) + node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40164,7 +40164,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueV2") do desc = tf.NodeDescription("UniqueV2") @@ -40197,7 +40197,7 @@ begin desc["T"] = tf.data_type(x_) desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing) + node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing, res) tf.add_node(res[1], node) return res end @@ -40217,7 +40217,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV2") do desc = tf.NodeDescription("QuantizeAndDequantizeV2") @@ -40267,7 +40267,7 @@ begin desc["T"] = tf.data_type(input_min_) desc["T"] = tf.data_type(input_max_) res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40287,7 +40287,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantize") do desc = tf.NodeDescription("QuantizeAndDequantize") @@ -40333,7 +40333,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40353,7 +40353,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPopBack") do desc = tf.NodeDescription("TensorListPopBack") @@ -40382,7 +40382,7 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_pop_back, [input_handle_, element_shape_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_pop_back, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res end @@ -40402,7 +40402,7 @@ end Debug NaN Value Counter Op """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNanCount") do desc = tf.NodeDescription("DebugNanCount") @@ -40442,7 +40442,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40462,7 +40462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdagradDA") do desc = tf.NodeDescription("ApplyAdagradDA") @@ -40518,7 +40518,7 @@ begin desc["T"] = tf.data_type(l1_) desc["T"] = tf.data_type(l2_) res 
= tf.execute(desc) - node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40538,7 +40538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNative") do desc = tf.NodeDescription("DepthwiseConv2dNative") @@ -40583,7 +40583,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40603,7 +40603,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "SerializeIterator") do desc = tf.NodeDescription("SerializeIterator") @@ -40617,7 +40617,7 @@ begin resource_handle_ = convert(tf.TensorHandle, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) - node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing) + node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40637,7 +40637,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "DatasetToGraph") do desc = tf.NodeDescription("DatasetToGraph") @@ -40651,7 +40651,7 @@ begin input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) res = tf.execute(desc) - node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing) + node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40671,7 +40671,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopK") do desc = tf.NodeDescription("TopK") @@ -40704,7 +40704,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, 
sorted=nothing) + node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing, res) tf.add_node(res[1], node) return res end @@ -40724,7 +40724,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrlV2") do desc = tf.NodeDescription("ResourceApplyFtrlV2") @@ -40783,7 +40783,7 @@ begin desc["T"] = tf.data_type(l2_shrinkage_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40803,7 +40803,7 @@ end Replacement node for NcclBroadcast. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastRecv") do desc = tf.NodeDescription("_NcclBroadcastRecv") @@ -40829,7 +40829,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing) + node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40849,7 +40849,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosed") do desc = tf.NodeDescription("QueueIsClosed") @@ -40863,7 +40863,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(queue_is_closed, [handle_], name=nothing) + node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40883,7 +40883,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleDataset") do desc = tf.NodeDescription("ShuffleDataset") @@ -40927,7 +40927,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = 
tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -40947,7 +40947,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeSparse") do desc = tf.NodeDescription("DeserializeSparse") @@ -40974,7 +40974,7 @@ begin end desc["Tserialized"] = tf.data_type(serialized_sparse_) res = tf.execute(desc) - node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing) + node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res end @@ -40994,7 +40994,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueueV2") do desc = tf.NodeDescription("PriorityQueueV2") @@ -41034,7 +41034,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41054,7 +41054,7 @@ end A graph node which represents an argument to a function. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceArg") do desc = tf.NodeDescription("_DeviceArg") @@ -41070,7 +41070,7 @@ begin desc["index"] = Base.Int(index) end res = tf.execute(desc) - node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing) + node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41090,7 +41090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "TruncatedNormal") do desc = tf.NodeDescription("TruncatedNormal") @@ -41124,7 +41124,7 @@ begin end desc["T"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41144,7 +41144,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "TensorForestTreePredict") do desc = tf.NodeDescription("TensorForestTreePredict") @@ -41168,7 +41168,7 @@ begin desc["logits_dimension"] = Base.Int(logits_dimension) end res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing) + node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41188,7 +41188,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "StackV2") do desc = tf.NodeDescription("StackV2") @@ -41214,7 +41214,7 @@ begin desc["stack_name"] = Base.String(stack_name) end res = tf.execute(desc) - node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing) + node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41234,7 +41234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) local desc tf.with_op_name(name, 
"AccumulatorNumAccumulated") do desc = tf.NodeDescription("AccumulatorNumAccumulated") @@ -41248,7 +41248,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing) + node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41268,7 +41268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderResetV2") do desc = tf.NodeDescription("ReaderResetV2") @@ -41282,7 +41282,7 @@ begin reader_handle_ = convert(tf.TensorHandle, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing) + node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41302,7 +41302,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAddSign") do desc = tf.NodeDescription("ApplyAddSign") @@ -41354,7 +41354,7 @@ begin desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41374,7 +41374,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -41413,7 +41413,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -41433,7 +41433,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rint_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rint_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rint") 
do desc = tf.NodeDescription("Rint") @@ -41449,7 +41449,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(rint, [x_], name=nothing) + node = tf.TapeNode(rint, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41469,7 +41469,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -41508,7 +41508,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -41528,7 +41528,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) local desc tf.with_op_name(name, "ExtractGlimpse") do desc = tf.NodeDescription("ExtractGlimpse") @@ -41574,7 +41574,7 @@ begin desc["noise"] = Base.String(noise) end res = tf.execute(desc) - node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41594,7 +41594,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) local desc tf.with_op_name(name, "StringToHashBucketStrong") do desc = tf.NodeDescription("StringToHashBucketStrong") @@ -41620,7 +41620,7 @@ begin desc["key"] = map(Base.identity, key) end res = tf.execute(desc) - node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing) + node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41640,7 +41640,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, 
output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OneShotIterator") do desc = tf.NodeDescription("OneShotIterator") @@ -41680,7 +41680,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41700,7 +41700,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyMomentum") do desc = tf.NodeDescription("ResourceSparseApplyMomentum") @@ -41753,7 +41753,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41773,7 +41773,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "SaveSlices") do desc = tf.NodeDescription("SaveSlices") @@ -41805,7 +41805,7 @@ begin desc["T"] = map(Base.identity, T) end res = tf.execute(desc) - node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing) + node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41825,7 +41825,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetCardinality") do desc = tf.NodeDescription("ExperimentalDatasetCardinality") @@ -41839,7 +41839,7 @@ begin input_dataset_ = convert(tf.TensorHandle, input_dataset_) tf.add_input(desc, input_dataset_) res = tf.execute(desc) - node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], 
name=nothing) + node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41859,7 +41859,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_finite_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_finite_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsFinite") do desc = tf.NodeDescription("IsFinite") @@ -41875,7 +41875,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(is_finite, [x_], name=nothing) + node = tf.TapeNode(is_finite, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41895,7 +41895,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") @@ -41955,7 +41955,7 @@ begin desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) end res = tf.execute(desc) - node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -41975,7 +41975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) local desc tf.with_op_name(name, "AllToAll") do desc = tf.NodeDescription("AllToAll") @@ -42013,7 +42013,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42033,7 +42033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TakeManySparseFromTensorsMap") do desc = tf.NodeDescription("TakeManySparseFromTensorsMap") @@ -42070,7 +42070,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res end @@ -42090,7 +42090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiagPart") do desc = tf.NodeDescription("BatchMatrixDiagPart") @@ -42106,7 +42106,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing) + node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42126,7 +42126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDataset") do desc = tf.NodeDescription("FixedLengthRecordDataset") @@ -42156,7 +42156,7 @@ begin tf.add_input(desc, footer_bytes_) tf.add_input(desc, buffer_size_) res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing) + node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42176,7 +42176,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPush") do desc = tf.NodeDescription("StackPush") @@ -42202,7 +42202,7 @@ begin end desc["T"] = tf.data_type(elem_) res = tf.execute(desc) - node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing) + node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42222,7 +42222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) local 
desc tf.with_op_name(name, "PlaceholderV2") do desc = tf.NodeDescription("PlaceholderV2") @@ -42244,7 +42244,7 @@ begin desc["shape"] = Base.identity(shape) end res = tf.execute(desc) - node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing) + node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42264,7 +42264,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorInit") do desc = tf.NodeDescription("MultiDeviceIteratorInit") @@ -42286,7 +42286,7 @@ begin tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, max_buffer_size_) res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing) + node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42306,7 +42306,7 @@ end Re-configures the GCS block cache with the new configuration values. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureBlockCache") do desc = tf.NodeDescription("GcsConfigureBlockCache") @@ -42328,7 +42328,7 @@ begin tf.add_input(desc, block_size_) tf.add_input(desc, max_staleness_) res = tf.execute(desc) - node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing) + node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42348,7 +42348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueV2") do desc = tf.NodeDescription("QueueDequeueV2") @@ -42374,7 +42374,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42394,7 +42394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, 
table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") @@ -42433,7 +42433,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -42453,7 +42453,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "Transpose") do desc = tf.NodeDescription("Transpose") @@ -42475,7 +42475,7 @@ begin desc["T"] = tf.data_type(x_) desc["Tperm"] = tf.data_type(perm_) res = tf.execute(desc) - node = tf.TapeNode(transpose, [x_, perm_], name=nothing) + node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42495,7 +42495,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT") do desc = tf.NodeDescription("IFFT") @@ -42511,7 +42511,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(ifft, [input_], name=nothing) + node = tf.TapeNode(ifft, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42531,7 +42531,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") @@ -42564,7 +42564,7 @@ begin desc["Tidx"] = tf.data_type(indices_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42584,7 +42584,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosedV2") do desc = tf.NodeDescription("QueueIsClosedV2") @@ -42598,7 +42598,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing) + node = 
tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42618,7 +42618,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "ParameterizedTruncatedNormal") do desc = tf.NodeDescription("ParameterizedTruncatedNormal") @@ -42673,7 +42673,7 @@ begin desc["dtype"] = tf.data_type(minvals_) desc["dtype"] = tf.data_type(maxvals_) res = tf.execute(desc) - node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42693,7 +42693,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_part_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "DiagPart") do desc = tf.NodeDescription("DiagPart") @@ -42709,7 +42709,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(diag_part, [input_], name=nothing) + node = tf.TapeNode(diag_part, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42729,7 +42729,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) local desc tf.with_op_name(name, "KmeansPlusPlusInitialization") do desc = tf.NodeDescription("KmeansPlusPlusInitialization") @@ -42755,7 +42755,7 @@ begin tf.add_input(desc, seed_) tf.add_input(desc, num_retries_per_sample_) res = tf.execute(desc) - node = tf.TapeNode(kmeans_plus_plus_initialization, [points_, num_to_sample_, seed_, num_retries_per_sample_], name=nothing) + node = tf.TapeNode(kmeans_plus_plus_initialization, [points_, num_to_sample_, seed_, num_retries_per_sample_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42775,7 +42775,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) local desc tf.with_op_name(name, "RegexReplace") do desc = tf.NodeDescription("RegexReplace") @@ -42803,7 +42803,7 @@ begin desc["replace_global"] = Base.Bool(replace_global) end res = tf.execute(desc) - node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing) + node = tf.TapeNode(regex_replace, [input_, pattern_, 
rewrite_], name=nothing, replace_global=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42823,7 +42823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) local desc tf.with_op_name(name, "SparseTensorDenseMatMul") do desc = tf.NodeDescription("SparseTensorDenseMatMul") @@ -42867,7 +42867,7 @@ begin desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_) res = tf.execute(desc) - node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing) + node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42887,7 +42887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) local desc tf.with_op_name(name, "MapDefun") do desc = tf.NodeDescription("MapDefun") @@ -42935,7 +42935,7 @@ begin desc["f"] = Base.identity(f) end res = tf.execute(desc) - node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -42955,7 +42955,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") @@ -43010,7 +43010,7 @@ begin desc["seed2"] = Base.Int(seed2) end res = tf.execute(desc) - node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) tf.add_node(res[1], node) return res end @@ -43030,7 +43030,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") @@ -43069,7 +43069,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -43089,7 +43089,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "ParallelConcat") do desc = tf.NodeDescription("ParallelConcat") @@ -43117,7 +43117,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing) + node = tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43137,7 +43137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFindV2") do desc = tf.NodeDescription("LookupTableFindV2") @@ -43163,7 +43163,7 @@ begin desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(default_value_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing) + node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43183,7 +43183,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeDeserialize") do desc = tf.NodeDescription("TensorForestTreeDeserialize") @@ -43201,7 +43201,7 @@ begin tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing) + node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43221,7 +43221,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") @@ -43260,7 +43260,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -43280,7 +43280,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") @@ -43318,7 +43318,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43338,7 +43338,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyGradientDescent") do desc = tf.NodeDescription("ResourceApplyGradientDescent") @@ -43369,7 +43369,7 @@ begin desc["T"] = tf.data_type(alpha_) desc["T"] = tf.data_type(delta_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43389,7 +43389,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, 
"ExperimentalSlidingWindowDataset") do desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") @@ -43427,7 +43427,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43447,7 +43447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) local desc tf.with_op_name(name, "DecodeRaw") do desc = tf.NodeDescription("DecodeRaw") @@ -43473,7 +43473,7 @@ begin desc["little_endian"] = Base.Bool(little_endian) end res = tf.execute(desc) - node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing) + node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43493,7 +43493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") @@ -43536,7 +43536,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res end @@ -43556,7 +43556,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCountsV2") do desc = tf.NodeDescription("UniqueWithCountsV2") @@ -43589,7 +43589,7 @@ begin desc["T"] = tf.data_type(x_) desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing) + node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing, res) tf.add_node(res[1], node) return res end @@ -43609,7 +43609,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, 
output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSleepDataset") do desc = tf.NodeDescription("ExperimentalSleepDataset") @@ -43639,7 +43639,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43659,7 +43659,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) local desc tf.with_op_name(name, "TPUReplicatedOutput") do desc = tf.NodeDescription("TPUReplicatedOutput") @@ -43686,7 +43686,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing) + node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing, res) tf.add_node(res[1], node) return res end @@ -43706,7 +43706,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "LowerBound") do desc = tf.NodeDescription("LowerBound") @@ -43733,7 +43733,7 @@ begin desc["T"] = tf.data_type(sorted_inputs_) desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing) + node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43753,7 +43753,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tan") do desc = tf.NodeDescription("Tan") @@ -43769,7 +43769,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(tan, [x_], name=nothing) + node = tf.TapeNode(tan, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43789,7 +43789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "Enter") do desc = tf.NodeDescription("Enter") @@ -43823,7 +43823,7 @@ begin end desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = 
tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43843,7 +43843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueueTuple") do desc = tf.NodeDescription("InfeedEnqueueTuple") @@ -43881,7 +43881,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43901,7 +43901,7 @@ end An op that informs a host of the global ids of all the of TPUs in the """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) local desc tf.with_op_name(name, "_SetGlobalTPUArray") do desc = tf.NodeDescription("_SetGlobalTPUArray") @@ -43915,7 +43915,7 @@ begin topology_ = convert(tf.TensorHandle, topology_) tf.add_input(desc, topology_) res = tf.execute(desc) - node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing) + node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43935,7 +43935,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) local desc tf.with_op_name(name, "Square") do desc = tf.NodeDescription("Square") @@ -43951,7 +43951,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(square, [x_], name=nothing) + node = tf.TapeNode(square, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -43971,7 +43971,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientRefIdentity") do desc = tf.NodeDescription("DebugGradientRefIdentity") @@ -43987,7 +43987,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing) + node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44007,7 +44007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdadelta") do desc = tf.NodeDescription("ApplyAdadelta") @@ -44059,7 +44059,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44079,7 +44079,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") @@ -44153,7 +44153,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44173,7 +44173,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummary") do desc = tf.NodeDescription("AudioSummary") @@ -44203,7 +44203,7 @@ begin desc["max_outputs"] = Base.Int(max_outputs) end res = tf.execute(desc) - node = tf.TapeNode(audio_summary, [tag_, 
tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing) + node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44223,7 +44223,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "SquaredDifference") do desc = tf.NodeDescription("SquaredDifference") @@ -44244,7 +44244,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(squared_difference, [x_, y_], name=nothing) + node = tf.TapeNode(squared_difference, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44264,7 +44264,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalTakeWhileDataset") do desc = tf.NodeDescription("ExperimentalTakeWhileDataset") @@ -44306,7 +44306,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_take_while_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_take_while_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44326,7 +44326,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdUpdate") do desc = tf.NodeDescription("ScatterNdUpdate") @@ -44360,7 +44360,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44380,7 +44380,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "DynamicStitch") do desc = tf.NodeDescription("DynamicStitch") @@ -44406,7 +44406,7 @@ begin end desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(dynamic_stitch, [indices_, data_], 
name=nothing, N=nothing) + node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44426,7 +44426,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "OnesLike") do desc = tf.NodeDescription("OnesLike") @@ -44442,7 +44442,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(ones_like, [x_], name=nothing) + node = tf.TapeNode(ones_like, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44462,7 +44462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalMaxPoolGrad") do desc = tf.NodeDescription("FractionalMaxPoolGrad") @@ -44502,7 +44502,7 @@ begin desc["T"] = tf.data_type(orig_output_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing) + node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44522,7 +44522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "RemoteCall") do desc = tf.NodeDescription("RemoteCall") @@ -44558,7 +44558,7 @@ begin desc["f"] = Base.identity(f) end res = tf.execute(desc) - node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44578,7 +44578,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "Gather") do desc = tf.NodeDescription("Gather") @@ -44607,7 +44607,7 @@ begin desc["Tparams"] = tf.data_type(params_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing) + node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44627,7 +44627,7 @@ end """ begin - 
#= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "QuantizedMatMul") do desc = tf.NodeDescription("QuantizedMatMul") @@ -44682,7 +44682,7 @@ begin desc["T1"] = tf.data_type(a_) desc["T2"] = tf.data_type(b_) res = tf.execute(desc) - node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing) + node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) tf.add_node(res[1], node) return res end @@ -44702,7 +44702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecodeWithOffsets") do desc = tf.NodeDescription("UnicodeDecodeWithOffsets") @@ -44745,7 +44745,7 @@ begin desc["replace_control_characters"] = Base.Bool(replace_control_characters) end res = tf.execute(desc) - node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) tf.add_node(res[1], node) return res end @@ -44765,7 +44765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") @@ -44815,7 +44815,7 @@ begin desc["table_ids"] = map(Base.identity, table_ids) end res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44835,7 +44835,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorApplyGradient") do desc = tf.NodeDescription("AccumulatorApplyGradient") @@ -44865,7 +44865,7 @@ begin end desc["dtype"] = tf.data_type(gradient_) res = tf.execute(desc) - node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing) + node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44885,7 +44885,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) local desc tf.with_op_name(name, "WriteSummary") do desc = tf.NodeDescription("WriteSummary") @@ -44917,7 +44917,7 @@ begin tf.add_input(desc, summary_metadata_) desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing) + node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -44937,7 +44937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2D") do desc = tf.NodeDescription("QuantizedConv2D") @@ -45004,7 +45004,7 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -45024,7 +45024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyMomentum") do desc = tf.NodeDescription("ResourceApplyMomentum") @@ -45070,7 +45070,7 @@ begin desc["T"] = tf.data_type(grad_) desc["T"] = 
tf.data_type(momentum_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45090,7 +45090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log1p") do desc = tf.NodeDescription("Log1p") @@ -45106,7 +45106,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(log1p, [x_], name=nothing) + node = tf.TapeNode(log1p, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45126,7 +45126,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapClear") do desc = tf.NodeDescription("OrderedMapClear") @@ -45166,7 +45166,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45186,7 +45186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterUpdate") do desc = tf.NodeDescription("ResourceScatterUpdate") @@ -45219,7 +45219,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45239,7 +45239,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "BarrierTakeMany") do desc = tf.NodeDescription("BarrierTakeMany") @@ -45286,7 +45286,7 @@ 
begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res end @@ -45306,7 +45306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyKerasMomentum") do desc = tf.NodeDescription("ResourceApplyKerasMomentum") @@ -45352,7 +45352,7 @@ begin desc["T"] = tf.data_type(grad_) desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45372,7 +45372,7 @@ end Generates serialized partition messages suitable for batch reads. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") @@ -45424,7 +45424,7 @@ begin desc["test_end_point"] = Base.String(test_end_point) end res = tf.execute(desc) - node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45444,7 +45444,7 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaRecvAtHost") do desc = tf.NodeDescription("_XlaRecvAtHost") @@ 
-45476,7 +45476,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45496,7 +45496,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedAvgPool") do desc = tf.NodeDescription("QuantizedAvgPool") @@ -45543,7 +45543,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res end @@ -45563,7 +45563,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") @@ -45631,7 +45631,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45651,7 +45651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) local desc tf.with_op_name(name, "TensorListResize") do desc = tf.NodeDescription("TensorListResize") @@ -45669,7 +45669,7 @@ begin tf.add_input(desc, input_handle_) tf.add_input(desc, size_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_resize, [input_handle_, size_], name=nothing) + node = tf.TapeNode(tensor_list_resize, [input_handle_, size_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45689,7 +45689,7 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostRecv") do desc = tf.NodeDescription("_HostRecv") @@ -45735,7 +45735,7 @@ begin desc["client_terminated"] = Base.Bool(client_terminated) end res = tf.execute(desc) - node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45755,7 +45755,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCenterBias") do desc = tf.NodeDescription("BoostedTreesCenterBias") @@ -45785,7 +45785,7 @@ begin tf.add_input(desc, l1_) tf.add_input(desc, l2_) res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing) + node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45805,7 +45805,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSizeV2") do desc = tf.NodeDescription("LookupTableSizeV2") @@ -45819,7 +45819,7 @@ begin table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing) + node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45839,7 +45839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT") do desc = tf.NodeDescription("IRFFT") @@ -45857,7 +45857,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing) + node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ 
-45877,7 +45877,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceAdd") do desc = tf.NodeDescription("InplaceAdd") @@ -45902,7 +45902,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(v_) res = tf.execute(desc) - node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing) + node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45922,7 +45922,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAdd") do desc = tf.NodeDescription("BiasAdd") @@ -45949,7 +45949,7 @@ begin desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(bias_) res = tf.execute(desc) - node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing) + node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -45969,7 +45969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") @@ -46019,7 +46019,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46039,7 +46039,7 @@ end An op that disconnects the TPUs on a host from a running distributed """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) local desc tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do desc @@ -46050,7 +46050,7 @@ begin function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") res = tf.execute(desc) - node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing) + node = 
tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46070,7 +46070,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) local desc tf.with_op_name(name, "RaggedRange") do desc = tf.NodeDescription("RaggedRange") @@ -46101,7 +46101,7 @@ begin desc["T"] = tf.data_type(limits_) desc["T"] = tf.data_type(deltas_) res = tf.execute(desc) - node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing) + node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -46121,7 +46121,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "WindowDataset") do desc = tf.NodeDescription("WindowDataset") @@ -46163,7 +46163,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46183,7 +46183,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "Diag") do desc = tf.NodeDescription("Diag") @@ -46199,7 +46199,7 @@ begin tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) - node = tf.TapeNode(diag, [diagonal_], name=nothing) + node = tf.TapeNode(diag, [diagonal_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46219,7 +46219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "InfeedDequeue") do desc = tf.NodeDescription("InfeedDequeue") @@ -46241,7 +46241,7 @@ begin desc["shape"] = Base.identity(shape) end res = tf.execute(desc) - node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing) + node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46261,7 +46261,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") @@ -46291,7 +46291,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46311,7 +46311,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddSparseToTensorsMap") do desc = tf.NodeDescription("AddSparseToTensorsMap") @@ -46347,7 +46347,7 @@ begin end desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) - node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46367,7 +46367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedGather") do desc = tf.NodeDescription("RaggedGather") @@ -46411,7 +46411,7 @@ begin desc["Tvalues"] = tf.data_type(params_dense_values_) desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing, res) tf.add_node(res[1], node) return res end @@ -46431,7 +46431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) local desc tf.with_op_name(name, "RGBToHSV") do desc = tf.NodeDescription("RGBToHSV") @@ -46447,7 +46447,7 @@ begin tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing) + node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing, res) tf.add_node(res[1], node) return res[1] 
end @@ -46467,7 +46467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") @@ -46481,7 +46481,7 @@ begin multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) tf.add_input(desc, multi_device_iterator_) res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing) + node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46501,7 +46501,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) local desc tf.with_op_name(name, "For") do desc = tf.NodeDescription("For") @@ -46539,7 +46539,7 @@ begin desc["body"] = Base.identity(body) end res = tf.execute(desc) - node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing) + node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46559,7 +46559,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMaxSparse") do desc = tf.NodeDescription("SparseReduceMaxSparse") @@ -46598,7 +46598,7 @@ begin end desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing) + node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) tf.add_node(res[1], node) return res end @@ -46618,7 +46618,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatOffset") do desc = tf.NodeDescription("ConcatOffset") @@ -46647,7 +46647,7 @@ begin desc["N"] = Base.Int(N) end res = tf.execute(desc) - node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing) + node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing, res) tf.add_node(res[1], node) return res end @@ -46667,7 +46667,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Stage") do desc = tf.NodeDescription("Stage") @@ -46711,7 +46711,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46731,7 +46731,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "Switch") do desc = tf.NodeDescription("Switch") @@ -46756,7 +46756,7 @@ begin tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(switch, [data_, pred_], name=nothing) + node = tf.TapeNode(switch, [data_, pred_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -46776,7 +46776,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueManyV2") do desc = tf.NodeDescription("QueueDequeueManyV2") @@ -46806,7 +46806,7 @@ begin desc["timeout_ms"] = Base.Int(timeout_ms) end res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing) + node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46826,7 +46826,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentProd") do desc = tf.NodeDescription("SegmentProd") @@ -46849,7 +46849,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) - node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing) + node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46869,7 +46869,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) local desc tf.with_op_name(name, "ApproximateEqual") do desc = 
tf.NodeDescription("ApproximateEqual") @@ -46896,7 +46896,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing) + node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46916,7 +46916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2D") do desc = tf.NodeDescription("Conv2D") @@ -46973,7 +46973,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -46993,7 +46993,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) local desc tf.with_op_name(name, "CrossReplicaSum") do desc = tf.NodeDescription("CrossReplicaSum") @@ -47013,7 +47013,7 @@ begin tf.add_input(desc, group_assignment_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing) + node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47033,7 +47033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) local desc tf.with_op_name(name, "SparseMatMul") do desc = tf.NodeDescription("SparseMatMul") @@ -47079,7 +47079,7 @@ begin desc["Ta"] = tf.data_type(a_) desc["Tb"] = tf.data_type(b_) res = tf.execute(desc) - node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47099,7 +47099,7 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, 
split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorSplit") do desc = tf.NodeDescription("_ScopedAllocatorSplit") @@ -47149,7 +47149,7 @@ begin desc["T"] = tf.data_type(concat_) desc["T"] = tf.data_type(split_) res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing, res) tf.add_node(res[1], node) return res end @@ -47169,7 +47169,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igammac") do desc = tf.NodeDescription("Igammac") @@ -47190,7 +47190,7 @@ begin desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(igammac, [a_, x_], name=nothing) + node = tf.TapeNode(igammac, [a_, x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47210,7 +47210,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) local desc tf.with_op_name(name, "BatchMatMul") do desc = tf.NodeDescription("BatchMatMul") @@ -47243,7 +47243,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing) + node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47263,7 +47263,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") @@ -47307,7 +47307,7 @@ begin desc["combiners"] = map(Base.identity, combiners) end res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47327,7 +47327,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueCloseV2") do desc = tf.NodeDescription("QueueCloseV2") @@ -47347,7 +47347,7 @@ begin desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end res = tf.execute(desc) - node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing) + node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47367,7 +47367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayPack") do desc = tf.NodeDescription("TensorArrayPack") @@ -47397,7 +47397,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing) + node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47417,7 +47417,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreState") do desc = tf.NodeDescription("ReaderRestoreState") @@ -47435,7 +47435,7 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) res = tf.execute(desc) - node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing) + node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47455,7 +47455,7 @@ end *NOTE*: Do not invoke this operator directly in Python. 
Grappler is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) local desc tf.with_op_name(name, "_FusedConv2D") do desc = tf.NodeDescription("_FusedConv2D") @@ -47529,7 +47529,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(args_) res = tf.execute(desc) - node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47549,7 +47549,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) local desc tf.with_op_name(name, "_ReadVariablesOp") do desc = tf.NodeDescription("_ReadVariablesOp") @@ -47575,7 +47575,7 @@ begin desc["dtypes"] = map(Base.identity, dtypes) end res = tf.execute(desc) - node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing) + node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47595,7 +47595,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensors") do desc = tf.NodeDescription("MutableHashTableOfTensors") @@ -47641,7 +47641,7 @@ begin desc["value_shape"] = Base.identity(value_shape) end res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47661,7 +47661,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) local desc tf.with_op_name(name, "ReadFile") do desc = tf.NodeDescription("ReadFile") @@ -47675,7 +47675,7 @@ begin filename_ = convert(tf.TensorHandle, filename_) tf.add_input(desc, filename_) res = tf.execute(desc) - node = tf.TapeNode(read_file, [filename_], name=nothing) + node = tf.TapeNode(read_file, [filename_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47695,7 +47695,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") @@ -47745,7 +47745,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47765,7 +47765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalAvgPoolGrad") do desc = tf.NodeDescription("FractionalAvgPoolGrad") @@ -47799,7 +47799,7 @@ begin end desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing) + node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47819,7 +47819,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, 
"LoadTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") @@ -47865,7 +47865,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47885,7 +47885,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormalV2") do desc = tf.NodeDescription("StatefulStandardNormalV2") @@ -47921,7 +47921,7 @@ begin end desc["shape_dtype"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(stateful_standard_normal_v2, [resource_, algorithm_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing) + node = tf.TapeNode(stateful_standard_normal_v2, [resource_, algorithm_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47941,7 +47941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) local desc tf.with_op_name(name, "Bincount") do desc = tf.NodeDescription("Bincount") @@ -47965,7 +47965,7 @@ begin tf.add_input(desc, weights_) desc["T"] = tf.data_type(weights_) res = tf.execute(desc) - node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing) + node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -47985,7 +47985,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing) local desc tf.with_op_name(name, "Inv") do desc = tf.NodeDescription("Inv") @@ -48001,7 +48001,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(inv, [x_], name=nothing) + node = tf.TapeNode(inv, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48021,7 +48021,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalAdagrad") do desc = tf.NodeDescription("ApplyProximalAdagrad") @@ -48068,7 +48068,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = 
tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48088,7 +48088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) local desc tf.with_op_name(name, "GatherV2") do desc = tf.NodeDescription("GatherV2") @@ -48117,7 +48117,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing) + node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48137,7 +48137,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) local desc tf.with_op_name(name, "WriteFile") do desc = tf.NodeDescription("WriteFile") @@ -48155,7 +48155,7 @@ begin tf.add_input(desc, filename_) tf.add_input(desc, contents_) res = tf.execute(desc) - node = tf.TapeNode(write_file, [filename_, contents_], name=nothing) + node = tf.TapeNode(write_file, [filename_, contents_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48175,7 +48175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") @@ -48194,7 +48194,7 @@ begin tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing) + node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -48214,7 +48214,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceGather") do desc = tf.NodeDescription("ResourceGather") @@ -48247,7 +48247,7 @@ begin end desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing) + node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48267,7 +48267,7 @@ end """ 
begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") @@ -48308,7 +48308,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(delta_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48328,7 +48328,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateMod") do desc = tf.NodeDescription("TruncateMod") @@ -48349,7 +48349,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing) + node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48369,7 +48369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "LogMatrixDeterminant") do desc = tf.NodeDescription("LogMatrixDeterminant") @@ -48390,7 +48390,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing) + node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -48410,7 +48410,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT2D") do desc = tf.NodeDescription("IRFFT2D") @@ -48428,7 +48428,7 @@ begin tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) - node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing) + node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48448,7 +48448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, 
num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesTrainingPredict") do desc = tf.NodeDescription("BoostedTreesTrainingPredict") @@ -48491,7 +48491,7 @@ begin desc["logits_dimension"] = Base.Int(logits_dimension) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) tf.add_node(res[1], node) return res end @@ -48511,7 +48511,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) local desc tf.with_op_name(name, "NearestNeighbors") do desc = tf.NodeDescription("NearestNeighbors") @@ -48538,7 +48538,7 @@ begin tf.add_input(desc, centers_) tf.add_input(desc, k_) res = tf.execute(desc) - node = tf.TapeNode(nearest_neighbors, [points_, centers_, k_], name=nothing) + node = tf.TapeNode(nearest_neighbors, [points_, centers_, k_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -48558,7 +48558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) local desc tf.with_op_name(name, "Floor") do desc = tf.NodeDescription("Floor") @@ -48574,7 +48574,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(floor, [x_], name=nothing) + node = tf.TapeNode(floor, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48594,7 +48594,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -48640,7 +48640,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48660,7 +48660,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) local desc tf.with_op_name(name, "WriteImageSummary") do desc = tf.NodeDescription("WriteImageSummary") @@ -48698,7 +48698,7 @@ begin end desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) - node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing) + node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48718,7 +48718,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "TileGrad") do desc = tf.NodeDescription("TileGrad") @@ -48738,7 +48738,7 @@ begin tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing) + node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48758,7 +48758,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV3") do desc = tf.NodeDescription("TensorArrayGradV3") @@ -48787,7 +48787,7 @@ begin desc["source"] = Base.String(source) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing) + node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing, res) tf.add_node(res[1], node) return res end @@ -48807,7 +48807,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") @@ -48837,7 +48837,7 @@ begin desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing) + node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48857,7 +48857,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, 
is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNorm") do desc = tf.NodeDescription("FusedBatchNorm") @@ -48916,7 +48916,7 @@ begin desc["T"] = tf.data_type(mean_) desc["T"] = tf.data_type(variance_) res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -48936,7 +48936,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalAnd") do desc = tf.NodeDescription("LogicalAnd") @@ -48954,7 +48954,7 @@ begin tf.add_input(desc, x_) tf.add_input(desc, y_) res = tf.execute(desc) - node = tf.TapeNode(logical_and, [x_, y_], name=nothing) + node = tf.TapeNode(logical_and, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -48974,7 +48974,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterUpdate") do desc = tf.NodeDescription("TensorScatterUpdate") @@ -49002,7 +49002,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing) + node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -49022,7 +49022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReaderV2") do desc = tf.NodeDescription("TextLineReaderV2") @@ -49050,7 +49050,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -49070,7 +49070,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, 
Toutput_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "TensorSliceDataset") do
             desc = tf.NodeDescription("TensorSliceDataset")
@@ -49096,7 +49096,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49116,7 +49116,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing)
         local desc
         tf.with_op_name(name, "TensorArrayScatterV3") do
             desc = tf.NodeDescription("TensorArrayScatterV3")
@@ -49144,7 +49144,7 @@ begin
         tf.add_input(desc, flow_in_)
         desc["T"] = tf.data_type(value_)
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing)
+        node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49164,7 +49164,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing)
         local desc
         tf.with_op_name(name, "ResizeNearestNeighborGrad") do
             desc = tf.NodeDescription("ResizeNearestNeighborGrad")
@@ -49190,7 +49190,7 @@ begin
         end
         desc["T"] = tf.data_type(grads_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing)
+        node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49210,7 +49210,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ApplyPowerSign") do
             desc = tf.NodeDescription("ApplyPowerSign")
@@ -49262,7 +49262,7 @@ begin
         desc["T"] = tf.data_type(beta_)
        desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49282,7 +49282,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalRebatchDataset") do
             desc = tf.NodeDescription("ExperimentalRebatchDataset")
@@ -49312,7 +49312,7 @@ begin
             desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_rebatch_dataset, [input_dataset_, num_workers_], name=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(experimental_rebatch_dataset, [input_dataset_, num_workers_], name=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49332,7 +49332,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing)
         local desc
         tf.with_op_name(name, "MirrorPad") do
             desc = tf.NodeDescription("MirrorPad")
@@ -49360,7 +49360,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["Tpaddings"] = tf.data_type(paddings_)
         res = tf.execute(desc)
-        node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing)
+        node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49380,7 +49380,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "LogicalNot") do
             desc = tf.NodeDescription("LogicalNot")
@@ -49394,7 +49394,7 @@ begin
         x_ = convert(tf.TensorHandle, x_)
         tf.add_input(desc, x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(logical_not, [x_], name=nothing)
+        node = tf.TapeNode(logical_not, [x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49414,7 +49414,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "BatchIFFT") do
             desc = tf.NodeDescription("BatchIFFT")
@@ -49428,7 +49428,7 @@ begin
         input_ = convert(tf.TensorHandle, input_)
         tf.add_input(desc, input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(batch_ifft, [input_], name=nothing)
+        node = tf.TapeNode(batch_ifft, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49448,7 +49448,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing)
         local desc
         tf.with_op_name(name, "TensorArrayConcatV2") do
             desc = tf.NodeDescription("TensorArrayConcatV2")
@@ -49483,7 +49483,7 @@ begin
             desc["element_shape_except0"] = Base.identity(element_shape_except0)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing)
+        node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -49503,7 +49503,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
         local desc
         tf.with_op_name(name, "Sum") do
             desc = tf.NodeDescription("Sum")
@@ -49532,7 +49532,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["Tidx"] = tf.data_type(reduction_indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing)
+        node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49552,7 +49552,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesPredict") do
             desc = tf.NodeDescription("BoostedTreesPredict")
@@ -49582,7 +49582,7 @@ begin
            desc["logits_dimension"] = Base.Int(logits_dimension)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing)
+        node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49602,7 +49602,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
         local desc
         tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do
             desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize")
@@ -49683,7 +49683,7 @@ begin
         desc["Tfilter"] = tf.data_type(filter_)
         desc["Tbias"] = tf.data_type(bias_)
         res = tf.execute(desc)
-        node = tf.TapeNode(quantized_conv2d_with_bias_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+        node = tf.TapeNode(quantized_conv2d_with_bias_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -49703,7 +49703,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing)
         local desc
         tf.with_op_name(name, "ResourceSparseApplyAdagrad") do
             desc = tf.NodeDescription("ResourceSparseApplyAdagrad")
@@ -49751,7 +49751,7 @@ begin
         desc["T"] = tf.data_type(grad_)
         desc["Tindices"] = tf.data_type(indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing)
+        node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49771,7 +49771,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing)
         local desc
         tf.with_op_name(name, "LeakyReluGrad") do
             desc = tf.NodeDescription("LeakyReluGrad")
@@ -49798,7 +49798,7 @@ begin
         desc["T"] = tf.data_type(gradients_)
         desc["T"] = tf.data_type(features_)
         res = tf.execute(desc)
-        node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing)
+        node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -49818,7 +49818,7 @@ end
 A graph node which represents a return value of a function.
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceRetval") do desc = tf.NodeDescription("_DeviceRetval") @@ -49840,7 +49840,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing) + node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -49860,7 +49860,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) local desc tf.with_op_name(name, "Pad") do desc = tf.NodeDescription("Pad") @@ -49882,7 +49882,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) - node = tf.TapeNode(pad, [input_, paddings_], name=nothing) + node = tf.TapeNode(pad, [input_, paddings_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -49902,7 +49902,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddManySparseToTensorsMap") do desc = tf.NodeDescription("AddManySparseToTensorsMap") @@ -49938,7 +49938,7 @@ begin end desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) - node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -49958,7 +49958,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReorder") do desc = tf.NodeDescription("SparseReorder") @@ -49987,7 +49987,7 @@ begin tf.add_input(desc, input_shape_) desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing) + node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -50007,7 +50007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, 
"BitwiseXor") do desc = tf.NodeDescription("BitwiseXor") @@ -50028,7 +50028,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing) + node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -50048,7 +50048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixSetDiag") do desc = tf.NodeDescription("BatchMatrixSetDiag") @@ -50069,7 +50069,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing) + node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -50089,7 +50089,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsertV2") do desc = tf.NodeDescription("LookupTableInsertV2") @@ -50115,7 +50115,7 @@ begin desc["Tin"] = tf.data_type(keys_) desc["Tout"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing) + node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -50135,7 +50135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") @@ -50169,7 +50169,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -50189,7 +50189,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc 
         tf.with_op_name(name, "ResourceSparseApplyRMSProp") do
             desc = tf.NodeDescription("ResourceSparseApplyRMSProp")
@@ -50250,7 +50250,7 @@ begin
         desc["T"] = tf.data_type(grad_)
         desc["Tindices"] = tf.data_type(indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50270,7 +50270,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "RandomCrop") do
             desc = tf.NodeDescription("RandomCrop")
@@ -50302,7 +50302,7 @@ begin
         end
         desc["T"] = tf.data_type(image_)
         res = tf.execute(desc)
-        node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing)
+        node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50322,7 +50322,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing)
         local desc
         tf.with_op_name(name, "LookupTableImportV2") do
             desc = tf.NodeDescription("LookupTableImportV2")
@@ -50348,7 +50348,7 @@ begin
         desc["Tin"] = tf.data_type(keys_)
         desc["Tout"] = tf.data_type(values_)
         res = tf.execute(desc)
-        node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing)
+        node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50368,7 +50368,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceScatterNdUpdate") do
             desc = tf.NodeDescription("ResourceScatterNdUpdate")
@@ -50401,7 +50401,7 @@ begin
         desc["Tindices"] = tf.data_type(indices_)
         desc["T"] = tf.data_type(updates_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50421,7 +50421,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing)
         local desc
         tf.with_op_name(name, "StaticRegexFullMatch") do
             desc = tf.NodeDescription("StaticRegexFullMatch")
@@ -50441,7 +50441,7 @@ begin
            desc["pattern"] = Base.String(pattern)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing)
+        node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50461,7 +50461,7 @@ end
 Configures the credentials used by the GCS client of the local TF runtime.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing)
         local desc
         tf.with_op_name(name, "GcsConfigureCredentials") do
             desc = tf.NodeDescription("GcsConfigureCredentials")
@@ -50475,7 +50475,7 @@ begin
         json_ = convert(tf.TensorHandle, json_)
         tf.add_input(desc, json_)
         res = tf.execute(desc)
-        node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing)
+        node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50495,7 +50495,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing)
         local desc
         tf.with_op_name(name, "TensorArraySizeV3") do
             desc = tf.NodeDescription("TensorArraySizeV3")
@@ -50513,7 +50513,7 @@ begin
         tf.add_input(desc, handle_)
         tf.add_input(desc, flow_in_)
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing)
+        node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50533,7 +50533,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do
             desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments")
@@ -50566,7 +50566,7 @@ begin
         desc["Tidx"] = tf.data_type(indices_)
         desc["Tnumsegments"] = tf.data_type(num_segments_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing)
+        node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50586,7 +50586,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do
             desc = tf.NodeDescription("ExperimentalGroupByReducerDataset")
@@ -50676,7 +50676,7 @@ begin
            desc["output_shapes"] = map(Base.identity, output_shapes)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+        node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50696,7 +50696,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
         local desc
         tf.with_op_name(name, "Conv2DBackpropFilter") do
             desc = tf.NodeDescription("Conv2DBackpropFilter")
@@ -50757,7 +50757,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["T"] = tf.data_type(out_backprop_)
         res = tf.execute(desc)
-        node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
+        node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50777,7 +50777,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPoolGrad") do
             desc = tf.NodeDescription("MaxPoolGrad")
@@ -50827,7 +50827,7 @@ begin
         desc["T"] = tf.data_type(orig_output_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50847,7 +50847,7 @@ end
 An op that connects each chip on the host to a centralized UberDriver to allow
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "_InitializeHostForDistributedTPU") do
             desc = tf.NodeDescription("_InitializeHostForDistributedTPU")
@@ -50861,7 +50861,7 @@ begin
         input_ = convert(tf.TensorHandle, input_)
         tf.add_input(desc, input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing)
+        node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50881,7 +50881,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
         local desc
         tf.with_op_name(name, "StagePeek") do
             desc = tf.NodeDescription("StagePeek")
@@ -50925,7 +50925,7 @@ begin
            desc["shared_name"] = Base.String(shared_name)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
+        node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50945,7 +50945,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing)
         local desc
         tf.with_op_name(name, "PadV2") do
             desc = tf.NodeDescription("PadV2")
@@ -50972,7 +50972,7 @@ begin
         desc["Tpaddings"] = tf.data_type(paddings_)
         desc["T"] = tf.data_type(constant_values_)
         res = tf.execute(desc)
-        node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing)
+        node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -50992,7 +50992,7 @@ end
 Creates an empty Tensor with shape `shape` and type `dtype`.
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) local desc tf.with_op_name(name, "_ParallelConcatStart") do desc = tf.NodeDescription("_ParallelConcatStart") @@ -51014,7 +51014,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing) + node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -51034,7 +51034,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) local desc tf.with_op_name(name, "PrintV2") do desc = tf.NodeDescription("PrintV2") @@ -51054,7 +51054,7 @@ begin desc["output_stream"] = Base.String(output_stream) end res = tf.execute(desc) - node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing) + node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -51074,7 +51074,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptionalGetValue") do desc = tf.NodeDescription("OptionalGetValue") @@ -51100,7 +51100,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -51120,7 +51120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") @@ -51166,7 +51166,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end 
@@ -51186,7 +51186,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseSlice") do
             desc = tf.NodeDescription("SparseSlice")
@@ -51223,7 +51223,7 @@ begin
         tf.add_input(desc, size_)
         desc["T"] = tf.data_type(values_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing)
+        node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -51243,7 +51243,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do
             desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries")
@@ -51276,7 +51276,7 @@ begin
            desc["num_features"] = Base.Int(num_features)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing)
+        node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -51296,7 +51296,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing)
         local desc
         tf.with_op_name(name, "MatrixSolve") do
             desc = tf.NodeDescription("MatrixSolve")
@@ -51323,7 +51323,7 @@ begin
         desc["T"] = tf.data_type(matrix_)
         desc["T"] = tf.data_type(rhs_)
         res = tf.execute(desc)
-        node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing)
+        node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51343,7 +51343,7 @@ end
 An op that sets up the centralized structures for a distributed TPU
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing)
         local desc
         tf.with_op_name(name, "_ConfigureDistributedTPU") do
             desc = tf.NodeDescription("_ConfigureDistributedTPU")
@@ -51363,7 +51363,7 @@ begin
            desc["N"] = Base.Int(N)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing)
+        node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51383,7 +51383,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing)
         local desc
         tf.with_op_name(name, "AdjustContrastv2") do
             desc = tf.NodeDescription("AdjustContrastv2")
@@ -51403,7 +51403,7 @@ begin
         tf.add_input(desc, contrast_factor_)
         desc["T"] = tf.data_type(images_)
         res = tf.execute(desc)
-        node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing)
+        node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51423,7 +51423,7 @@ end
 Returns the max of x and y (i.e. x > y ? x : y) element-wise.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
         local desc
         tf.with_op_name(name, "_MklMaximum") do
             desc = tf.NodeDescription("_MklMaximum")
@@ -51457,7 +51457,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing)
+        node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -51477,7 +51477,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
         local desc
         tf.with_op_name(name, "CudnnRNNParamsSize") do
             desc = tf.NodeDescription("CudnnRNNParamsSize")
@@ -51541,7 +51541,7 @@ begin
            desc["seed2"] = Base.Int(seed2)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
+        node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51561,7 +51561,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing)
         local desc
         tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do
             desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries")
@@ -51585,7 +51585,7 @@ begin
            desc["num_features"] = Base.Int(num_features)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing)
+        node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51605,7 +51605,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing)
         local desc
         tf.with_op_name(name, "BatchIFFT3D") do
             desc = tf.NodeDescription("BatchIFFT3D")
@@ -51619,7 +51619,7 @@ begin
         input_ = convert(tf.TensorHandle, input_)
         tf.add_input(desc, input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(batch_ifft3d, [input_], name=nothing)
+        node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51639,7 +51639,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing)
         local desc
         tf.with_op_name(name, "Sigmoid") do
             desc = tf.NodeDescription("Sigmoid")
@@ -51655,7 +51655,7 @@ begin
         tf.add_input(desc, x_)
         desc["T"] = tf.data_type(x_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sigmoid, [x_], name=nothing)
+        node = tf.TapeNode(sigmoid, [x_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51675,7 +51675,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing)
         local desc
         tf.with_op_name(name, "SegmentMean") do
             desc = tf.NodeDescription("SegmentMean")
@@ -51698,7 +51698,7 @@ begin
         desc["T"] = tf.data_type(data_)
         desc["Tindices"] = tf.data_type(segment_ids_)
         res = tf.execute(desc)
-        node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing)
+        node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51718,7 +51718,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing)
         local desc
         tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do
             desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized")
@@ -51732,7 +51732,7 @@ begin
         tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_)
         tf.add_input(desc, tree_ensemble_handle_)
         res = tf.execute(desc)
-        node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing)
+        node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51752,7 +51752,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing)
         local desc
         tf.with_op_name(name, "TensorArraySizeV2") do
             desc = tf.NodeDescription("TensorArraySizeV2")
@@ -51770,7 +51770,7 @@ begin
         tf.add_input(desc, handle_)
         tf.add_input(desc, flow_in_)
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing)
+        node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51790,7 +51790,7 @@ end
 Returns x - y element-wise.
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing)
         local desc
         tf.with_op_name(name, "_MklSub") do
             desc = tf.NodeDescription("_MklSub")
@@ -51824,7 +51824,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing)
+        node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -51844,7 +51844,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing)
         local desc
         tf.with_op_name(name, "SendTPUEmbeddingGradients") do
             desc = tf.NodeDescription("SendTPUEmbeddingGradients")
@@ -51880,7 +51880,7 @@ begin
            desc["config"] = Base.String(config)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing)
+        node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51900,7 +51900,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPool3D") do
             desc = tf.NodeDescription("MaxPool3D")
@@ -51940,7 +51940,7 @@ begin
         end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -51960,7 +51960,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing)
         local desc
         tf.with_op_name(name, "Prod") do
             desc = tf.NodeDescription("Prod")
@@ -51989,7 +51989,7 @@ begin
         desc["T"] = tf.data_type(input_)
         desc["Tidx"] = tf.data_type(reduction_indices_)
         res = tf.execute(desc)
-        node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing)
+        node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52009,7 +52009,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing)
         local desc
         tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do
             desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset")
@@ -52023,7 +52023,7 @@ begin
         size_ = convert(tf.TensorHandle, size_)
         tf.add_input(desc, size_)
         res = tf.execute(desc)
-        node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing)
+        node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52043,7 +52043,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing)
         local desc
         tf.with_op_name(name, "TensorListPushBack") do
             desc = tf.NodeDescription("TensorListPushBack")
@@ -52069,7 +52069,7 @@ begin
         end
         desc["element_dtype"] = tf.data_type(tensor_)
         res = tf.execute(desc)
-        node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing)
+        node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52089,7 +52089,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
         local desc
         tf.with_op_name(name, "BatchFunction") do
             desc = tf.NodeDescription("BatchFunction")
@@ -52179,7 +52179,7 @@ begin
            desc["Tout"] = map(Base.identity, Tout)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing)
+        node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52199,7 +52199,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing)
         local desc
         tf.with_op_name(name, "SparseFillEmptyRows") do
             desc = tf.NodeDescription("SparseFillEmptyRows")
@@ -52233,7 +52233,7 @@ begin
         desc["T"] = tf.data_type(values_)
        desc["T"] = tf.data_type(default_value_)
         res = tf.execute(desc)
-        node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing)
+        node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -52253,7 +52253,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing)
         local desc
         tf.with_op_name(name, "SelfAdjointEigV2") do
             desc = tf.NodeDescription("SelfAdjointEigV2")
@@ -52280,7 +52280,7 @@ begin
         end
         desc["T"] = tf.data_type(input_)
         res = tf.execute(desc)
-        node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing)
+        node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -52300,7 +52300,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
         local desc
         tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do
             desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters")
@@ -52339,7 +52339,7 @@ begin
            desc["shard_id"] = Base.Int(shard_id)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+        node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -52359,7 +52359,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do
             desc = tf.NodeDescription("ResourceSparseApplyAdagradDA")
@@ -52419,7 +52419,7 @@ begin
         desc["T"] = tf.data_type(l1_)
         desc["T"] = tf.data_type(l2_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52439,7 +52439,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
         local desc
         tf.with_op_name(name, "TemporaryVariable") do
             desc = tf.NodeDescription("TemporaryVariable")
@@ -52467,7 +52467,7 @@ begin
            desc["var_name"] = Base.String(var_name)
         end
         res = tf.execute(desc)
-        node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing)
+        node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52487,7 +52487,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
         local desc
         tf.with_op_name(name, "ResourceApplyAddSign") do
             desc = tf.NodeDescription("ResourceApplyAddSign")
@@ -52537,7 +52537,7 @@ begin
         desc["T"] = tf.data_type(beta_)
         desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing)
+        node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52557,7 +52557,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing)
         local desc
         tf.with_op_name(name, "Roll") do
             desc = tf.NodeDescription("Roll")
@@ -52585,7 +52585,7 @@ begin
         desc["Tshift"] = tf.data_type(shift_)
         desc["Taxis"] = tf.data_type(axis_)
         res = tf.execute(desc)
-        node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing)
+        node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52605,7 +52605,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing)
         local desc
         tf.with_op_name(name, "Xdivy") do
             desc = tf.NodeDescription("Xdivy")
@@ -52626,7 +52626,7 @@ begin
         desc["T"] = tf.data_type(x_)
         desc["T"] = tf.data_type(y_)
         res = tf.execute(desc)
-        node = tf.TapeNode(xdivy, [x_, y_], name=nothing)
+        node = tf.TapeNode(xdivy, [x_, y_], name=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52646,7 +52646,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
         local desc
         tf.with_op_name(name, "MaxPool3DGradGrad") do
             desc = tf.NodeDescription("MaxPool3DGradGrad")
@@ -52696,7 +52696,7 @@ begin
         desc["T"] = tf.data_type(orig_output_)
        desc["T"] = tf.data_type(grad_)
         res = tf.execute(desc)
-        node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52716,7 +52716,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing)
         local desc
         tf.with_op_name(name, "CropAndResize") do
             desc = tf.NodeDescription("CropAndResize")
@@ -52756,7 +52756,7 @@ begin
         end
         desc["T"] = tf.data_type(image_)
         res = tf.execute(desc)
-        node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing)
+        node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing, res)
         tf.add_node(res[1], node)
         return res[1]
     end
@@ -52776,7 +52776,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing)
         local desc
         tf.with_op_name(name, "QuantizedBiasAdd") do
             desc = tf.NodeDescription("QuantizedBiasAdd")
@@ -52825,7 +52825,7 @@ begin
         desc["T1"] = tf.data_type(input_)
         desc["T2"] = tf.data_type(bias_)
         res = tf.execute(desc)
-        node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing)
+        node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res)
         tf.add_node(res[1], node)
         return res
     end
@@ -52845,7 +52845,7 @@ end
 
 """
 begin
-    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing)
+    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing)
         local desc
         tf.with_op_name(name, "KMC2ChainInitialization") do
             desc = tf.NodeDescription("KMC2ChainInitialization")
@@ -52863,7 +52863,7 @@ begin
tf.add_input(desc, distances_) tf.add_input(desc, seed_) res = tf.execute(desc) - node = tf.TapeNode(kmc2chain_initialization, [distances_, seed_], name=nothing) + node = tf.TapeNode(kmc2chain_initialization, [distances_, seed_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -52883,7 +52883,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstageNoKey") do desc = tf.NodeDescription("MapUnstageNoKey") @@ -52932,7 +52932,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res end @@ -52952,7 +52952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdSub") do desc = tf.NodeDescription("ScatterNdSub") @@ -52986,7 +52986,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53006,7 +53006,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinear") do desc = tf.NodeDescription("ResizeBilinear") @@ -53032,7 +53032,7 @@ begin end desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing) + node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53052,7 +53052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapPeek") do desc = 
tf.NodeDescription("OrderedMapPeek") @@ -53100,7 +53100,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53120,7 +53120,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArray") do desc = tf.NodeDescription("TensorArray") @@ -53164,7 +53164,7 @@ begin desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53184,7 +53184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceSub") do desc = tf.NodeDescription("InplaceSub") @@ -53209,7 +53209,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(v_) res = tf.execute(desc) - node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing) + node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53229,7 +53229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Pow") do desc = tf.NodeDescription("Pow") @@ -53250,7 +53250,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(pow, [x_, y_], name=nothing) + node = tf.TapeNode(pow, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53270,7 +53270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormal") do desc = tf.NodeDescription("StatefulStandardNormal") @@ -53302,7 +53302,7 @@ begin end desc["shape_dtype"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(stateful_standard_normal, 
[resource_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing) + node = tf.TapeNode(stateful_standard_normal, [resource_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53322,7 +53322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefNextIteration") do desc = tf.NodeDescription("RefNextIteration") @@ -53338,7 +53338,7 @@ begin tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(ref_next_iteration, [data_], name=nothing) + node = tf.TapeNode(ref_next_iteration, [data_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53358,7 +53358,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) local desc tf.with_op_name(name, "ScalarSummary") do desc = tf.NodeDescription("ScalarSummary") @@ -53378,7 +53378,7 @@ begin tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing) + node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53398,7 +53398,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) local desc tf.with_op_name(name, "StringSplitV2") do desc = tf.NodeDescription("StringSplitV2") @@ -53427,7 +53427,7 @@ begin desc["maxsplit"] = Base.Int(maxsplit) end res = tf.execute(desc) - node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing) + node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing, res) tf.add_node(res[1], node) return res end @@ -53447,7 +53447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI0e") do desc = tf.NodeDescription("BesselI0e") @@ -53463,7 +53463,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(bessel_i0e, [x_], name=nothing) + node = tf.TapeNode(bessel_i0e, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53483,7 +53483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "Unique") do desc = tf.NodeDescription("Unique") @@ -53510,7 +53510,7 @@ begin end desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing) + node = tf.TapeNode(unique, 
[x_], name=nothing, out_idx=nothing, res) tf.add_node(res[1], node) return res end @@ -53530,7 +53530,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") @@ -53576,7 +53576,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53596,7 +53596,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReaderV2") do desc = tf.NodeDescription("WholeFileReaderV2") @@ -53618,7 +53618,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53638,7 +53638,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "EagerPyFunc") do desc = tf.NodeDescription("EagerPyFunc") @@ -53670,7 +53670,7 @@ begin desc["Tout"] = map(Base.identity, Tout) end res = tf.execute(desc) - node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53690,7 +53690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "NextIteration") do desc = tf.NodeDescription("NextIteration") @@ -53706,7 +53706,7 @@ begin tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) - node = tf.TapeNode(next_iteration, [data_], name=nothing) + node = tf.TapeNode(next_iteration, [data_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53726,7 
+53726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Case") do desc = tf.NodeDescription("Case") @@ -53768,7 +53768,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(case, [branch_index_, input_], name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + node = tf.TapeNode(case, [branch_index_, input_], name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53788,7 +53788,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterSub") do desc = tf.NodeDescription("TensorScatterSub") @@ -53816,7 +53816,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing) + node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53836,7 +53836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMax") do desc = tf.NodeDescription("ScatterMax") @@ -53870,7 +53870,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53890,7 +53890,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sqrt") do desc = tf.NodeDescription("Sqrt") @@ -53906,7 +53906,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(sqrt, [x_], name=nothing) + node = tf.TapeNode(sqrt, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53926,7 +53926,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, 
"AccumulatorTakeGradient") do desc = tf.NodeDescription("AccumulatorTakeGradient") @@ -53950,7 +53950,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing) + node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -53970,7 +53970,7 @@ end Returns x + y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklAdd") do desc = tf.NodeDescription("_MklAdd") @@ -54004,7 +54004,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing) + node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -54024,7 +54024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) local desc tf.with_op_name(name, "Reciprocal") do desc = tf.NodeDescription("Reciprocal") @@ -54040,7 +54040,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(reciprocal, [x_], name=nothing) + node = tf.TapeNode(reciprocal, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54060,7 +54060,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "OutfeedEnqueueTuple") do desc = tf.NodeDescription("OutfeedEnqueueTuple") @@ -54080,7 +54080,7 @@ begin desc["dtypes"] = map(Base.identity, dtypes) end res = tf.execute(desc) - node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing) + node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54100,7 +54100,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) local desc tf.with_op_name(name, "StringStrip") do desc = tf.NodeDescription("StringStrip") @@ -54114,7 +54114,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(string_strip, [input_], name=nothing) + node = tf.TapeNode(string_strip, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54134,7 +54134,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") @@ -54168,7 +54168,7 @@ begin desc["narrow_range"] = Base.Bool(narrow_range) end res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54188,7 +54188,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierReadySize") do desc = tf.NodeDescription("BarrierReadySize") @@ -54202,7 +54202,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing) + node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54222,7 +54222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucket") do desc = tf.NodeDescription("StringToHashBucket") @@ -54242,7 +54242,7 @@ begin desc["num_buckets"] = Base.Int(num_buckets) end res = tf.execute(desc) - node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing) + node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54262,7 +54262,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcat") do desc = tf.NodeDescription("TensorArrayConcat") @@ -54297,7 +54297,7 @@ begin desc["element_shape_except0"] = Base.identity(element_shape_except0) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing) + node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) tf.add_node(res[1], node) return res end @@ -54317,7 +54317,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilename") do desc = 
tf.NodeDescription("ShardedFilename") @@ -54339,7 +54339,7 @@ begin tf.add_input(desc, shard_) tf.add_input(desc, num_shards_) res = tf.execute(desc) - node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing) + node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54359,7 +54359,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFunc") do desc = tf.NodeDescription("PyFunc") @@ -54391,7 +54391,7 @@ begin desc["Tout"] = map(Base.identity, Tout) end res = tf.execute(desc) - node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing) + node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54411,7 +54411,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentProd") do desc = tf.NodeDescription("UnsortedSegmentProd") @@ -54440,7 +54440,7 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54460,7 +54460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "CountUpTo") do desc = tf.NodeDescription("CountUpTo") @@ -54482,7 +54482,7 @@ begin end desc["T"] = tf.data_type(ref_) res = tf.execute(desc) - node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing) + node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54502,7 +54502,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) local desc tf.with_op_name(name, "RandomGamma") do desc = tf.NodeDescription("RandomGamma") @@ -54542,7 +54542,7 @@ begin desc["S"] = tf.data_type(shape_) desc["T"] = tf.data_type(alpha_) res = tf.execute(desc) - node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing) + node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54562,7 
+54562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGrad") do desc = tf.NodeDescription("TensorArrayGrad") @@ -54586,7 +54586,7 @@ begin desc["source"] = Base.String(source) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing) + node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54606,7 +54606,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2D") do desc = tf.NodeDescription("Dilation2D") @@ -54645,7 +54645,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing) + node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54665,7 +54665,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unbatch") do desc = tf.NodeDescription("Unbatch") @@ -54707,7 +54707,7 @@ begin end desc["T"] = tf.data_type(batched_tensor_) res = tf.execute(desc) - node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54727,7 +54727,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandle") do desc = tf.NodeDescription("GetSessionHandle") @@ -54743,7 +54743,7 @@ begin tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(get_session_handle, [value_], name=nothing) + node = tf.TapeNode(get_session_handle, [value_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54763,7 +54763,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, 
num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") @@ -54802,7 +54802,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -54822,7 +54822,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensorsV2") do desc = tf.NodeDescription("MutableHashTableOfTensorsV2") @@ -54868,7 +54868,7 @@ begin desc["value_shape"] = Base.identity(value_shape) end res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54888,7 +54888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrl") do desc = tf.NodeDescription("SparseApplyFtrl") @@ -54952,7 +54952,7 @@ begin desc["T"] = tf.data_type(l2_) desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing) + node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -54972,7 +54972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, 
output_shapes=nothing) local desc tf.with_op_name(name, "BatchDatasetV2") do desc = tf.NodeDescription("BatchDatasetV2") @@ -55006,7 +55006,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55026,7 +55026,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMinimum") do desc = tf.NodeDescription("SparseSparseMinimum") @@ -55068,7 +55068,7 @@ begin desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing) + node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -55088,7 +55088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) local desc tf.with_op_name(name, "ReverseV2") do desc = tf.NodeDescription("ReverseV2") @@ -55111,7 +55111,7 @@ begin desc["T"] = tf.data_type(tensor_) desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) - node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing) + node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55131,7 +55131,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSlice") do desc = tf.NodeDescription("StridedSlice") @@ -55232,7 +55232,7 @@ begin desc["Index"] = tf.data_type(end_) desc["Index"] = tf.data_type(strides_) res = tf.execute(desc) - node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55252,7 +55252,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) local desc tf.with_op_name(name, "MatchingFiles") do desc = tf.NodeDescription("MatchingFiles") @@ -55266,7 +55266,7 @@ begin pattern_ = convert(tf.TensorHandle, pattern_) tf.add_input(desc, pattern_) res = tf.execute(desc) - node = tf.TapeNode(matching_files, [pattern_], name=nothing) + node = tf.TapeNode(matching_files, [pattern_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55286,7 +55286,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) local desc tf.with_op_name(name, "EncodeBase64") do desc = tf.NodeDescription("EncodeBase64") @@ -55306,7 +55306,7 @@ begin desc["pad"] = Base.Bool(pad) end res = tf.execute(desc) - node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing) + node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55326,7 +55326,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextAsOptional") do desc = tf.NodeDescription("IteratorGetNextAsOptional") @@ -55352,7 +55352,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55372,7 +55372,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueue") do desc = tf.NodeDescription("PaddingFIFOQueue") @@ -55412,7 +55412,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55432,7 +55432,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) + #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "IteratorToStringHandle") do desc = tf.NodeDescription("IteratorToStringHandle") @@ -55446,7 +55446,7 @@ begin resource_handle_ = convert(tf.TensorHandle, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) - node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing) + node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55466,7 +55466,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") @@ -55511,7 +55511,7 @@ begin desc["T"] = tf.data_type(grad_) desc["Targmax"] = tf.data_type(argmax_) res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing) + node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55531,7 +55531,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGather") do desc = tf.NodeDescription("TensorListGather") @@ -55559,7 +55559,7 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_, element_shape_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_, element_shape_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55579,7 +55579,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "Multinomial") do desc = tf.NodeDescription("Multinomial") @@ -55617,7 +55617,7 @@ begin end desc["T"] = tf.data_type(logits_) res = tf.execute(desc) - node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55637,7 +55637,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayRead") do desc = tf.NodeDescription("TensorArrayRead") @@ -55665,7 +55665,7 @@ begin desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing) + node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55685,7 +55685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") @@ -55715,7 +55715,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55735,7 +55735,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "TPUPartitionedCall") do desc = tf.NodeDescription("TPUPartitionedCall") @@ -55771,7 +55771,7 @@ begin desc["f"] = Base.identity(f) end res = tf.execute(desc) - node = tf.TapeNode(tpu_partitioned_call, [args_, device_ordinal_], name=nothing, Tin=nothing, Tout=nothing, f=nothing) + node = tf.TapeNode(tpu_partitioned_call, [args_, device_ordinal_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55791,7 +55791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") @@ -55866,7 +55866,7 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = 
tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_relu_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_and_relu_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -55886,7 +55886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandleV2") do desc = tf.NodeDescription("IteratorFromStringHandleV2") @@ -55912,7 +55912,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55932,7 +55932,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseOr") do desc = tf.NodeDescription("BitwiseOr") @@ -55953,7 +55953,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing) + node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -55973,7 +55973,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMax") do desc = tf.NodeDescription("UnsortedSegmentMax") @@ -56002,7 +56002,7 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing) + node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56022,7 +56022,7 @@ end Returns (x - y)(x - y) element-wise. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSquaredDifference") do desc = tf.NodeDescription("_MklSquaredDifference") @@ -56056,7 +56056,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing) + node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -56076,7 +56076,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilter") do desc = tf.NodeDescription("Conv3DBackpropFilter") @@ -56120,7 +56120,7 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56140,7 +56140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "If") do desc = tf.NodeDescription("If") @@ -56190,7 +56190,7 @@ begin end desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) - node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56210,7 +56210,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FlatMapDataset") do desc = tf.NodeDescription("FlatMapDataset") @@ -56252,7 +56252,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], 
name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56272,7 +56272,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatter") do desc = tf.NodeDescription("TensorListScatter") @@ -56310,7 +56310,7 @@ begin desc["element_dtype"] = tf.data_type(tensor_) desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing) + node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56330,7 +56330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftsignGrad") do desc = tf.NodeDescription("SoftsignGrad") @@ -56351,7 +56351,7 @@ begin desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing) + node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56371,7 +56371,7 @@ end Copy Host Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "CopyHost") do desc = tf.NodeDescription("CopyHost") @@ -56399,7 +56399,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56419,7 +56419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) local desc tf.with_op_name(name, "LinSpace") do desc = tf.NodeDescription("LinSpace") @@ -56447,7 +56447,7 @@ begin desc["T"] = tf.data_type(stop_) desc["Tidx"] = tf.data_type(num_) res = tf.execute(desc) - node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing) + node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56467,7 +56467,7 @@ end Updates input `value` at `loc` with `update`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) local desc tf.with_op_name(name, "_ParallelConcatUpdate") do desc = tf.NodeDescription("_ParallelConcatUpdate") @@ -56494,7 +56494,7 @@ begin desc["T"] = tf.data_type(value_) desc["T"] = tf.data_type(update_) res = tf.execute(desc) - node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing) + node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56514,7 +56514,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "Stack") do desc = tf.NodeDescription("Stack") @@ -56536,7 +56536,7 @@ begin desc["stack_name"] = Base.String(stack_name) end res = tf.execute(desc) - node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing) + node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56556,7 +56556,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPushV2") do desc = tf.NodeDescription("StackPushV2") @@ -56582,7 +56582,7 @@ begin end desc["T"] = 
tf.data_type(elem_) res = tf.execute(desc) - node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing) + node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56602,7 +56602,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignVariableOp") do desc = tf.NodeDescription("AssignVariableOp") @@ -56628,7 +56628,7 @@ begin end desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing) + node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56648,7 +56648,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SparseSplit") do desc = tf.NodeDescription("SparseSplit") @@ -56688,7 +56688,7 @@ begin end desc["T"] = tf.data_type(values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing) + node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing, res) tf.add_node(res[1], node) return res end @@ -56708,7 +56708,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayUnpack") do desc = tf.NodeDescription("TensorArrayUnpack") @@ -56732,7 +56732,7 @@ begin tf.add_input(desc, flow_in_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing) + node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56752,7 +56752,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) local desc tf.with_op_name(name, "TensorListStack") do desc = tf.NodeDescription("TensorListStack") @@ -56782,7 +56782,7 @@ begin desc["num_elements"] = Base.Int(num_elements) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_stack, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, num_elements=nothing) + node = tf.TapeNode(tensor_list_stack, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, num_elements=nothing, res) 
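# Note on the hunks above and below: every change in this patch is mechanical.
# The source-location comment moves from /Users/malmaud/.julia/dev/TensorFlow
# to /Users/malmaud/code/TensorFlow, and each eager body now passes the forward
# result `res` into `tf.TapeNode` so the tape node retains the op's outputs for
# later gradient computation. A minimal sketch of the recurring eager pattern
# (the names `my_op`, `my_op_eager`, and "MyOp" are hypothetical placeholders,
# and the desc construction is assumed to match the graph path):
#
#     function my_op_eager(x_; name=nothing)
#         desc = tf.NodeDescription("MyOp")   # assumed; construction is elided in these hunks
#         x_ = convert(tf.TensorHandle, x_)   # lift the input to a tensor handle
#         tf.add_input(desc, x_)
#         desc["T"] = tf.data_type(x_)        # infer the dtype attr from the input
#         res = tf.execute(desc)              # run the op eagerly; yields output handles
#         node = tf.TapeNode(my_op, [x_], name=nothing, res)  # `res` is the argument this patch adds
#         tf.add_node(res[1], node)           # register the tape node under the first output
#         return res[1]
#     end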
tf.add_node(res[1], node) return res[1] end @@ -56802,7 +56802,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierIncompleteSize") do desc = tf.NodeDescription("BarrierIncompleteSize") @@ -56816,7 +56816,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing) + node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56836,7 +56836,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "Restore") do desc = tf.NodeDescription("Restore") @@ -56866,7 +56866,7 @@ begin desc["preferred_shard"] = Base.Int(preferred_shard) end res = tf.execute(desc) - node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing) + node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -56886,7 +56886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV3") do desc = tf.NodeDescription("TensorArrayV3") @@ -56941,7 +56941,7 @@ begin desc["tensor_array_name"] = Base.String(tensor_array_name) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing, res) tf.add_node(res[1], node) return res end @@ -56961,7 +56961,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalAssertNextDataset") do desc = tf.NodeDescription("ExperimentalAssertNextDataset") @@ -56991,7 +56991,7 @@ begin desc["output_shapes"] = 
map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57011,7 +57011,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) local desc tf.with_op_name(name, "InTopK") do desc = tf.NodeDescription("InTopK") @@ -57037,7 +57037,7 @@ begin end desc["T"] = tf.data_type(targets_) res = tf.execute(desc) - node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing) + node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57057,7 +57057,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterSub") do desc = tf.NodeDescription("ScatterSub") @@ -57091,7 +57091,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57111,7 +57111,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acosh") do desc = tf.NodeDescription("Acosh") @@ -57127,7 +57127,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(acosh, [x_], name=nothing) + node = tf.TapeNode(acosh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57147,7 +57147,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") @@ -57196,7 +57196,7 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + node = 
tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57216,7 +57216,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "Cast") do desc = tf.NodeDescription("Cast") @@ -57250,7 +57250,7 @@ begin end desc["SrcT"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57270,7 +57270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeV2") do desc = tf.NodeDescription("QuantizeV2") @@ -57309,7 +57309,7 @@ begin desc["round_mode"] = Base.String(round_mode) end res = tf.execute(desc) - node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing) + node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing, res) tf.add_node(res[1], node) return res end @@ -57329,7 +57329,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "GeneratorDataset") do desc = tf.NodeDescription("GeneratorDataset") @@ -57399,7 +57399,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57419,7 +57419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSerialize") do desc = tf.NodeDescription("TensorForestTreeSerialize") @@ -57433,7 +57433,7 @@ begin tree_handle_ = convert(tf.TensorHandle, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing) + node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57453,7 +57453,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) local desc tf.with_op_name(name, "NextAfter") do desc = tf.NodeDescription("NextAfter") @@ -57474,7 +57474,7 @@ begin desc["T"] = tf.data_type(x1_) desc["T"] = tf.data_type(x2_) res = tf.execute(desc) - node = tf.TapeNode(next_after, [x1_, x2_], name=nothing) + node = tf.TapeNode(next_after, [x1_, x2_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57494,7 +57494,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV2") do desc = tf.NodeDescription("TensorArrayCloseV2") @@ -57508,7 +57508,7 @@ begin handle_ = convert(tf.TensorHandle, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing) + node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57528,7 +57528,7 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples.
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "BigQueryReader") do desc = tf.NodeDescription("BigQueryReader") @@ -57586,7 +57586,7 @@ begin desc["test_end_point"] = Base.String(test_end_point) end res = tf.execute(desc) - node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57606,7 +57606,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReadV2") do desc = tf.NodeDescription("ReaderReadV2") @@ -57629,7 +57629,7 @@ begin tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) - node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing) + node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -57649,7 +57649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mod") do desc = tf.NodeDescription("Mod") @@ -57670,7 +57670,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(mod, [x_, y_], name=nothing) + node = tf.TapeNode(mod, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57690,7 +57690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "AddV2") do desc = tf.NodeDescription("AddV2") @@ -57711,7 +57711,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(y_) res = tf.execute(desc) - node = tf.TapeNode(add_v2, [x_, y_], name=nothing) + node = tf.TapeNode(add_v2, [x_, y_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57731,7 +57731,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomNormal") do desc = tf.NodeDescription("StatelessRandomNormal") @@ -57759,7 +57759,7 @@ begin desc["T"] = tf.data_type(shape_) desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) - node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing) + node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57779,7 +57779,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceAssign") do desc = tf.NodeDescription("StridedSliceAssign") @@ -57885,7 +57885,7 @@ begin desc["Index"] = tf.data_type(strides_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) - node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57905,7 +57905,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMin") do desc = tf.NodeDescription("ScatterMin") @@ -57939,7 +57939,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing) + node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -57959,7 +57959,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "ResourceStridedSliceAssign") do desc = tf.NodeDescription("ResourceStridedSliceAssign") @@ -58064,7 +58064,7 @@ begin desc["Index"] = tf.data_type(strides_) desc["T"] = tf.data_type(value_) res 
= tf.execute(desc) - node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58084,7 +58084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) local desc tf.with_op_name(name, "RandomGammaGrad") do desc = tf.NodeDescription("RandomGammaGrad") @@ -58105,7 +58105,7 @@ begin desc["T"] = tf.data_type(alpha_) desc["T"] = tf.data_type(sample_) res = tf.execute(desc) - node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing) + node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58125,7 +58125,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") @@ -58178,7 +58178,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing) + node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58198,7 +58198,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") @@ -58226,7 +58226,7 @@ begin desc["max_elements"] = Base.Int(max_elements) end res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing) + node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58246,7 +58246,7 @@ end """ 
begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu6") do desc = tf.NodeDescription("QuantizedRelu6") @@ -58281,7 +58281,7 @@ begin end desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing) + node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -58301,7 +58301,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMaximum") do desc = tf.NodeDescription("SparseSparseMaximum") @@ -58343,7 +58343,7 @@ begin desc["T"] = tf.data_type(a_values_) desc["T"] = tf.data_type(b_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing) + node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -58363,7 +58363,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalization") do desc = tf.NodeDescription("BatchNormWithGlobalNormalization") @@ -58411,7 +58411,7 @@ begin desc["T"] = tf.data_type(beta_) desc["T"] = tf.data_type(gamma_) res = tf.execute(desc) - node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58431,7 +58431,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) local desc tf.with_op_name(name, "InTopKV2") do desc = tf.NodeDescription("InTopKV2") @@ -58456,7 +58456,7 @@ begin desc["T"] = tf.data_type(targets_) desc["T"] = tf.data_type(k_) res = tf.execute(desc) - node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing) + node = tf.TapeNode(in_top_kv2, [predictions_, targets_, 
k_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58476,7 +58476,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "Cholesky") do desc = tf.NodeDescription("Cholesky") @@ -58492,7 +58492,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(cholesky, [input_], name=nothing) + node = tf.TapeNode(cholesky, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58512,7 +58512,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") @@ -58570,7 +58570,7 @@ begin desc["T"] = tf.data_type(epsilon_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing) + node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58590,7 +58590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagrad") do desc = tf.NodeDescription("ResourceApplyAdagrad") @@ -58631,7 +58631,7 @@ begin desc["T"] = tf.data_type(lr_) desc["T"] = tf.data_type(grad_) res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing) + node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58651,7 +58651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do desc = 
tf.NodeDescription("ExperimentalParallelInterleaveDataset") @@ -58713,7 +58713,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58733,7 +58733,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubicGrad") do desc = tf.NodeDescription("ResizeBicubicGrad") @@ -58759,7 +58759,7 @@ begin end desc["T"] = tf.data_type(original_image_) res = tf.execute(desc) - node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing) + node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58779,7 +58779,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEig") do desc = tf.NodeDescription("BatchSelfAdjointEig") @@ -58795,7 +58795,7 @@ begin tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing) + node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58815,7 +58815,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmax") do desc = tf.NodeDescription("SparseSoftmax") @@ -58839,7 +58839,7 @@ begin tf.add_input(desc, sp_shape_) desc["T"] = tf.data_type(sp_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing) + node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58859,7 +58859,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asinh") do desc = tf.NodeDescription("Asinh") @@ -58875,7 +58875,7 @@ begin tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = 
tf.execute(desc) - node = tf.TapeNode(asinh, [x_], name=nothing) + node = tf.TapeNode(asinh, [x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -58895,7 +58895,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRelu") do desc = tf.NodeDescription("QuantizedConv2DAndRelu") @@ -58962,7 +58962,7 @@ begin desc["Tinput"] = tf.data_type(input_) desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_relu, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + node = tf.TapeNode(quantized_conv2d_and_relu, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) tf.add_node(res[1], node) return res end @@ -58982,7 +58982,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixInverse") do desc = tf.NodeDescription("MatrixInverse") @@ -59004,7 +59004,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing) + node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59024,7 +59024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcatLists") do desc = tf.NodeDescription("TensorListConcatLists") @@ -59048,7 +59048,7 @@ begin desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing) + node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59068,7 +59068,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Requantize") do desc = tf.NodeDescription("Requantize") @@ -59111,7 +59111,7 @@ begin end desc["Tinput"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing) + node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) tf.add_node(res[1], node) return res end @@ -59131,7 +59131,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT") do desc = tf.NodeDescription("FFT") @@ -59147,7 +59147,7 @@ begin tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(fft, [input_], name=nothing) + node = tf.TapeNode(fft, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59167,7 +59167,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "ConjugateTranspose") do desc = tf.NodeDescription("ConjugateTranspose") @@ -59189,7 +59189,7 @@ begin desc["T"] = tf.data_type(x_) desc["Tperm"] = tf.data_type(perm_) res = tf.execute(desc) - node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing) + node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59209,7 +59209,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unstage") do desc = tf.NodeDescription("Unstage") @@ -59249,7 +59249,7 @@ begin desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59269,7 +59269,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "Relu6Grad") do desc = tf.NodeDescription("Relu6Grad") @@ -59290,7 +59290,7 @@ begin desc["T"] = tf.data_type(gradients_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) - node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing) + node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59310,7 +59310,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =#
tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslateGrad") do desc = tf.NodeDescription("ScaleAndTranslateGrad") @@ -59345,7 +59345,7 @@ begin desc["T"] = tf.data_type(grads_) desc["T"] = tf.data_type(original_image_) res = tf.execute(desc) - node = tf.TapeNode(scale_and_translate_grad, [grads_, original_image_, scale_, translation_], name=nothing, kernel_type=nothing) + node = tf.TapeNode(scale_and_translate_grad, [grads_, original_image_, scale_, translation_], name=nothing, kernel_type=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59365,7 +59365,7 @@ end Converts an array of tensors to a list of tensors. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) local desc tf.with_op_name(name, "_ArrayToList") do desc = tf.NodeDescription("_ArrayToList") @@ -59393,7 +59393,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing) + node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59413,7 +59413,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV3") do desc = tf.NodeDescription("CudnnRNNV3") @@ -59495,7 +59495,7 @@ begin desc["T"] = tf.data_type(input_c_) desc["T"] = tf.data_type(params_) res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnnv3, [input_, input_h_, input_c_, params_, sequence_lengths_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + node = tf.TapeNode(cudnn_rnnv3, [input_, input_h_, input_c_, params_, sequence_lengths_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) tf.add_node(res[1], node) return res end @@ -59515,7 +59515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) local desc tf.with_op_name(name, "ExpandDims") do desc = tf.NodeDescription("ExpandDims") @@ -59538,7 +59538,7 @@ begin desc["T"] = tf.data_type(input_) desc["Tdim"] = tf.data_type(dim_) res = tf.execute(desc) - node = 
tf.TapeNode(expand_dims, [input_, dim_], name=nothing) + node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59558,7 +59558,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "InvGrad") do desc = tf.NodeDescription("InvGrad") @@ -59579,7 +59579,7 @@ begin desc["T"] = tf.data_type(y_) desc["T"] = tf.data_type(dy_) res = tf.execute(desc) - node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing) + node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59599,7 +59599,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) local desc tf.with_op_name(name, "NonMaxSuppression") do desc = tf.NodeDescription("NonMaxSuppression") @@ -59627,7 +59627,7 @@ begin desc["iou_threshold"] = Base.identity(iou_threshold) end res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing) + node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59647,7 +59647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) local desc tf.with_op_name(name, "L2Loss") do desc = tf.NodeDescription("L2Loss") @@ -59663,7 +59663,7 @@ begin tf.add_input(desc, t_) desc["T"] = tf.data_type(t_) res = tf.execute(desc) - node = tf.TapeNode(l2loss, [t_], name=nothing) + node = tf.TapeNode(l2loss, [t_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59683,7 +59683,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeArea") do desc = tf.NodeDescription("ResizeArea") @@ -59709,7 +59709,7 @@ begin end desc["T"] = tf.data_type(images_) res = tf.execute(desc) - node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing) + node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59729,7 +59729,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; 
name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) local desc tf.with_op_name(name, "SparseCross") do desc = tf.NodeDescription("SparseCross") @@ -59808,7 +59808,7 @@ begin desc["internal_type"] = Base.identity(internal_type) end res = tf.execute(desc) - node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing, res) tf.add_node(res[1], node) return res end @@ -59828,7 +59828,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT3D") do desc = tf.NodeDescription("BatchFFT3D") @@ -59842,7 +59842,7 @@ begin input_ = convert(tf.TensorHandle, input_) tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(batch_fft3d, [input_], name=nothing) + node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59862,7 +59862,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomStandardNormal") do desc = tf.NodeDescription("RandomStandardNormal") @@ -59896,7 +59896,7 @@ begin end desc["T"] = tf.data_type(shape_) res = tf.execute(desc) - node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59916,7 +59916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMul") do desc = tf.NodeDescription("ResourceScatterMul") @@ -59949,7 +59949,7 @@ begin desc["Tindices"] = tf.data_type(indices_) desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing) + node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -59969,7 +59969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, 
dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizer") do desc = tf.NodeDescription("SdcaOptimizer") @@ -60078,7 +60078,7 @@ begin desc["num_inner_iterations"] = Base.Int(num_inner_iterations) end res = tf.execute(desc) - node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) tf.add_node(res[1], node) return res end @@ -60098,7 +60098,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) local desc tf.with_op_name(name, "Zeta") do desc = tf.NodeDescription("Zeta") @@ -60119,7 +60119,7 @@ begin desc["T"] = tf.data_type(x_) desc["T"] = tf.data_type(q_) res = tf.execute(desc) - node = tf.TapeNode(zeta, [x_, q_], name=nothing) + node = tf.TapeNode(zeta, [x_, q_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60139,7 +60139,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBox") do desc = tf.NodeDescription("SampleDistortedBoundingBox") @@ -60206,7 +60206,7 @@ begin end 
desc["T"] = tf.data_type(image_size_) res = tf.execute(desc) - node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) tf.add_node(res[1], node) return res end @@ -60226,7 +60226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "IgammaGradA") do desc = tf.NodeDescription("IgammaGradA") @@ -60247,7 +60247,7 @@ begin desc["T"] = tf.data_type(a_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing) + node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60267,7 +60267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMax") do desc = tf.NodeDescription("SegmentMax") @@ -60290,7 +60290,7 @@ begin desc["T"] = tf.data_type(data_) desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) - node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing) + node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60310,7 +60310,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) local desc tf.with_op_name(name, "Range") do desc = tf.NodeDescription("Range") @@ -60336,7 +60336,7 @@ begin desc["Tidx"] = tf.data_type(limit_) desc["Tidx"] = tf.data_type(delta_) res = tf.execute(desc) - node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing) + node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60356,7 +60356,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") @@ -60395,7 +60395,7 @@ begin desc["shard_id"] = Base.Int(shard_id) end res = tf.execute(desc) - node = 
tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) tf.add_node(res[1], node) return res end @@ -60415,7 +60415,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "FlushSummaryWriter") do desc = tf.NodeDescription("FlushSummaryWriter") @@ -60429,7 +60429,7 @@ begin writer_ = convert(tf.TensorHandle, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) - node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing) + node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60449,7 +60449,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "Dequantize") do desc = tf.NodeDescription("Dequantize") @@ -60479,7 +60479,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing) + node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60499,7 +60499,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRowsGrad") do desc = tf.NodeDescription("SparseFillEmptyRowsGrad") @@ -60524,7 +60524,7 @@ begin tf.add_input(desc, grad_values_) desc["T"] = tf.data_type(grad_values_) res = tf.execute(desc) - node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing) + node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing, res) tf.add_node(res[1], node) return res end @@ -60544,7 +60544,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNext") do desc = tf.NodeDescription("IteratorGetNext") @@ -60570,7 +60570,7 @@ begin desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing) + node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, 
output_shapes=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60590,7 +60590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) local desc tf.with_op_name(name, "SparseTensorDenseAdd") do desc = tf.NodeDescription("SparseTensorDenseAdd") @@ -60624,7 +60624,7 @@ begin desc["Tindices"] = tf.data_type(a_shape_) desc["T"] = tf.data_type(b_) res = tf.execute(desc) - node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing) + node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60644,7 +60644,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) local desc tf.with_op_name(name, "PreventGradient") do desc = tf.NodeDescription("PreventGradient") @@ -60666,7 +60666,7 @@ begin end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing) + node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) tf.add_node(res[1], node) return res[1] end @@ -60686,7 +60686,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) + #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExport") do desc = tf.NodeDescription("LookupTableExport") @@ -60705,7 +60705,7 @@ begin table_handle_ = convert(tf.TensorHandle, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) - node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing) + node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res) tf.add_node(res[1], node) return res end diff --git a/src/tape.jl b/src/tape.jl index 715e9e70..89dee690 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -2,13 +2,14 @@ using MacroTools import MacroTools: splitdef, combinedef mutable struct TapeNode - op - args - kwargs + op::Function + args::Vector{TensorHandle} + results::Vector{TensorHandle} + kwargs::Dict end -TapeNode(op, args; kwargs...) = TapeNode(op, args, kwargs) +TapeNode(op, args, results; kwargs...) = TapeNode(op, args, results, kwargs) mutable struct Tape nodes::Dict{TensorHandle, TapeNode} @@ -105,8 +106,7 @@ end) end) @back_for(Ops.relu, function f(grad, x; kwarg...) - # todo use relu grad - ((x > 0) .* x) .* grad + Ops.relu_grad(grad, x) end) @back_for(Ops.mat_mul, function f(grad, x, y; transpose_a=nothing, transpose_b=nothing, kwargs...) @@ -116,18 +116,16 @@ end) return [grad_x, grad_y] end) -# These are all wrong. the _grad methods expect the OUTPUT, not the input. -# need to cache the output for them -@back_for(Ops.tanh, function f(grad, x; kwargs...) - Ops.tanh_grad(x, grad) +@back_for(Ops.tanh, function f(grad, x; output=nothing, kwargs...) 
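+    # Why the generated wrappers above now pass `res` into tf.TapeNode: these
+    # backward rules read the op's cached *output* (the new `results` field on
+    # TapeNode, handed to the backward function as the `output` keyword)
+    # instead of recomputing it from the input. E.g. d/dx tanh(x) =
+    # 1 - tanh(x)^2 = 1 - y^2, so TF's TanhGrad kernel takes (y, dy) with
+    # y = output[1]; the sigmoid (y*(1-y)) and sqrt (1/(2y)) rules below
+    # follow the same convention.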
+ Ops.tanh_grad(output[1], grad) end) -@back_for(Ops.sigmoid, function f(grad, x; kwargs...) - Ops.sigmoid_grad(x, grad) +@back_for(Ops.sigmoid, function f(grad, x; output=nothing, kwargs...) + Ops.sigmoid_grad(output[1], grad) end) -@back_for(Ops.sqrt, function f(grad, x; kwargs...) - Ops.sqrt_grad(x, grad) +@back_for(Ops.sqrt, function f(grad, x; output=nothing, kwargs...) + Ops.sqrt_grad(output[1], grad) end) @@ -142,7 +140,7 @@ function _grad(tape::Tape, tensor, out_grad, grads) node = tape.nodes[tensor] back_op = grad_fns[node.op] arg_grads = with_no_grad() do - back_op(out_grad, node.args...; node.kwargs...) + back_op(out_grad, node.args...; output=node.results, node.kwargs...) end arg_grads = ensure_vector(arg_grads) for (i, arg) in enumerate(node.args) From f95012d6381f071a44728a4367b476b6a7ee6d44 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Sun, 24 Feb 2019 22:42:38 -0500 Subject: [PATCH 19/49] Improved Keras --- examples/keras.jl | 15 ++++++++----- src/keras.jl | 56 +++++++++++++++++++++++++++++++++++------------ src/tape.jl | 3 +++ 3 files changed, 54 insertions(+), 20 deletions(-) diff --git a/examples/keras.jl b/examples/keras.jl index 8e5fde27..cc662967 100644 --- a/examples/keras.jl +++ b/examples/keras.jl @@ -1,8 +1,11 @@ -m = tf.Model() -layer = tf.dense(3,3) -tf.add(m, layer) +tf=TensorFlow +m = tf.Sequential() + +tf.add(m, tf.Dense(3,10)) +tf.add(m, tf.ReluLayer()) +tf.add(m, tf.Dense(10, 3)) x=constant(randn(5,3)) -y=3x -tf.compile(m, optimizer=.01, loss=tf.mse) -tf.fit(m, x, y, n_epochs=100) +y=3x+5 +tf.compile(m, optimizer=tf.SGD(lr=1e-3), loss=tf.mse) +tf.fit(m, x, y, n_epochs=1000) diff --git a/src/keras.jl b/src/keras.jl index 6f1f2050..1e86b650 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -1,55 +1,82 @@ using Statistics -mutable struct Model +abstract type Model +end + +mutable struct Sequential <: Model attrs::Dict end mutable struct Dense - weights - bias + weights::TensorHandle + bias::TensorHandle end -function dense(in_size, out_size) +function Dense(in_size::Integer, out_size::Integer) layer = Dense(constant(randn(in_size, out_size)), constant(zeros(out_size))) return layer end -function Model() +struct ReluLayer +end + +function forward(r::ReluLayer, x) + nn.relu(x) +end + +struct SGD + lr::TensorHandle +end + +SGD(;lr=1e-3)= SGD(convert(TensorHandle, lr)) + +function Sequential() d = Dict() d["trainable"] = Set() d["layers"] = [] - Model(d) + Sequential(d) end -function add(m::Model, d::Dense) +function add(m::Sequential, d::Dense) set_trainable(m, d.weights) set_trainable(m, d.bias) push!(m.attrs["layers"], d) end +function add(m::Sequential, layer) + push!(m.attrs["layers"], layer) +end + function forward(d::Dense, x) - x*d.weights #+ d.bias + Ops.bias_add(x*d.weights, d.bias) end function mse(y, y_target) return mean((y .- y_target) .^ 2) end -function set_trainable(m::Model, tensor) +function set_trainable(m::Sequential, tensor) push!(m.attrs["trainable"], tensor) end -function compile(m::Model; optimizer=nothing, loss=nothing) +function compile(m::Sequential; optimizer=nothing, loss=nothing) m.attrs["optimizer"] = optimizer m.attrs["loss"] = loss end -function fit(m::Model, x, y; n_epochs=1, batch_size=nothing) - lr = constant(m.attrs["optimizer"]) +function optimizier_step(g::SGD, value, grads) + inplace_sub(value, g.lr .* grads) +end + +function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) + optimizer = m.attrs["optimizer"] for epoch in 1:n_epochs tape = set_tape() - y_predicted = forward(m.attrs["layers"][1], x) + y_predicted = x 
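+        # Forward pass: thread the batch through the layer list in order,
+        # i.e. a left fold over the layers; equivalently (sketch):
+        #   y_predicted = foldl((acc, layer) -> forward(layer, acc), m.attrs["layers"]; init=x)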
+        for layer in m.attrs["layers"]
+            y_predicted = forward(layer, y_predicted)
+        end
         loss = m.attrs["loss"](y, y_predicted)
         println("Epoch $epoch: Loss is $(item(loss))")
         values = collect(m.attrs["trainable"])
@@ -58,7 +85,8 @@ function fit(m::Model, x, y; n_epochs=1, batch_size=nothing)
             if g === nothing
                 continue
             end
-            inplace_sub(value, lr.*g)
+            optimizier_step(optimizer, value, g)
+            # inplace_sub(value, lr.*g)
         end
     end
 end
diff --git a/src/tape.jl b/src/tape.jl
index 89dee690..fa29f6a2 100644
--- a/src/tape.jl
+++ b/src/tape.jl
@@ -128,6 +128,9 @@ end)
     Ops.sqrt_grad(output[1], grad)
 end)
 
+@back_for(Ops.bias_add, function f(grad, x, y; kwargs...)
+    [grad, Ops.bias_add_grad(grad)]
+end)
 
 ensure_vector(x::AbstractArray) = x
 ensure_vector(x) = [x]

From 06b5bcc1055c7faa1ffa7859fb9803b8128f75e0 Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Mon, 25 Feb 2019 13:34:43 -0500
Subject: [PATCH 20/49] Rename Relu layer

---
 examples/keras.jl      |  3 ++-
 examples/neural_ode.jl |  8 ++++++++
 src/keras.jl           | 43 ++++++++++++++++++++++++++++++------
 3 files changed, 47 insertions(+), 7 deletions(-)
 create mode 100644 examples/neural_ode.jl

diff --git a/examples/keras.jl b/examples/keras.jl
index cc662967..64019f67 100644
--- a/examples/keras.jl
+++ b/examples/keras.jl
@@ -1,8 +1,9 @@
+using TensorFlow
 tf=TensorFlow
 m = tf.Sequential()
 
 tf.add(m, tf.Dense(3,10))
-tf.add(m, tf.ReluLayer())
+tf.add(m, tf.Relu())
 tf.add(m, tf.Dense(10, 3))
 
 x=constant(randn(5,3))
diff --git a/examples/neural_ode.jl b/examples/neural_ode.jl
new file mode 100644
index 00000000..f80e95f4
--- /dev/null
+++ b/examples/neural_ode.jl
@@ -0,0 +1,8 @@
+using TensorFlow
+using DifferentialEquations
+
+model = tf.Sequential([tf.Dense(2, 1)])
+f(u, p, t) = model(u)
+problem = ODEProblem(f, u0=[0.5, 0.5], tspan=(0.0, 1.0))
+tf.compile(model, optimizer=tf.Adam(), loss=tf.diffeq_loss(problem, t=[0.0, 0.5, 1.0]))
+tf.fit(model, [1.0, 2.0, 5.0], n_epochs=100)
diff --git a/src/keras.jl b/src/keras.jl
index 1e86b650..6c86e2e3 100644
--- a/src/keras.jl
+++ b/src/keras.jl
@@ -1,13 +1,38 @@
 using Statistics
 
-abstract type Model
+
+abstract type KerasCallable
+end
+
+abstract type Model <: KerasCallable
+end
+
+abstract type Layer <: KerasCallable
+end
+
+function struct_name(f)
+    @capture(f, struct name_ <: _
+        __
+    end) && return name
+    @capture(f, mutable struct name_ <: _
+        __
+    end) && return name
+    return nothing
 end
 
-mutable struct Sequential <: Model
+# Get around https://github.com/JuliaLang/julia/issues/14919
+macro callable(f)
+    name = struct_name(f)
+    quote
+        (m::$name)(args...; kwargs...) = forward(m, args...; kwargs...)
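+        # NB: as committed here the quote emits only this call overload and
+        # drops the struct definition `f` itself, so `@callable struct ...`
+        # defines the method but not the type. The next patch in this series
+        # fixes that by splicing `$(esc(f))` in ahead of this line.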
+ end +end + +@callable mutable struct Sequential <: Model attrs::Dict end -mutable struct Dense +@callable mutable struct Dense <: Layer weights::TensorHandle bias::TensorHandle end @@ -17,10 +42,10 @@ function Dense(in_size::Integer, out_size::Integer) return layer end -struct ReluLayer +@callable struct Relu <: Layer end -function forward(r::ReluLayer, x) +function forward(r::Relu, x) nn.relu(x) end @@ -52,6 +77,13 @@ function forward(d::Dense, x) Ops.bias_add(x*d.weights, d.bias) end +function forward(m::Sequential, x) + for layer in m.attrs["layers"] + x = forward(layer, x) + end + return x +end + function mse(y, y_target) return mean((y .- y_target) .^ 2) end @@ -86,7 +118,6 @@ function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) continue end optimizier_step(optimizer, value, g) - # inplace_sub(value, lr.*g) end end end From d9f7f5efa9a12ad69b1ec6b6759f092d3b21a44a Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Mon, 25 Feb 2019 18:21:30 -0500 Subject: [PATCH 21/49] Eager summaries --- src/crc.jl | 78 ++++++++++++++++++++++++++++++++++++++++++ src/eager.jl | 2 ++ src/keras.jl | 1 + src/ops/module_test.jl | 10 ++++++ src/ops/summaries.jl | 8 +++-- src/summary_test.jl | 7 ++++ src/summary_writer.jl | 69 +++++++++++++++++++++++++++++-------- 7 files changed, 159 insertions(+), 16 deletions(-) create mode 100644 src/crc.jl create mode 100644 src/ops/module_test.jl create mode 100644 src/summary_test.jl diff --git a/src/crc.jl b/src/crc.jl new file mode 100644 index 00000000..903d6477 --- /dev/null +++ b/src/crc.jl @@ -0,0 +1,78 @@ +CRC_TABLE = [ + 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, + 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, + 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, + 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, + 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, + 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, + 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, + 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, + 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, + 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, + 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, + 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, + 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, + 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, + 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, + 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, + 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, + 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, + 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, + 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, + 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, + 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, + 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, + 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, + 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, + 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, + 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, + 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, + 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, + 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, + 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, + 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, + 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, + 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, + 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, + 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, + 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, + 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, + 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, + 0xdfeb33c7, 
0x2d80b0c4, 0x3ed04330, 0xccbbc033, + 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, + 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, + 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, + 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, + 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, + 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, + 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, + 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, + 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, + 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, + 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, + 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, + 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, + 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, + 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, + 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, + 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, + 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, + 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, + 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e, + 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, + 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, + 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, + 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, +] + +MASK = 0xFFFFFFFF + +function crc32(data) + crc = MASK + for byte in data + table_index = (xor(crc, byte) & 0xff) + 1 + crc = xor(CRC_TABLE[table_index], (crc>>8))# & MASK + end + return xor(crc, MASK) +end + diff --git a/src/eager.jl b/src/eager.jl index 39a966e5..598629c9 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -331,3 +331,5 @@ function inplace_sub(x, y) i = cast(constant(0:(item(size(x,0))-1)), Int32) Ops.inplace_sub(x, i, y) end + + diff --git a/src/keras.jl b/src/keras.jl index 6c86e2e3..17780362 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -24,6 +24,7 @@ end macro callable(f) name = struct_name(f) quote + $(esc(f)) (m::$name)(args...; kwargs...) = forward(m, args...; kwargs...) end end diff --git a/src/ops/module_test.jl b/src/ops/module_test.jl new file mode 100644 index 00000000..81e88392 --- /dev/null +++ b/src/ops/module_test.jl @@ -0,0 +1,10 @@ +module M + export x + x=1 + module Y + using ..M + function f(y) + return y+x + end + end +end diff --git a/src/ops/summaries.jl b/src/ops/summaries.jl index cf4c5509..b51c3115 100644 --- a/src/ops/summaries.jl +++ b/src/ops/summaries.jl @@ -16,9 +16,13 @@ for (jl_func, op) in [ (:histogram, :histogram_summary), (:image, :image_summary) ] - @eval @tf.op function $jl_func(args...; collections=[:Summaries], kwargs...) + @eval @tf.op function $jl_func(args...; collections=[:Summaries], step=0, kwargs...) res = tf.Ops.$op(args...; kwargs...) 
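+        # Eager mode has no graph collections to stash the summary in, so the
+        # serialized summary in `res` is handed straight to the default
+        # FileWriter (record_summary in summary_writer.jl), tagged with the
+        # caller-supplied `step`; graph mode keeps the old behavior of adding
+        # the summary tensor to the :Summaries collection for a later merge.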
- foreach(c->tf.add_to_collection(c, res), collections) + if tf.eager_mode + tf.summary.record_summary(tf.item(res), step=step) + else + foreach(c->tf.add_to_collection(c, res), collections) + end res end diff --git a/src/summary_test.jl b/src/summary_test.jl new file mode 100644 index 00000000..22c3d8ec --- /dev/null +++ b/src/summary_test.jl @@ -0,0 +1,7 @@ +tf = TensorFlow +summary = tf.summary +writer = summary.FileWriter("/Users/malmaud/tmp") +summary.set_default(writer) +summary.scalar("x", 3.2, step=0) +summary.scalar("x", 5.0, step=1) +summary.scalar("x", -2.5, step=2) diff --git a/src/summary_writer.jl b/src/summary_writer.jl index 9b04e556..e43efd6b 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -1,11 +1,12 @@ using ProtoBuf -import TensorFlow -import Distributed +import ..TensorFlow const tf = TensorFlow import ..TensorFlow: tensorflow, Graph, get_def_graph, @py_proc +include("crc.jl") + struct FileWriter - pyo::Distributed.Future + file_handle logdir::String end @@ -28,28 +29,50 @@ Arguments: * logdir: A string. Directory where event file will be written. * graph: A `Graph` object. """ -function FileWriter(log_dir::AbstractString; graph=get_def_graph()) +function FileWriter(log_dir::AbstractString; graph=nothing) + if !tf.eager_mode && graph===nothing + graph = get_def_graph() + end mkpath(log_dir) - path = joinpath(log_dir, "events") - pyo = @py_proc pywrap_tensorflow[][:EventsWriter](py_bytes($path)) - writer = FileWriter(pyo, String(log_dir)) + path = joinpath(log_dir, "events.out.tfevents.1") + rm(path, force=true) + # pyo = @py_proc pywrap_tensorflow[][:EventsWriter](py_bytes($path)) + writer = FileWriter(open(path, "w"), String(log_dir)) if graph !== nothing write(writer, graph) end return writer end + +function masked_crc(data) + x = crc32(data) + ((x>>15) | (x<<17)) + 0xa282ead8 +end + function Base.write(writer::FileWriter, event::tensorflow.Event) b = IOBuffer() writeproto(b, event) seekstart(b) proto = read(b) - @py_proc begin - py_event = py_tf[][:Event]() - py_event[:ParseFromString](py_bytes($(proto))) - $(writer.pyo)[:WriteEvent](py_event) - $(writer.pyo)[:Flush]() - end + # @py_proc begin + # py_event = py_tf[][:Event]() + # py_event[:ParseFromString](py_bytes($(proto))) + # $(writer.pyo)[:WriteEvent](py_event) + # $(writer.pyo)[:Flush]() + # end + file = writer.file_handle + proto_length = UInt64(length(proto)) + buffer = IOBuffer() + write(buffer, proto_length) + seekstart(buffer) + proto_length_bytes = read(buffer) + proto_length_bytes_rev = reverse(proto_length_bytes) + write(file, proto_length_bytes) + write(file, masked_crc(proto_length_bytes)) + write(file, proto) + write(file, masked_crc(proto)) + flush(file) nothing end @@ -58,6 +81,12 @@ function Base.write(writer::FileWriter, summary::tensorflow.Summary, global_step setproperty!(event, :step, Int(global_step)) setproperty!(event, :wall_time, time()) setproperty!(event, :summary, summary) + # Some bug in ProtoBuf.jl is causing these to not be marked as filled, + # so we do it manually. 
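+    # (fillset is ProtoBuf.jl's mechanism for marking a field as present:
+    # writeproto only emits fields recorded as set, so without these calls
+    # the Event would serialize with step/wall_time/summary missing.)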
+ fillset(event, :wall_time) + fillset(event, :step) + fillset(event, :summary) + write(writer, event) end @@ -76,7 +105,19 @@ function Base.write(writer::FileWriter, graph::Graph) write(writer, event) end +default_file_writer = nothing +function set_default(writer::FileWriter) + # todo use context + global default_file_writer = writer +end + +function record_summary(summary_pb; step=0) + default_file_writer === nothing && return + write(default_file_writer, summary_pb, step) +end + + function Base.close(writer::FileWriter) - @py_proc $(writer.pyo)[:Close]() + close(writer.file_handle) nothing end From 53d029d62086564b24a60c2499759c1332ec710f Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Mon, 25 Feb 2019 18:30:25 -0500 Subject: [PATCH 22/49] Summary tweaks --- src/crc.jl | 11 ++++++++++- src/summary_test.jl | 7 ++++--- src/summary_writer.jl | 22 +++++++--------------- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/src/crc.jl b/src/crc.jl index 903d6477..7083cd74 100644 --- a/src/crc.jl +++ b/src/crc.jl @@ -1,3 +1,5 @@ +module CRC + CRC_TABLE = [ 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, @@ -65,9 +67,10 @@ CRC_TABLE = [ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, ] -MASK = 0xFFFFFFFF function crc32(data) + MASK = 0xFFFFFFFF + crc = MASK for byte in data table_index = (xor(crc, byte) & 0xff) + 1 @@ -76,3 +79,9 @@ function crc32(data) return xor(crc, MASK) end +function masked_crc(data) + x = crc32(data) + ((x>>15) | (x<<17)) + 0xa282ead8 +end + +end diff --git a/src/summary_test.jl b/src/summary_test.jl index 22c3d8ec..d3380256 100644 --- a/src/summary_test.jl +++ b/src/summary_test.jl @@ -2,6 +2,7 @@ tf = TensorFlow summary = tf.summary writer = summary.FileWriter("/Users/malmaud/tmp") summary.set_default(writer) -summary.scalar("x", 3.2, step=0) -summary.scalar("x", 5.0, step=1) -summary.scalar("x", -2.5, step=2) +tag="y" +summary.scalar(tag, 3.2, step=0) +summary.scalar(tag, 5.0, step=1) +summary.scalar(tag, -2.5, step=2) diff --git a/src/summary_writer.jl b/src/summary_writer.jl index e43efd6b..157dad78 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -34,9 +34,12 @@ function FileWriter(log_dir::AbstractString; graph=nothing) graph = get_def_graph() end mkpath(log_dir) - path = joinpath(log_dir, "events.out.tfevents.1") + local path + for i in Iterators.countfrom(1) + path = joinpath(log_dir, "events.out.tfevents.$i") + isfile(path) || break + end rm(path, force=true) - # pyo = @py_proc pywrap_tensorflow[][:EventsWriter](py_bytes($path)) writer = FileWriter(open(path, "w"), String(log_dir)) if graph !== nothing write(writer, graph) @@ -45,22 +48,11 @@ function FileWriter(log_dir::AbstractString; graph=nothing) end -function masked_crc(data) - x = crc32(data) - ((x>>15) | (x<<17)) + 0xa282ead8 -end - function Base.write(writer::FileWriter, event::tensorflow.Event) b = IOBuffer() writeproto(b, event) seekstart(b) proto = read(b) - # @py_proc begin - # py_event = py_tf[][:Event]() - # py_event[:ParseFromString](py_bytes($(proto))) - # $(writer.pyo)[:WriteEvent](py_event) - # $(writer.pyo)[:Flush]() - # end file = writer.file_handle proto_length = UInt64(length(proto)) buffer = IOBuffer() @@ -69,9 +61,9 @@ function Base.write(writer::FileWriter, event::tensorflow.Event) proto_length_bytes = read(buffer) proto_length_bytes_rev = reverse(proto_length_bytes) write(file, proto_length_bytes) - write(file, masked_crc(proto_length_bytes)) + write(file, CRC.masked_crc(proto_length_bytes)) write(file, 
proto) - write(file, masked_crc(proto)) + write(file, CRC.masked_crc(proto)) flush(file) nothing end From 760601fbc0527dde22bf39696bfe94baf02d7f61 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Mon, 25 Feb 2019 18:33:31 -0500 Subject: [PATCH 23/49] tweaks --- src/crc.jl | 4 +--- src/summary_test.jl | 8 -------- test/summary_eager.jl | 11 +++++++++++ 3 files changed, 12 insertions(+), 11 deletions(-) delete mode 100644 src/summary_test.jl create mode 100644 test/summary_eager.jl diff --git a/src/crc.jl b/src/crc.jl index 7083cd74..5d71e2da 100644 --- a/src/crc.jl +++ b/src/crc.jl @@ -67,14 +67,12 @@ CRC_TABLE = [ 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, ] - function crc32(data) MASK = 0xFFFFFFFF - crc = MASK for byte in data table_index = (xor(crc, byte) & 0xff) + 1 - crc = xor(CRC_TABLE[table_index], (crc>>8))# & MASK + crc = xor(CRC_TABLE[table_index], crc >> 8) end return xor(crc, MASK) end diff --git a/src/summary_test.jl b/src/summary_test.jl deleted file mode 100644 index d3380256..00000000 --- a/src/summary_test.jl +++ /dev/null @@ -1,8 +0,0 @@ -tf = TensorFlow -summary = tf.summary -writer = summary.FileWriter("/Users/malmaud/tmp") -summary.set_default(writer) -tag="y" -summary.scalar(tag, 3.2, step=0) -summary.scalar(tag, 5.0, step=1) -summary.scalar(tag, -2.5, step=2) diff --git a/test/summary_eager.jl b/test/summary_eager.jl new file mode 100644 index 00000000..315b98c9 --- /dev/null +++ b/test/summary_eager.jl @@ -0,0 +1,11 @@ +using TensorFlow +tf = TensorFlow +summary = tf.summary +mktempdir() do tmpdir + writer = summary.FileWriter(tmpdir) + summary.set_default(writer) + tag="y" + summary.scalar(tag, 3.2, step=0) + summary.scalar(tag, 5.0, step=1) + summary.scalar(tag, -2.5, step=2) +end From 5780f55499acff6a09c423f30b4abc55c8a72cfc Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Mon, 25 Feb 2019 19:12:38 -0500 Subject: [PATCH 24/49] scalar summary macro --- src/meta.jl | 6 ++++++ src/ops/summaries.jl | 12 ++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/meta.jl b/src/meta.jl index 66d63b92..cfa7078b 100644 --- a/src/meta.jl +++ b/src/meta.jl @@ -170,3 +170,9 @@ macro tf(ex) end end |> esc end + +macro scalar_summary(f) + @capture(f, funcname(args__; kwargs__)) + +end + diff --git a/src/ops/summaries.jl b/src/ops/summaries.jl index b51c3115..a9673434 100644 --- a/src/ops/summaries.jl +++ b/src/ops/summaries.jl @@ -5,9 +5,11 @@ scalar, audio, histogram, merge_all, -image +image, +@scalar import TensorFlow +using MacroTools const tf = TensorFlow for (jl_func, op) in [ @@ -22,8 +24,8 @@ for (jl_func, op) in [ tf.summary.record_summary(tf.item(res), step=step) else foreach(c->tf.add_to_collection(c, res), collections) + return res end - res end # Set the documentation of the summary function to the same as the @@ -53,4 +55,10 @@ function merge_all(key=:Summaries) merge(tensors) end +macro scalar(f, args...) 
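+    # Intended expansion, for a value bound to a variable: `@scalar loss step=i`
+    # becomes `scalar("loss", loss; step=i)`, so the Julia variable's name
+    # doubles as the summary tag.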
+ quote + scalar($(string(f)), $(esc(f)); $(esc.(args)...)) + end +end + end From edec9a4b0cbac23d446458799a98d668bdbd5765 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 26 Feb 2019 11:47:16 -0500 Subject: [PATCH 25/49] Switch to stdlib crc --- src/crc.jl | 85 ------------------------------------------- src/summary_writer.jl | 7 +++- 2 files changed, 5 insertions(+), 87 deletions(-) delete mode 100644 src/crc.jl diff --git a/src/crc.jl b/src/crc.jl deleted file mode 100644 index 5d71e2da..00000000 --- a/src/crc.jl +++ /dev/null @@ -1,85 +0,0 @@ -module CRC - -CRC_TABLE = [ - 0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, - 0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb, - 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b, - 0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, - 0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b, - 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384, - 0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, - 0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b, - 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a, - 0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, - 0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5, - 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa, - 0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, - 0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a, - 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a, - 0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, - 0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48, - 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957, - 0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, - 0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198, - 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927, - 0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, - 0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8, - 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7, - 0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, - 0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789, - 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859, - 0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, - 0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9, - 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6, - 0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, - 0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829, - 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c, - 0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, - 0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043, - 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c, - 0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, - 0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc, - 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c, - 0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, - 0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652, - 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d, - 0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, - 0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982, - 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d, - 0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, - 0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2, - 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed, - 0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, - 0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f, - 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff, - 0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, - 0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f, - 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540, - 0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, - 0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f, - 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee, - 0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, - 0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321, - 0xae7367ca, 
0x5c18e4c9, 0x4f48173d, 0xbd23943e, - 0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, - 0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e, - 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e, - 0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351, -] - -function crc32(data) - MASK = 0xFFFFFFFF - crc = MASK - for byte in data - table_index = (xor(crc, byte) & 0xff) + 1 - crc = xor(CRC_TABLE[table_index], crc >> 8) - end - return xor(crc, MASK) -end - -function masked_crc(data) - x = crc32(data) - ((x>>15) | (x<<17)) + 0xa282ead8 -end - -end diff --git a/src/summary_writer.jl b/src/summary_writer.jl index 157dad78..0621f405 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -1,10 +1,9 @@ using ProtoBuf +using CRC32c import ..TensorFlow const tf = TensorFlow import ..TensorFlow: tensorflow, Graph, get_def_graph, @py_proc -include("crc.jl") - struct FileWriter file_handle logdir::String @@ -47,6 +46,10 @@ function FileWriter(log_dir::AbstractString; graph=nothing) return writer end +function masked_crc(data) + x = CRC32c.crc32c(data) + ((x>>15) | (x<<17)) + 0xa282ead8 +end function Base.write(writer::FileWriter, event::tensorflow.Event) b = IOBuffer() From b42dc58a6a41c70f105b0b4f0fb88bf21508fc65 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 26 Feb 2019 12:18:45 -0500 Subject: [PATCH 26/49] Switch to context system. --- src/TensorFlow.jl | 15 + src/eager.jl | 56 +- src/generate_ops.jl | 2 +- src/ops.jl | 2 +- src/ops/imported_ops.jl | 2296 +++++++++++++++++++-------------------- src/ops/math.jl | 2 +- src/ops/sequences.jl | 2 +- src/ops/summaries.jl | 2 +- src/summary_writer.jl | 27 +- 9 files changed, 1235 insertions(+), 1169 deletions(-) diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl index fdc166ee..b3bcd406 100644 --- a/src/TensorFlow.jl +++ b/src/TensorFlow.jl @@ -141,8 +141,23 @@ function deallocator(data, len, arg) end +struct Context + attrs::Dict +end + +Context() = Context(Dict()) + +struct ContextStack + contexts::Vector{Context} +end + +ContextStack() = ContextStack(Context[]) + +global_context = ContextStack() + function __init__() c_deallocator[] = @cfunction(deallocator, Cvoid, (Ptr{Cvoid}, Csize_t, Ptr{Cvoid})) + push!(global_context, default_context()) end function load_python_process(;force_reload=false) diff --git a/src/eager.jl b/src/eager.jl index 598629c9..873d29f7 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -31,9 +31,6 @@ function EagerContext(;async=false, placement_policy=nothing) return this end -eager_ctx = nothing #EagerContext() -eager_mode = true - Base.unsafe_convert(::Type{Ptr{Cvoid}}, c::EagerContext) = c.ptr function DeviceList(ctx::EagerContext) @@ -123,11 +120,12 @@ function EagerOp(ctx::EagerContext, op_name) end function EagerOp(op_name) - global eager_ctx - if eager_ctx === nothing - eager_ctx = EagerContext() + if get_eager_context() === nothing + ctx = Context() + ctx.attrs["eager_context"] = EagerContext() + push!(global_context, ctx) end - ctx = eager_ctx + ctx = get_eager_context() status = Status() ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status) check_status(status) @@ -282,7 +280,7 @@ function copy_to_device(ctx::EagerContext, h::TensorHandle, device_name) return res end -copy_to_device(h, device_name) = copy_to_device(eager_ctx, h, device_name) +copy_to_device(h, device_name) = copy_to_device(get_eager_context(), h, device_name) function set_device(op::EagerOp, device_name) status = Status() @@ -319,6 +317,7 @@ Base.collect(t::TensorHandle) = Array(t) Base.iterate(t::TensorHandle, 
args...) = iterate(Array(t), args...) Base.zero(t::AbstractTensor) = Ops.zeros_like(t) Base.ones(t::AbstractTensor) = Ops.ones_like(t) + function Base.:*(t1::TensorHandle, t2::Number) return t1 .* t2 end @@ -332,4 +331,45 @@ function inplace_sub(x, y) Ops.inplace_sub(x, i, y) end +function Base.push!(stack::ContextStack, context::Context) + push!(stack.contexts, context) +end + +function Base.pop!(stack::ContextStack) + pop!(stack.contexts) +end + +function default_context() + context = Context() + context.attrs["eager"] = true + return context +end + +function Base.getindex(c::ContextStack, name) + value = nothing + for context in c.contexts + new_value = get(context.attrs, name, nothing) + if new_value !== nothing + value = new_value + end + end + return value +end + +function context_value(name) + return global_context[name] +end +function in_eager_mode() + return context_value("eager")::Bool +end + +function with_context(ctx, block) + push!(global_context, ctx) + block() + pop!(global_context) +end + +function get_eager_context() + return context_value("eager_context") +end diff --git a/src/generate_ops.jl b/src/generate_ops.jl index bf770c85..c561df86 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -271,7 +271,7 @@ function to_function(op::tensorflow.OpDef) end dispatch_expr = quote function $jl_name($(inputs...)) - if tf.eager_mode + if tf.in_eager_mode() $(eager_name)($(call_args...)) else $(graph_name)($(call_args...)) diff --git a/src/ops.jl b/src/ops.jl index ef85fd2a..25770101 100644 --- a/src/ops.jl +++ b/src/ops.jl @@ -23,7 +23,7 @@ function tf_promote(args...) if isa(arg, AbstractArray) push!(new_args, arg) else - if eager_mode + if in_eager_mode() push!(new_args, Ops.cast(arg, DstT = big_type)) # TODO implement promotion else push!(new_args, convert(Tensor{big_type}, arg)) diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 2418eb9a..fb22f952 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -44,7 +44,7 @@ begin return res[1] end function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - if tf.eager_mode + if tf.in_eager_mode() reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) else reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) @@ -122,7 +122,7 @@ begin return res[1] end function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - if tf.eager_mode + if tf.in_eager_mode() reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) else reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) @@ -176,7 +176,7 @@ begin return res[1] end function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) else tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, 
element_dtype=element_dtype, shape_type=shape_type) @@ -216,7 +216,7 @@ begin return res[1] end function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) else extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) @@ -269,7 +269,7 @@ begin return res end function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.eager_mode + if tf.in_eager_mode() svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) else svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) @@ -315,7 +315,7 @@ begin return res[1] end function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) else iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -369,7 +369,7 @@ begin return res[1] end function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - if tf.eager_mode + if tf.in_eager_mode() ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) else ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) @@ -405,7 +405,7 @@ begin return res[1] end function erf(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() erf_eager(x_; name=name) else erf_graph(x_; name=name) @@ -444,7 +444,7 @@ begin return res end function lookup_table_export_v2(table_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_export_v2_eager(table_handle_; name=name) else lookup_table_export_v2_graph(table_handle_; name=name) @@ -480,7 +480,7 @@ begin return res[1] end function round(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() round_eager(x_; name=name) else round_graph(x_; name=name) @@ -528,7 +528,7 @@ begin return res[1] end function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - if tf.eager_mode + if tf.in_eager_mode() outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) else outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) @@ -562,7 +562,7 @@ begin return res[1] end function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) else tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) @@ -609,7 +609,7 @@ begin return res end function merge(inputs_; name=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() merge_eager(inputs_; name=name, N=N) else merge_graph(inputs_; name=name, N=N) @@ -660,7 +660,7 @@ begin return res[1] end function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) else histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) @@ -696,7 +696,7 @@ begin return res[1] end function asin(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() asin_eager(x_; name=name) else 
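# (This if/else trampoline is emitted for every imported op: the generated
# <op>_eager and <op>_graph methods share one public name, and the branch is
# chosen per call by tf.in_eager_mode(), which reads the "eager" flag from the
# innermost Context on the global context stack introduced in this patch.)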
asin_graph(x_; name=name) @@ -743,7 +743,7 @@ begin return res[1] end function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) else any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) @@ -784,7 +784,7 @@ begin return res[1] end function rsqrt_grad(y_, dy_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rsqrt_grad_eager(y_, dy_; name=name) else rsqrt_grad_graph(y_, dy_; name=name) @@ -832,7 +832,7 @@ begin return res[1] end function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) else tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) @@ -883,7 +883,7 @@ begin return res end function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) - if tf.eager_mode + if tf.in_eager_mode() dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) else dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) @@ -933,7 +933,7 @@ begin return res[1] end function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -967,7 +967,7 @@ begin return res[1] end function reader_serialize_state(reader_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_serialize_state_eager(reader_handle_; name=name) else reader_serialize_state_graph(reader_handle_; name=name) @@ -1008,7 +1008,7 @@ begin return res[1] end function right_shift(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() right_shift_eager(x_, y_; name=name) else right_shift_graph(x_, y_; name=name) @@ -1068,7 +1068,7 @@ begin return res[1] end function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -1110,7 +1110,7 @@ begin return res[1] end function encode_png(image_; name=nothing, compression=nothing) - if tf.eager_mode + if tf.in_eager_mode() encode_png_eager(image_; name=name, compression=compression) else encode_png_graph(image_; name=name, compression=compression) @@ -1170,7 +1170,7 @@ begin return res[1] end function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.eager_mode + if tf.in_eager_mode() debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) else debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) @@ -1206,7 +1206,7 @@ begin return res[1] end function imag(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() imag_eager(input_; 
@@ -1206,7 +1206,7 @@ begin
         return res[1]
     end
     function imag(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             imag_eager(input_; name=name)
         else
             imag_graph(input_; name=name)
[~160 further hunks, identical in shape to the ones above, change `if tf.eager_mode` to `if tf.in_eager_mode()` in the generated dispatch wrappers for: resource_sparse_apply_ftrl_v2, stage_clear, sign, population_count, neg, anonymous_iterator, sparse_reduce_sum, filter_dataset, string_length, conv3d, retrieve_tpu_embedding_adagrad_parameters, optional_has_value, apply_adam, cudnn_rnn_params_to_canonical, irfft3d, angle, tensor_forest_tree_resource_handle_op, learned_unigram_candidate_sampler, _arg, matrix_square_root, sparse_dense_cwise_mul, tensor_array_concat_v3, unicode_script, batch_cholesky_grad, mean, batch_fft, sin, boosted_trees_ensemble_resource_handle_op, quantized_max_pool, ordered_map_stage, partitioned_call, sparse_apply_adagrad, decode_proto_v2, betainc, guarantee_const, decode_bmp, boosted_trees_bucketize, shutdown_distributed_tpu, experimental_stats_aggregator_summary, timestamp, matrix_exponential, size, add_n, sparse_segment_sum, batch_dataset, record_input, queue_dequeue_up_to_v2, retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, load_tpu_embedding_rms_prop_parameters_grad_accum_debug, serialize_tensor, mul, softmax_cross_entropy_with_logits, resource_scatter_div, fixed_length_record_dataset_v2, skip_dataset, cosh, fused_batch_norm_v2, tensor_array_split, ctc_loss, quantized_reshape, floor_div, tensor_array_v2, barrier_close, read_variable_op, quantized_mul, selu, cudnn_rnn_backprop_v3, lookup_table_insert, complex_abs, tridiagonal_solve, lookup_table_import, abs, resource_apply_adam, write_histogram_summary, experimental_indexed_dataset_materialize, _host_send, greater, nccl_broadcast, tensor_list_push_back_batch, resource_scatter_min, slice, unicode_decode, take_dataset, boosted_trees_make_stats_summary, all_candidate_sampler, conv2d_backprop_input, dataset_to_single_element, cache_dataset, fake_quant_with_min_max_vars_gradient, fused_resize_and_pad_conv2d, batch, collective_bcast_recv, batch_to_space_nd, loop_cond, depth_to_space, destroy_temporary_variable, cudnn_rnn, ref_identity, max_pool3d_grad, load_tpu_embedding_momentum_parameters_grad_accum_debug, padding_fifo_queue_v2, conv3d_backprop_input, ref_exit, map_clear, encode_wav, tensor_summary_v2, queue_dequeue_up_to, matrix_band_part, copy, shape_n, experimental_parse_example_dataset, concat, data_format_dim_map, identity_reader, softplus, resource_sparse_apply_proximal_adagrad, parse_single_sequence_example, matrix_diag, fact, shard_dataset, max_pool_grad_grad, resize_bilinear_grad, batch_to_space, optional_from_value, xlogy, cross, bitwise_and, broadcast_to, elu_grad, cudnn_rnn_backprop, string_to_hash_bucket_fast, mutable_hash_table, relu, nth_element, softsign, mutable_dense_hash_table, _shutdown_distributed_tpu, polygamma, nccl_reduce, arg_max, matrix_set_diag, space_to_batch_nd, sparse_reshape, optimize_dataset, concat_v2, resource_sparse_apply_adadelta, tile, mutex_v2, serialize_many_sparse, tpu_embedding_activations, batch_matrix_solve_ls, not_equal, lgamma, tpu_replicate_metadata, experimental_thread_pool_handle, self_adjoint_eig, boosted_trees_quantile_stream_resource_get_bucket_boundaries, sparse_dense_cwise_div, acos, all, compare_and_bitpack, var_handle_op, experimental_unique_dataset, quantized_conv2d_with_bias_sum_and_relu, list_diff, create_summary_file_writer, generate_vocab_remapping]
adjoint=adjoint) else batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) @@ -10076,7 +10076,7 @@ begin return res[1] end function control_trigger(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() control_trigger_eager(; name=name) else control_trigger_graph(; name=name) @@ -10107,7 +10107,7 @@ begin return res[1] end function tpu_ordinal_selector(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tpu_ordinal_selector_eager(; name=name) else tpu_ordinal_selector_graph(; name=name) @@ -10143,7 +10143,7 @@ begin return res[1] end function stop_gradient(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() stop_gradient_eager(input_; name=name) else stop_gradient_graph(input_; name=name) @@ -10195,7 +10195,7 @@ begin return res end function split(split_dim_, value_; name=nothing, num_split=nothing) - if tf.eager_mode + if tf.in_eager_mode() split_eager(split_dim_, value_; name=name, num_split=num_split) else split_graph(split_dim_, value_; name=name, num_split=num_split) @@ -10254,7 +10254,7 @@ begin return res end function unpack(value_; name=nothing, num=nothing, axis=nothing) - if tf.eager_mode + if tf.in_eager_mode() unpack_eager(value_; name=name, num=num, axis=axis) else unpack_graph(value_; name=name, num=num, axis=axis) @@ -10307,7 +10307,7 @@ begin return res[1] end function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) else resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) @@ -10355,7 +10355,7 @@ begin return res[1] end function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) else tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) @@ -10403,7 +10403,7 @@ begin return res[1] end function fill(dims_, value_; name=nothing, index_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() fill_eager(dims_, value_; name=name, index_type=index_type) else fill_graph(dims_, value_; name=name, index_type=index_type) @@ -10504,7 +10504,7 @@ begin return res end function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) @@ -10540,7 +10540,7 @@ begin return res[1] end function softmax(logits_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() softmax_eager(logits_; name=name) else softmax_graph(logits_; name=name) @@ -10586,7 +10586,7 @@ begin return res[1] end function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) else resize_bicubic_graph(images_, 
size_; name=name, align_corners=align_corners) @@ -10628,7 +10628,7 @@ begin return res[1] end function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) else infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) @@ -10688,7 +10688,7 @@ begin return res[1] end function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) else multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) @@ -10756,7 +10756,7 @@ begin return res[1] end function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) else decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) @@ -10802,7 +10802,7 @@ begin return res[1] end function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) else lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) @@ -10864,7 +10864,7 @@ begin return res[1] end function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) else shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -10919,7 +10919,7 @@ begin return res end function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) - if tf.eager_mode + if tf.in_eager_mode() requantization_range_per_channel_eager(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) else requantization_range_per_channel_graph(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) @@ -10965,7 +10965,7 @@ begin return res[1] end function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -11029,7 +11029,7 @@ begin return res[1] end function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() avg_pool3d_grad_eager(orig_input_shape_, 
grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -11077,7 +11077,7 @@ begin return res[1] end function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) else placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) @@ -11123,7 +11123,7 @@ begin return res[1] end function initialize_table_v2(table_handle_, keys_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() initialize_table_v2_eager(table_handle_, keys_, values_; name=name) else initialize_table_v2_graph(table_handle_, keys_, values_; name=name) @@ -11173,7 +11173,7 @@ begin return res[1] end function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) - if tf.eager_mode + if tf.in_eager_mode() set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) else set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) @@ -11223,7 +11223,7 @@ begin return res[1] end function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) - if tf.eager_mode + if tf.in_eager_mode() assert_eager(condition_, data_; name=name, T=T, summarize=summarize) else assert_graph(condition_, data_; name=name, T=T, summarize=summarize) @@ -11287,7 +11287,7 @@ begin return res end function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - if tf.eager_mode + if tf.in_eager_mode() non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) else non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) @@ -11372,7 +11372,7 @@ begin return res end function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - if tf.eager_mode + if tf.in_eager_mode() sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) else sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) @@ -11434,7 +11434,7 @@ begin return res[1] end function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - if tf.eager_mode + if tf.in_eager_mode() initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) else initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, 
value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) @@ -11468,7 +11468,7 @@ begin return res[1] end function lookup_table_size(table_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_size_eager(table_handle_; name=name) else lookup_table_size_graph(table_handle_; name=name) @@ -11551,7 +11551,7 @@ begin return res[1] end function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) else sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) @@ -11597,7 +11597,7 @@ begin return res end function broadcast_gradient_args(s0_, s1_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() broadcast_gradient_args_eager(s0_, s1_; name=name) else broadcast_gradient_args_graph(s0_, s1_; name=name) @@ -11639,7 +11639,7 @@ begin return res[1] end function summary_writer(; name=nothing, shared_name=nothing, container=nothing) - if tf.eager_mode + if tf.in_eager_mode() summary_writer_eager(; name=name, shared_name=shared_name, container=container) else summary_writer_graph(; name=name, shared_name=shared_name, container=container) @@ -11686,7 +11686,7 @@ begin return res end function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) - if tf.eager_mode + if tf.in_eager_mode() recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) else recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) @@ -11738,7 +11738,7 @@ begin return res[1] end function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - if tf.eager_mode + if tf.in_eager_mode() _while_eager(input_; name=name, T=T, cond=cond, body=body) else _while_graph(input_; name=name, T=T, cond=cond, body=body) @@ -11784,7 +11784,7 @@ begin return res[1] end function initialize_table(table_handle_, keys_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() initialize_table_eager(table_handle_, keys_, values_; name=name) else initialize_table_graph(table_handle_, keys_, values_; name=name) @@ -11862,7 +11862,7 @@ begin return res[1] end function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) - if tf.eager_mode + if tf.in_eager_mode() debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) else debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) @@ -11921,7 +11921,7 @@ begin return res end function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, 
table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -11957,7 +11957,7 @@ begin return res[1] end function tanh(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tanh_eager(x_; name=name) else tanh_graph(x_; name=name) @@ -12009,7 +12009,7 @@ begin return res[1] end function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.eager_mode + if tf.in_eager_mode() symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f) else symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f) @@ -12087,7 +12087,7 @@ begin return res[1] end function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) else boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) @@ -12155,7 +12155,7 @@ begin return res[1] end function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) else apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) @@ -12198,7 +12198,7 @@ begin return res end function reader_read(reader_handle_, queue_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_read_eager(reader_handle_, queue_handle_; name=name) else reader_read_graph(reader_handle_, queue_handle_; name=name) @@ -12244,7 +12244,7 @@ begin return res[1] end function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) else _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) @@ -12278,7 +12278,7 @@ begin return res[1] end function mutex_lock(mutex_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() mutex_lock_eager(mutex_; name=name) else mutex_lock_graph(mutex_; name=name) @@ -12316,7 +12316,7 @@ begin return res[1] end function accumulator_set_global_step(handle_, new_global_step_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() accumulator_set_global_step_eager(handle_, new_global_step_; name=name) else accumulator_set_global_step_graph(handle_, new_global_step_; name=name) @@ -12379,7 +12379,7 @@ begin return res end function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) else quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) @@ -12421,7 +12421,7 @@ 
begin return res[1] end function squeeze(input_; name=nothing, squeeze_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) else squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) @@ -12455,7 +12455,7 @@ begin return res[1] end function experimental_matching_files_dataset(patterns_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_matching_files_dataset_eager(patterns_; name=name) else experimental_matching_files_dataset_graph(patterns_; name=name) @@ -12497,7 +12497,7 @@ begin return res[1] end function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) else experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) @@ -12555,7 +12555,7 @@ begin return res[1] end function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -12586,7 +12586,7 @@ begin return res[1] end function no_op(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() no_op_eager(; name=name) else no_op_graph(; name=name) @@ -12638,7 +12638,7 @@ begin return res[1] end function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) else zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) @@ -12680,7 +12680,7 @@ begin return res[1] end function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) else identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) @@ -12722,7 +12722,7 @@ begin return res[1] end function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lmdb_reader_eager(; name=name, container=container, shared_name=shared_name) else lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) @@ -12776,7 +12776,7 @@ begin return res[1] end function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) else nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) @@ -12818,7 +12818,7 @@ begin return res[1] end function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) else text_line_dataset_graph(filenames_, 
compression_type_, buffer_size_; name=name) @@ -12870,7 +12870,7 @@ begin return res[1] end function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - if tf.eager_mode + if tf.in_eager_mode() sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) else sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) @@ -12918,7 +12918,7 @@ begin return res[1] end function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) else tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) @@ -12964,7 +12964,7 @@ begin return res[1] end function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) else multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -13032,7 +13032,7 @@ begin return res[1] end function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) else padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) @@ -13094,7 +13094,7 @@ begin return res[1] end function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -13132,7 +13132,7 @@ begin return res[1] end function tensor_array_size(handle_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_size_eager(handle_, flow_in_; name=name) else tensor_array_size_graph(handle_, flow_in_; name=name) @@ -13192,7 +13192,7 @@ begin return res[1] end function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -13240,7 +13240,7 @@ begin return res[1] end function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) else stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) @@ -13314,7 +13314,7 @@ begin return res end function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) else sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) @@ -13368,7 +13368,7 @@ begin return res[1] end function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) else tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) @@ -13420,7 +13420,7 @@ begin return res[1] end function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - if tf.eager_mode + if tf.in_eager_mode() remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) else remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) @@ -13468,7 +13468,7 @@ begin return res[1] end function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) else sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) @@ -13523,7 +13523,7 @@ begin return res[1] end function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.eager_mode + if tf.in_eager_mode() cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) else cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) @@ -13596,7 +13596,7 @@ begin return res end function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) else batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) @@ -13660,7 +13660,7 @@ begin return res[1] end function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else 
avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -13708,7 +13708,7 @@ begin return res[1] end function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - if tf.eager_mode + if tf.in_eager_mode() restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) else restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) @@ -13744,7 +13744,7 @@ begin return res[1] end function relu6(features_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() relu6_eager(features_; name=name) else relu6_graph(features_; name=name) @@ -13828,7 +13828,7 @@ begin return res[1] end function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) else sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) @@ -13894,7 +13894,7 @@ begin return res[1] end function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.eager_mode + if tf.in_eager_mode() _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) else _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) @@ -13954,7 +13954,7 @@ begin return res[1] end function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -13990,7 +13990,7 @@ begin return res[1] end function invert(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() invert_eager(x_; name=name) else invert_graph(x_; name=name) @@ -14032,7 +14032,7 @@ begin return res[1] end function _unary_ops_composition(x_; name=nothing, op_names=nothing) - if tf.eager_mode + if tf.in_eager_mode() _unary_ops_composition_eager(x_; name=name, op_names=op_names) else _unary_ops_composition_graph(x_; name=name, op_names=op_names) @@ -14106,7 +14106,7 @@ begin return res[1] end function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) else experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, 
preserve_cardinality=preserve_cardinality) @@ -14172,7 +14172,7 @@ begin return res[1] end function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -14212,7 +14212,7 @@ begin return res[1] end function parse_tensor(serialized_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() parse_tensor_eager(serialized_; name=name, out_type=out_type) else parse_tensor_graph(serialized_; name=name, out_type=out_type) @@ -14266,7 +14266,7 @@ begin return res[1] end function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) else experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) @@ -14320,7 +14320,7 @@ begin return res[1] end function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) else multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -14379,7 +14379,7 @@ begin return res[1] end function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) else random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) @@ -14426,7 +14426,7 @@ begin return res end function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) else sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) @@ -14474,7 +14474,7 @@ begin return res[1] end function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) else tensor_array_read_v2_graph(handle_, index_, flow_in_; name=name, dtype=dtype) @@ -14521,7 +14521,7 @@ begin return res end function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) else reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) @@ -14583,7 +14583,7 @@ begin return res[1] end 
function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - if tf.eager_mode + if tf.in_eager_mode() encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) else encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) @@ -14709,7 +14709,7 @@ begin return res[1] end function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.eager_mode + if tf.in_eager_mode() strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) else strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) @@ -14763,7 +14763,7 @@ begin return res[1] end function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) else _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) @@ -14827,7 +14827,7 @@ begin return res[1] end function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) else padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) @@ -14875,7 +14875,7 @@ begin return res[1] end function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) else data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) @@ -14933,7 +14933,7 @@ begin return res[1] end function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) else string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) @@ -14999,7 +14999,7 @@ begin return res[1] end function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - if tf.eager_mode + if tf.in_eager_mode() as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) else as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) @@ 
-15049,7 +15049,7 @@ begin return res[1] end function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) else queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) @@ -15091,7 +15091,7 @@ begin return res[1] end function fake_param(; name=nothing, dtype=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() fake_param_eager(; name=name, dtype=dtype, shape=shape) else fake_param_graph(; name=name, dtype=dtype, shape=shape) @@ -15154,7 +15154,7 @@ begin return res[1] end function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) else apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) @@ -15188,7 +15188,7 @@ begin return res[1] end function experimental_iterator_get_device(resource_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_iterator_get_device_eager(resource_; name=name) else experimental_iterator_get_device_graph(resource_; name=name) @@ -15236,7 +15236,7 @@ begin return res[1] end function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) else adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) @@ -15296,7 +15296,7 @@ begin return res[1] end function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) else extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) @@ -15350,7 +15350,7 @@ begin return res[1] end function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() scale_and_translate_eager(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) else scale_and_translate_graph(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) @@ -15381,7 +15381,7 @@ begin return res[1] end function optional_none(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() optional_none_eager(; name=name) else optional_none_graph(; name=name) @@ -15435,7 +15435,7 @@ begin return res[1] end function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) else variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) @@ -15471,7 +15471,7 @@ begin return res[1] end function elu(features_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() elu_eager(features_; name=name) else elu_graph(features_; name=name) @@ -15525,7 +15525,7 @@ begin return res[1] end function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if 
tf.in_eager_mode() scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -15566,7 +15566,7 @@ begin return res[1] end function floor_mod(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() floor_mod_eager(x_, y_; name=name) else floor_mod_graph(x_, y_; name=name) @@ -15612,7 +15612,7 @@ begin return res[1] end function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -15670,7 +15670,7 @@ begin return res[1] end function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -15731,7 +15731,7 @@ begin return res end function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) else compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) @@ -15771,7 +15771,7 @@ begin return res[1] end function string_to_number(string_tensor_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_to_number_eager(string_tensor_; name=name, out_type=out_type) else string_to_number_graph(string_tensor_; name=name, out_type=out_type) @@ -15807,7 +15807,7 @@ begin return res[1] end function snapshot(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() snapshot_eager(input_; name=name) else snapshot_graph(input_; name=name) @@ -15845,7 +15845,7 @@ begin return res[1] end function deserialize_iterator(resource_handle_, serialized_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() deserialize_iterator_eager(resource_handle_, serialized_; name=name) else deserialize_iterator_graph(resource_handle_, serialized_; name=name) @@ -15881,7 +15881,7 @@ begin return res[1] end function atan(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() atan_eager(x_; name=name) else atan_graph(x_; name=name) @@ -15934,7 +15934,7 @@ begin return res[1] end function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - if tf.eager_mode + if tf.in_eager_mode() mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) else mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) @@ -15970,7 +15970,7 @@ begin return res[1] end function erfc(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() erfc_eager(x_; name=name) else erfc_graph(x_; name=name) @@ -16011,7 +16011,7 @@ begin return res[1] end function sigmoid_grad(y_, dy_; name=nothing) - if 
tf.eager_mode + if tf.in_eager_mode() sigmoid_grad_eager(y_, dy_; name=name) else sigmoid_grad_graph(y_, dy_; name=name) @@ -16083,7 +16083,7 @@ begin return res[1] end function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - if tf.eager_mode + if tf.in_eager_mode() fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) else fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) @@ -16136,7 +16136,7 @@ begin return res[1] end function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) else non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) @@ -16200,7 +16200,7 @@ begin return res[1] end function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) else dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) @@ -16238,7 +16238,7 @@ begin return res[1] end function logical_or(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() logical_or_eager(x_, y_; name=name) else logical_or_graph(x_, y_; name=name) @@ -16307,7 +16307,7 @@ begin return res[1] end function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) else resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -16373,7 +16373,7 @@ begin return res end function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.eager_mode + if tf.in_eager_mode() dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) else dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) @@ -16407,7 +16407,7 @@ begin return res[1] end function reader_num_records_produced(reader_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_num_records_produced_eager(reader_handle_; name=name) else reader_num_records_produced_graph(reader_handle_; name=name) @@ -16447,7 +16447,7 @@ begin return res[1] end function adjust_hue(images_, delta_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() adjust_hue_eager(images_, delta_; name=name) else adjust_hue_graph(images_, delta_; name=name) @@ -16491,7 +16491,7 @@ begin return res[1] 
end function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) else boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) @@ -16571,7 +16571,7 @@ begin return res[1] end function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) else experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) @@ -16612,7 +16612,7 @@ begin return res[1] end function real_div(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() real_div_eager(x_, y_; name=name) else real_div_graph(x_, y_; name=name) @@ -16666,7 +16666,7 @@ begin return res[1] end function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - if tf.eager_mode + if tf.in_eager_mode() restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) else restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) @@ -16706,7 +16706,7 @@ begin return res[1] end function stack_pop_v2(handle_; name=nothing, elem_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) else stack_pop_v2_graph(handle_; name=name, elem_type=elem_type) @@ -16746,7 +16746,7 @@ begin return res[1] end function reverse(tensor_, dims_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reverse_eager(tensor_, dims_; name=name) else reverse_graph(tensor_, dims_; name=name) @@ -16792,7 +16792,7 @@ begin return res[1] end function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) else decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) @@ -16841,7 +16841,7 @@ begin return res[1] end function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) else non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) @@ -16882,7 +16882,7 @@ begin return res[1] end function igamma(a_, x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() igamma_eager(a_, x_; name=name) else igamma_graph(a_, x_; name=name) @@ -16918,7 +16918,7 @@ begin return res[1] end function digamma(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
digamma_eager(x_; name=name) else digamma_graph(x_; name=name) @@ -16997,7 +16997,7 @@ begin return res[1] end function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) else resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -17045,7 +17045,7 @@ begin return res[1] end function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) else space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) @@ -17086,7 +17086,7 @@ begin return res[1] end function sqrt_grad(y_, dy_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sqrt_grad_eager(y_, dy_; name=name) else sqrt_grad_graph(y_, dy_; name=name) @@ -17154,7 +17154,7 @@ begin return res[1] end function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -17201,7 +17201,7 @@ begin return res end function qr(input_; name=nothing, full_matrices=nothing) - if tf.eager_mode + if tf.in_eager_mode() qr_eager(input_; name=name, full_matrices=full_matrices) else qr_graph(input_; name=name, full_matrices=full_matrices) @@ -17272,7 +17272,7 @@ begin return res end function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) else boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) @@ -17333,7 +17333,7 @@ begin return res[1] end function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) else unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) @@ -17369,7 +17369,7 @@ begin return res[1] end function log_softmax(logits_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() log_softmax_eager(logits_; name=name) else log_softmax_graph(logits_; name=name) @@ -17409,7 +17409,7 @@ begin return res[1] end function resource_count_up_to(resource_; name=nothing, limit=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_count_up_to_eager(resource_; name=name, limit=limit) else resource_count_up_to_graph(resource_; 
name=name, limit=limit) @@ -17457,7 +17457,7 @@ begin return res[1] end function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) else accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) @@ -17541,7 +17541,7 @@ begin return res[1] end function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) else parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) @@ -17595,7 +17595,7 @@ begin return res[1] end function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) else random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) @@ -17659,7 +17659,7 @@ begin return res[1] end function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.eager_mode + if tf.in_eager_mode() unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) else unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) @@ -17693,7 +17693,7 @@ begin return res[1] end function reader_reset(reader_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_reset_eager(reader_handle_; name=name) else reader_reset_graph(reader_handle_; name=name) @@ -17741,7 +17741,7 @@ begin return res[1] end function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name) else _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) @@ -17777,7 +17777,7 @@ begin return res[1] end function batch_matrix_determinant(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_determinant_eager(input_; name=name) else batch_matrix_determinant_graph(input_; name=name) @@ -17818,7 +17818,7 @@ begin return res[1] end function less_equal(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() less_equal_eager(x_, y_; name=name) else less_equal_graph(x_, y_; name=name) @@ -17870,7 +17870,7 @@ begin return res[1] end function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) else apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) @@ -17917,7 +17917,7 @@ begin return res[1] end function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) else sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) @@ -17953,7 +17953,7 @@ begin return res[1] end function matrix_logarithm(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() matrix_logarithm_eager(input_; name=name) else matrix_logarithm_graph(input_; name=name) @@ -18007,7 +18007,7 @@ begin return res[1] end function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -18077,7 +18077,7 @@ begin return res[1] end function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) else decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) @@ -18155,7 +18155,7 @@ begin return res[1] end function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) else random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) @@ -18205,7 +18205,7 @@ begin return res[1] end function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) else queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) @@ -18290,7 +18290,7 @@ begin return res[1] end function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) else resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) @@ -18360,7 +18360,7 @@ begin return res[1] end function 
interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) else interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) @@ -18400,7 +18400,7 @@ begin return res[1] end function stack_pop(handle_; name=nothing, elem_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_pop_eager(handle_; name=name, elem_type=elem_type) else stack_pop_graph(handle_; name=name, elem_type=elem_type) @@ -18456,7 +18456,7 @@ begin return res[1] end function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) else max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) @@ -18498,7 +18498,7 @@ begin return res[1] end function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) else boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) @@ -18566,7 +18566,7 @@ begin return res[1] end function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) else load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) @@ -18635,7 +18635,7 @@ begin return res[1] end function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) else sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) @@ -18687,7 +18687,7 @@ begin return res[1] end function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.eager_mode + if tf.in_eager_mode() py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) else py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) @@ -18723,7 +18723,7 @@ begin return res[1] end function where(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() where_eager(input_; name=name) else where_graph(input_; name=name) @@ -18785,7 +18785,7 @@ begin return res[1] end function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, 
filterbank_channel_count=nothing, dct_coefficient_count=nothing) - if tf.eager_mode + if tf.in_eager_mode() mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) else mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) @@ -18827,7 +18827,7 @@ begin return res[1] end function check_numerics(tensor_; name=nothing, message=nothing) - if tf.eager_mode + if tf.in_eager_mode() check_numerics_eager(tensor_; name=name, message=message) else check_numerics_graph(tensor_; name=name, message=message) @@ -18858,7 +18858,7 @@ begin return res[1] end function tpu_compilation_result(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tpu_compilation_result_eager(; name=name) else tpu_compilation_result_graph(; name=name) @@ -18912,7 +18912,7 @@ begin return res[1] end function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -18963,7 +18963,7 @@ begin return res[1] end function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) else sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) @@ -19028,7 +19028,7 @@ begin return res end function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) else try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) @@ -19081,7 +19081,7 @@ begin return res[1] end function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) else batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) @@ -19123,7 +19123,7 @@ begin return res[1] end function _retval(input_; name=nothing, index=nothing) - if tf.eager_mode + if tf.in_eager_mode() _retval_eager(input_; name=name, index=index) else _retval_graph(input_; name=name, index=index) @@ -19170,7 +19170,7 @@ begin return res end function unique_with_counts(x_; name=nothing, out_idx=nothing) - if tf.eager_mode + if tf.in_eager_mode() unique_with_counts_eager(x_; name=name, out_idx=out_idx) else unique_with_counts_graph(x_; name=name, out_idx=out_idx) @@ -19211,7 +19211,7 @@ begin return res[1] end function add(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() add_eager(x_, y_; name=name) else 
add_graph(x_, y_; name=name) @@ -19289,7 +19289,7 @@ begin return res[1] end function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) else experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) @@ -19335,7 +19335,7 @@ begin return res[1] end function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) else assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) @@ -19393,7 +19393,7 @@ begin return res end function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - if tf.eager_mode + if tf.in_eager_mode() split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) else split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) @@ -19446,7 +19446,7 @@ begin return res[1] end function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) else assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) @@ -19505,7 +19505,7 @@ begin return res end function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) else max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) @@ -19564,7 +19564,7 @@ begin return res end function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) else quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) @@ -19642,7 +19642,7 @@ begin return res[1] end function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) else random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) @@ -19678,7 +19678,7 @@ begin return res[1] end function fft2d(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() fft2d_eager(input_; name=name) else fft2d_graph(input_; 
name=name) @@ -19728,7 +19728,7 @@ begin return res[1] end function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -19784,7 +19784,7 @@ begin return res[1] end function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) else experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) @@ -19835,7 +19835,7 @@ begin return res[1] end function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) else sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) @@ -19871,7 +19871,7 @@ begin return res[1] end function real(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() real_eager(input_; name=name) else real_graph(input_; name=name) @@ -19939,7 +19939,7 @@ begin return res[1] end function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -19977,7 +19977,7 @@ begin return res[1] end function rfft2d(input_, fft_length_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rfft2d_eager(input_, fft_length_; name=name) else rfft2d_graph(input_, fft_length_; name=name) @@ -20011,7 +20011,7 @@ begin return res[1] end function var_is_initialized_op(resource_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() var_is_initialized_op_eager(resource_; name=name) else var_is_initialized_op_graph(resource_; name=name) @@ -20053,7 +20053,7 @@ begin return res[1] end function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) else boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) @@ -20094,7 +20094,7 @@ begin return res[1] end function atan2(y_, x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() atan2_eager(y_, x_; name=name) else atan2_graph(y_, x_; name=name) @@ -20160,7 +20160,7 @@ begin return res[1] end function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - if tf.eager_mode + if 
tf.in_eager_mode() random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) else random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) @@ -20214,7 +20214,7 @@ begin return res[1] end function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - if tf.eager_mode + if tf.in_eager_mode() reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) else reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) @@ -20256,7 +20256,7 @@ begin return res[1] end function outfeed_enqueue(input_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() outfeed_enqueue_eager(input_; name=name, dtype=dtype) else outfeed_enqueue_graph(input_; name=name, dtype=dtype) @@ -20297,7 +20297,7 @@ begin return res[1] end function sub(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sub_eager(x_, y_; name=name) else sub_graph(x_, y_; name=name) @@ -20346,7 +20346,7 @@ begin return res end function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) else string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) @@ -20401,7 +20401,7 @@ begin return res[1] end function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.eager_mode + if tf.in_eager_mode() cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) else cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) @@ -20460,7 +20460,7 @@ begin return res end function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) else quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) @@ -20539,7 +20539,7 @@ begin return res end function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) else parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) @@ -20581,7 +20581,7 @@ begin return res[1] end function is_variable_initialized(ref_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() is_variable_initialized_eager(ref_; name=name, dtype=dtype) else is_variable_initialized_graph(ref_; name=name, dtype=dtype) @@ -20623,7 +20623,7 @@ begin return res[1] end function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) else experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) @@ -20684,7 +20684,7 @@ begin return res end function tensor_list_concat_v2(input_handle_, 
element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) else tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) @@ -20782,7 +20782,7 @@ begin return res end function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.eager_mode + if tf.in_eager_mode() cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) else cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) @@ -20835,7 +20835,7 @@ begin return res[1] end function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) else resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) @@ -20882,7 +20882,7 @@ begin return res[1] end function assign_add(ref_, value_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() assign_add_eager(ref_, value_; name=name, use_locking=use_locking) else assign_add_graph(ref_, value_; name=name, use_locking=use_locking) @@ -20928,7 +20928,7 @@ begin return res[1] end function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) else tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) @@ -20970,7 +20970,7 @@ begin return res[1] end function bucketize(input_; name=nothing, boundaries=nothing) - if tf.eager_mode + if tf.in_eager_mode() bucketize_eager(input_; name=name, boundaries=boundaries) else bucketize_graph(input_; name=name, boundaries=boundaries) @@ -21024,7 +21024,7 @@ begin return res[1] end function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) else sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) @@ -21083,7 +21083,7 @@ begin return res end function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -21136,7 +21136,7 @@ begin return res end function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, 
source=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) else tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) @@ -21170,7 +21170,7 @@ begin return res[1] end function tensor_array_close_v3(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_close_v3_eager(handle_; name=name) else tensor_array_close_v3_graph(handle_; name=name) @@ -21220,7 +21220,7 @@ begin return res[1] end function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) else non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) @@ -21274,7 +21274,7 @@ begin return res[1] end function pack(values_; name=nothing, N=nothing, axis=nothing) - if tf.eager_mode + if tf.in_eager_mode() pack_eager(values_; name=name, N=N, axis=axis) else pack_graph(values_; name=name, N=N, axis=axis) @@ -21318,7 +21318,7 @@ begin return res[1] end function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) else tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) @@ -21364,7 +21364,7 @@ begin return res[1] end function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) else assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) @@ -21398,7 +21398,7 @@ begin return res[1] end function batch_fft2d(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_fft2d_eager(input_; name=name) else batch_fft2d_graph(input_; name=name) @@ -21432,7 +21432,7 @@ begin return res[1] end function close_summary_writer(writer_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() close_summary_writer_eager(writer_; name=name) else close_summary_writer_graph(writer_; name=name) @@ -21468,7 +21468,7 @@ begin return res[1] end function rank(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rank_eager(input_; name=name) else rank_graph(input_; name=name) @@ -21504,7 +21504,7 @@ begin return res[1] end function fft3d(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() fft3d_eager(input_; name=name) else fft3d_graph(input_; name=name) @@ -21581,7 +21581,7 @@ begin return res[1] end function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) else apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) @@ -21623,7 +21623,7 @@ begin return res[1] end function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) - if tf.eager_mode + if tf.in_eager_mode() abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) else abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) @@ -21675,7 +21675,7 @@ begin return res[1] end function 
audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - if tf.eager_mode + if tf.in_eager_mode() audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) else audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) @@ -21715,7 +21715,7 @@ begin return res[1] end function variable_shape(input_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() variable_shape_eager(input_; name=name, out_type=out_type) else variable_shape_graph(input_; name=name, out_type=out_type) @@ -21775,7 +21775,7 @@ begin return res[1] end function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) else fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) @@ -21829,7 +21829,7 @@ begin return res[1] end function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) else variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) @@ -21867,7 +21867,7 @@ begin return res[1] end function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) else tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) @@ -21932,7 +21932,7 @@ begin return res[1] end function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) else max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) @@ -21977,7 +21977,7 @@ begin return res end function ref_switch(data_, pred_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ref_switch_eager(data_, pred_; name=name) else ref_switch_graph(data_, pred_; name=name) @@ -22011,7 +22011,7 @@ begin return res[1] end function sdca_fprint(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sdca_fprint_eager(input_; name=name) else sdca_fprint_graph(input_; name=name) @@ -22069,7 +22069,7 @@ begin return res[1] end function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_choose_fastest_dataset_eager(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) else experimental_choose_fastest_dataset_graph(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) @@ -22111,7 +22111,7 @@ begin return res[1] end function leaky_relu(features_; name=nothing, alpha=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
leaky_relu_eager(features_; name=name, alpha=alpha) else leaky_relu_graph(features_; name=name, alpha=alpha) @@ -22151,7 +22151,7 @@ begin return res[1] end function identity_n(input_; name=nothing, T=nothing) - if tf.eager_mode + if tf.in_eager_mode() identity_n_eager(input_; name=name, T=T) else identity_n_graph(input_; name=name, T=T) @@ -22282,7 +22282,7 @@ begin return res end function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) else cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) @@ -22331,7 +22331,7 @@ begin return res end function requantization_range(input_, input_min_, input_max_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() requantization_range_eager(input_, input_min_, input_max_; name=name) else requantization_range_graph(input_, input_min_, input_max_; name=name) @@ -22372,7 +22372,7 @@ begin return res[1] end function maximum(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() maximum_eager(x_, y_; name=name) else maximum_graph(x_, y_; name=name) @@ -22414,7 +22414,7 @@ begin return res[1] end function reshape(tensor_, shape_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reshape_eager(tensor_, shape_; name=name) else reshape_graph(tensor_, shape_; name=name) @@ -22465,7 +22465,7 @@ begin return res[1] end function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - if tf.eager_mode + if tf.in_eager_mode() matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) else matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) @@ -22507,7 +22507,7 @@ begin return res[1] end function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) else tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) @@ -22557,7 +22557,7 @@ begin return res[1] end function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) else boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) @@ -22593,7 +22593,7 @@ begin return res[1] end function hsv_to_rgb(images_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() hsv_to_rgb_eager(images_; name=name) else 
hsv_to_rgb_graph(images_; name=name) @@ -22643,7 +22643,7 @@ begin return res[1] end function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -22697,7 +22697,7 @@ begin return res[1] end function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -22748,7 +22748,7 @@ begin return res end function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) else decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) @@ -22784,7 +22784,7 @@ begin return res[1] end function log(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() log_eager(x_; name=name) else log_graph(x_; name=name) @@ -22836,7 +22836,7 @@ begin return res[1] end function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - if tf.eager_mode + if tf.in_eager_mode() save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) else save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) @@ -22872,7 +22872,7 @@ begin return res[1] end function deep_copy(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() deep_copy_eager(x_; name=name) else deep_copy_graph(x_; name=name) @@ -22918,7 +22918,7 @@ begin return res[1] end function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) else model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -23055,7 +23055,7 @@ begin return res end function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, 
Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) else parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) @@ -23091,7 +23091,7 @@ begin return res[1] end function sinh(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sinh_eager(x_; name=name) else sinh_graph(x_; name=name) @@ -23145,7 +23145,7 @@ begin return res[1] end function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) else iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) @@ -23193,7 +23193,7 @@ begin return res[1] end function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) else tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) @@ -23233,7 +23233,7 @@ begin return res[1] end function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) else tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) @@ -23267,7 +23267,7 @@ begin return res[1] end function queue_size_v2(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_size_v2_eager(handle_; name=name) else queue_size_v2_graph(handle_; name=name) @@ -23303,7 +23303,7 @@ begin return res[1] end function expm1(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() expm1_eager(x_; name=name) else expm1_graph(x_; name=name) @@ -23347,7 +23347,7 @@ begin return res[1] end function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) else batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) @@ -23397,7 +23397,7 @@ begin return res[1] end function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, 
output_shapes=output_shapes) else concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -23431,7 +23431,7 @@ begin return res[1] end function decode_gif(contents_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_gif_eager(contents_; name=name) else decode_gif_graph(contents_; name=name) @@ -23561,7 +23561,7 @@ begin return res[1] end function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) - if tf.eager_mode + if tf.in_eager_mode() tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) else tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) @@ -23608,7 +23608,7 @@ begin return res end function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) else batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) @@ -23650,7 +23650,7 @@ begin return res[1] end function shape(input_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() shape_eager(input_; name=name, out_type=out_type) else shape_graph(input_; name=name, out_type=out_type) @@ -23700,7 +23700,7 @@ begin return res[1] end function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) else repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -23754,7 +23754,7 @@ begin return res[1] end function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - if tf.eager_mode + if tf.in_eager_mode() crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) else crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method) @@ -23795,7 +23795,7 @@ begin return res[1] end function reciprocal_grad(y_, dy_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reciprocal_grad_eager(y_, dy_; name=name) else reciprocal_grad_graph(y_, dy_; name=name) 
@@ -23842,7 +23842,7 @@ begin return res[1] end function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) else batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) @@ -23902,7 +23902,7 @@ begin return res[1] end function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) else mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) @@ -23938,7 +23938,7 @@ begin return res[1] end function exit(data_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() exit_eager(data_; name=name) else exit_graph(data_; name=name) @@ -23998,7 +23998,7 @@ begin return res[1] end function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - if tf.eager_mode + if tf.in_eager_mode() lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) else lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) @@ -24062,7 +24062,7 @@ begin return res[1] end function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - if tf.eager_mode + if tf.in_eager_mode() stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) else stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) @@ -24112,7 +24112,7 @@ begin return res[1] end function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) else tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) @@ -24148,7 +24148,7 @@ begin return res[1] end function rsqrt(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rsqrt_eager(x_; name=name) else rsqrt_graph(x_; name=name) @@ -24263,7 +24263,7 @@ begin return res end function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) @@ -24297,7 +24297,7 @@ begin return res[1] end function 
delete_session_tensor(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() delete_session_tensor_eager(handle_; name=name) else delete_session_tensor_graph(handle_; name=name) @@ -24361,7 +24361,7 @@ begin return res[1] end function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - if tf.eager_mode + if tf.in_eager_mode() one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) else one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) @@ -24435,7 +24435,7 @@ begin return res[1] end function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) else resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) @@ -24564,7 +24564,7 @@ begin return res end function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.eager_mode + if tf.in_eager_mode() sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) else sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) @@ -24614,7 +24614,7 @@ begin return res[1] end function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) else queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) @@ -24674,7 +24674,7 @@ begin return res[1] end function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) else conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) @@ -24735,7 +24735,7 @@ begin return res end function 
ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - if tf.eager_mode + if tf.in_eager_mode() ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) else ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) @@ -24777,7 +24777,7 @@ begin return res[1] end function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) else whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) @@ -24854,7 +24854,7 @@ begin return res[1] end function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) else apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -24894,7 +24894,7 @@ begin return res[1] end function adjust_saturation(images_, scale_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() adjust_saturation_eager(images_, scale_; name=name) else adjust_saturation_graph(images_, scale_; name=name) @@ -24934,7 +24934,7 @@ begin return res[1] end function lookup_table_remove_v2(table_handle_, keys_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_remove_v2_eager(table_handle_, keys_; name=name) else lookup_table_remove_v2_graph(table_handle_, keys_; name=name) @@ -24974,7 +24974,7 @@ begin return res[1] end function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) else queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) @@ -25024,7 +25024,7 @@ begin return res[1] end function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) else prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -25098,7 +25098,7 @@ begin return res[1] end function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) else map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) @@ -25189,7 +25189,7 @@ begin return res end function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, 
     function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
         else
             quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
@@ -25237,7 +25237,7 @@ begin
         return res[1]
     end
     function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype)
         else
             tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype)
@@ -25273,7 +25273,7 @@ begin
         return res[1]
     end
     function identity(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             identity_eager(input_; name=name)
         else
             identity_graph(input_; name=name)
@@ -25337,7 +25337,7 @@ begin
         return res[1]
     end
     function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize)
         else
             print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize)
@@ -25397,7 +25397,7 @@ begin
         return res[1]
     end
     function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
         else
             collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
@@ -25448,7 +25448,7 @@ begin
         return res
     end
     function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _list_to_array_eager(input_; name=name, Tin=Tin, N=N)
         else
             _list_to_array_graph(input_; name=name, Tin=Tin, N=N)
@@ -25510,7 +25510,7 @@ begin
         return res[1]
     end
     function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples)
         else
             neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples)
@@ -25544,7 +25544,7 @@ begin
         return res[1]
     end
     function worker_heartbeat(request_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             worker_heartbeat_eager(request_; name=name)
         else
             worker_heartbeat_graph(request_; name=name)
@@ -25588,7 +25588,7 @@ begin
         return res[1]
     end
     function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs)
         else
             merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs)
@@ -25628,7 +25628,7 @@ begin
         return res[1]
     end
     function collective_permute(input_, source_target_pairs_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             collective_permute_eager(input_, source_target_pairs_; name=name)
         else
             collective_permute_graph(input_, source_target_pairs_; name=name)
@@ -25690,7 +25690,7 @@ begin
         return res[1]
     end
     function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given)
         else
             quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given)
@@ -25750,7 +25750,7 @@ begin
         return res[1]
     end
     function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
         else
             hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
@@ -25791,7 +25791,7 @@ begin
         return res[1]
     end
     function softplus_grad(gradients_, features_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             softplus_grad_eager(gradients_, features_; name=name)
         else
             softplus_grad_graph(gradients_, features_; name=name)
@@ -25857,7 +25857,7 @@ begin
         return res[1]
     end
     function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name)
         else
             fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name)
@@ -25905,7 +25905,7 @@ begin
         return res[1]
     end
     function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name)
         else
             tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name)
@@ -25939,7 +25939,7 @@ begin
         return res[1]
     end
     function decode_json_example(json_examples_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             decode_json_example_eager(json_examples_; name=name)
         else
             decode_json_example_graph(json_examples_; name=name)
@@ -26024,7 +26024,7 @@ begin
         return res
     end
     function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
         else
             fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
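A note on the identifier conventions visible in the surrounding hunks: a leading underscore carries over from TensorFlow's internal op names, while a trailing underscore avoids clashing with a Julia keyword, and that suffix then doubles up in the generated variants:

    # Naming scheme of the generated bindings (descriptive note, no new code):
    #   leading underscore  -- internal TF ops:        _host_cast, _if, _list_to_array
    #   trailing underscore -- Julia keyword clashes:  while_, const_
    #   the suffix propagates: while_ -> while__eager / while__graph,
    #                          const_ -> const__eager / const__graph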
@@ -26078,7 +26078,7 @@ begin
         return res[1]
     end
     function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
         else
             _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
@@ -26126,7 +26126,7 @@ begin
         return res[1]
     end
     function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
         else
             tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type)
@@ -26190,7 +26190,7 @@ begin
         return res[1]
     end
     function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations)
         else
             while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations)
@@ -26242,7 +26242,7 @@ begin
         return res[1]
     end
     function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype)
         else
             stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype)
@@ -26296,7 +26296,7 @@ begin
         return res[1]
     end
     function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
         else
             scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
@@ -26332,7 +26332,7 @@ begin
         return res[1]
     end
     function conj(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             conj_eager(input_; name=name)
         else
             conj_graph(input_; name=name)
@@ -26378,7 +26378,7 @@ begin
         return res[1]
     end
     function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N)
         else
             parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N)
@@ -26416,7 +26416,7 @@ begin
         return res[1]
     end
     function make_iterator(dataset_, iterator_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             make_iterator_eager(dataset_, iterator_; name=name)
         else
             make_iterator_graph(dataset_, iterator_; name=name)
@@ -26454,7 +26454,7 @@ begin
         return res[1]
     end
     function rfft3d(input_, fft_length_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             rfft3d_eager(input_, fft_length_; name=name)
         else
             rfft3d_graph(input_, fft_length_; name=name)
@@ -26513,7 +26513,7 @@ begin
         return res
     end
     function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
         else
             sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims)
@@ -26573,7 +26573,7 @@ begin
         return res[1]
     end
     function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             collective_gather_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
         else
             collective_gather_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape)
@@ -26638,7 +26638,7 @@ begin
         return res
     end
     function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class)
         else
             combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class)
@@ -26698,7 +26698,7 @@ begin
         return res[1]
     end
     function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count)
         else
             _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count)
@@ -26764,7 +26764,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -26832,7 +26832,7 @@ begin
         return res
     end
     function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name)
         else
             sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name)
@@ -26881,7 +26881,7 @@ begin
         return res
     end
     function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated)
         else
             ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated)
@@ -26929,7 +26929,7 @@ begin
         return res[1]
     end
     function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name)
         else
             immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name)
@@ -26963,7 +26963,7 @@ begin
         return res[1]
     end
     function consume_mutex_lock(mutex_lock_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             consume_mutex_lock_eager(mutex_lock_; name=name)
         else
             consume_mutex_lock_graph(mutex_lock_; name=name)
@@ -27004,7 +27004,7 @@ begin
         return res[1]
     end
     function greater_equal(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             greater_equal_eager(x_, y_; name=name)
         else
             greater_equal_graph(x_, y_; name=name)
@@ -27066,7 +27066,7 @@ begin
         return res[1]
     end
     function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
         else
             initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
@@ -27112,7 +27112,7 @@ begin
         return res[1]
     end
     function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
         else
             queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms)
@@ -27153,7 +27153,7 @@ begin
         return res[1]
     end
     function equal(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             equal_eager(x_, y_; name=name)
         else
             equal_graph(x_, y_; name=name)
@@ -27199,7 +27199,7 @@ begin
         return res[1]
     end
     function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -27257,7 +27257,7 @@ begin
         return res[1]
     end
     function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type)
         else
             tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type)
@@ -27334,7 +27334,7 @@ begin
         return res
     end
     function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
         else
             fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
@@ -27382,7 +27382,7 @@ begin
         return res[1]
     end
     function scatter_nd(indices_, updates_, shape_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             scatter_nd_eager(indices_, updates_, shape_; name=name)
         else
             scatter_nd_graph(indices_, updates_, shape_; name=name)
@@ -27432,7 +27432,7 @@ begin
         return res[1]
     end
     function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype)
         else
             tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype)
@@ -27477,7 +27477,7 @@ begin
         return res[1]
     end
     function select(condition_, t_, e_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             select_eager(condition_, t_, e_; name=name)
         else
             select_graph(condition_, t_, e_; name=name)
@@ -27526,7 +27526,7 @@ begin
         return res[1]
     end
     function min(input_, reduction_indices_; name=nothing, keep_dims=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
         else
             min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
@@ -27596,7 +27596,7 @@ begin
         return res[1]
     end
     function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
         else
             lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)
@@ -27668,7 +27668,7 @@ begin
         return res[1]
     end
     function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype)
         else
             random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype)
@@ -27728,7 +27728,7 @@ begin
         return res[1]
     end
     function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
         else
             fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
@@ -27796,7 +27796,7 @@ begin
         return res[1]
     end
     function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
         else
             resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
@@ -27842,7 +27842,7 @@ begin
         return res[1]
     end
     function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -27892,7 +27892,7 @@ begin
         return res[1]
     end
     function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -27956,7 +27956,7 @@ begin
         return res[1]
     end
     function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
         else
             dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding)
@@ -28020,7 +28020,7 @@ begin
         return res[1]
     end
     function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
         else
             _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch)
@@ -28062,7 +28062,7 @@ begin
         return res[1]
     end
     function bias_add_grad(out_backprop_; name=nothing, data_format=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             bias_add_grad_eager(out_backprop_; name=name, data_format=data_format)
         else
             bias_add_grad_graph(out_backprop_; name=name, data_format=data_format)
@@ -28096,7 +28096,7 @@ begin
         return res[1]
     end
     function reader_serialize_state_v2(reader_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_serialize_state_v2_eager(reader_handle_; name=name)
         else
             reader_serialize_state_v2_graph(reader_handle_; name=name)
@@ -28130,7 +28130,7 @@ begin
         return res[1]
     end
     function wrap_dataset_variant(input_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             wrap_dataset_variant_eager(input_handle_; name=name)
         else
             wrap_dataset_variant_graph(input_handle_; name=name)
@@ -28210,7 +28210,7 @@ begin
         return res[1]
     end
     function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
         else
             parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy)
@@ -28279,7 +28279,7 @@ begin
         return res[1]
     end
     function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
         else
             depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
@@ -28353,7 +28353,7 @@ begin
         return res[1]
     end
     function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
         else
             resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
@@ -28402,7 +28402,7 @@ begin
         return res
     end
     function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype)
         else
             sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype)
@@ -28448,7 +28448,7 @@ begin
         return res[1]
     end
     function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -28482,7 +28482,7 @@ begin
         return res[1]
     end
     function stack_close_v2(handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stack_close_v2_eager(handle_; name=name)
         else
             stack_close_v2_graph(handle_; name=name)
@@ -28542,7 +28542,7 @@ begin
         return res[1]
     end
     function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -28615,7 +28615,7 @@ begin
         return res[1]
     end
     function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
         else
             resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
@@ -28649,7 +28649,7 @@ begin
         return res[1]
     end
     function tensor_forest_tree_size(tree_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_forest_tree_size_eager(tree_handle_; name=name)
         else
             tensor_forest_tree_size_graph(tree_handle_; name=name)
@@ -28685,7 +28685,7 @@ begin
         return res[1]
     end
     function matrix_diag_part(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             matrix_diag_part_eager(input_; name=name)
         else
             matrix_diag_part_graph(input_; name=name)
@@ -28719,7 +28719,7 @@ begin
         return res[1]
     end
     function reader_num_work_units_completed_v2(reader_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_num_work_units_completed_v2_eager(reader_handle_; name=name)
         else
             reader_num_work_units_completed_v2_graph(reader_handle_; name=name)
@@ -28767,7 +28767,7 @@ begin
         return res[1]
     end
     function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name)
         else
             tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name)
@@ -28827,7 +28827,7 @@ begin
         return res[1]
     end
     function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices)
         else
             sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices)
@@ -28869,7 +28869,7 @@ begin
         return res[1]
     end
     function tpu_replicated_input(inputs_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tpu_replicated_input_eager(inputs_; name=name, N=N)
         else
             tpu_replicated_input_graph(inputs_; name=name, N=N)
@@ -28903,7 +28903,7 @@ begin
         return res[1]
     end
     function stack_close(handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stack_close_eager(handle_; name=name)
         else
             stack_close_graph(handle_; name=name)
@@ -28948,7 +28948,7 @@ begin
         return res
     end
     function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype)
         else
             deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype)
@@ -29002,7 +29002,7 @@ begin
         return res[1]
     end
     function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
         else
             _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name)
@@ -29050,7 +29050,7 @@ begin
         return res[1]
     end
     function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode)
         else
             mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode)
@@ -29091,7 +29091,7 @@ begin
         return res[1]
     end
     function broadcast_args(s0_, s1_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             broadcast_args_eager(s0_, s1_; name=name)
         else
             broadcast_args_graph(s0_, s1_; name=name)
@@ -29139,7 +29139,7 @@ begin
         return res[1]
     end
     function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype)
         else
             stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype)
@@ -29177,7 +29177,7 @@ begin
         return res[1]
     end
     function regex_full_match(input_, pattern_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             regex_full_match_eager(input_, pattern_; name=name)
         else
             regex_full_match_graph(input_, pattern_; name=name)
@@ -29211,7 +29211,7 @@ begin
         return res[1]
     end
     function unwrap_dataset_variant(input_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             unwrap_dataset_variant_eager(input_handle_; name=name)
         else
             unwrap_dataset_variant_graph(input_handle_; name=name)
@@ -29257,7 +29257,7 @@ begin
         return res[1]
     end
     function empty(shape_; name=nothing, dtype=nothing, init=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             empty_eager(shape_; name=name, dtype=dtype, init=init)
         else
             empty_graph(shape_; name=name, dtype=dtype, init=init)
@@ -29305,7 +29305,7 @@ begin
         return res[1]
     end
     function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
         else
             outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
@@ -29346,7 +29346,7 @@ begin
         return res[1]
     end
     function div(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             div_eager(x_, y_; name=name)
         else
             div_graph(x_, y_; name=name)
@@ -29406,7 +29406,7 @@ begin
         return res[1]
     end
     function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
         else
             barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
@@ -29447,7 +29447,7 @@ begin
         return res[1]
     end
     function truncate_div(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             truncate_div_eager(x_, y_; name=name)
         else
             truncate_div_graph(x_, y_; name=name)
@@ -29503,7 +29503,7 @@ begin
         return res[1]
     end
     function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char)
         else
             unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char)
@@ -29543,7 +29543,7 @@ begin
         return res[1]
     end
     function merge_summary(inputs_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             merge_summary_eager(inputs_; name=name, N=N)
         else
             merge_summary_graph(inputs_; name=name, N=N)
@@ -29577,7 +29577,7 @@ begin
         return res[1]
     end
     function fake_queue(resource_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fake_queue_eager(resource_; name=name)
         else
             fake_queue_graph(resource_; name=name)
@@ -29613,7 +29613,7 @@ begin
         return res[1]
     end
     function batch_cholesky(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             batch_cholesky_eager(input_; name=name)
         else
             batch_cholesky_graph(input_; name=name)
@@ -29667,7 +29667,7 @@ begin
         return res[1]
     end
     function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
         else
             iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes)
@@ -29703,7 +29703,7 @@ begin
         return res[1]
     end
     function bessel_i1e(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             bessel_i1e_eager(x_; name=name)
         else
             bessel_i1e_graph(x_; name=name)
@@ -29741,7 +29741,7 @@ begin
         return res[1]
     end
     function import_event(writer_, event_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             import_event_eager(writer_, event_; name=name)
         else
             import_event_graph(writer_, event_; name=name)
@@ -29820,7 +29820,7 @@ begin
         return res
     end
     function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation)
         else
             quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation)
@@ -29882,7 +29882,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -29930,7 +29930,7 @@ begin
         return res[1]
     end
     function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name)
         else
             tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name)
@@ -29988,7 +29988,7 @@ begin
         return res
     end
     function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices)
         else
             dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices)
@@ -30076,7 +30076,7 @@ begin
         return res[1]
     end
     function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata)
         else
             encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata)
@@ -30121,7 +30121,7 @@ begin
         return res[1]
     end
     function inplace_update(x_, i_, v_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             inplace_update_eager(x_, i_, v_; name=name)
         else
             inplace_update_graph(x_, i_, v_; name=name)
@@ -30184,7 +30184,7 @@ begin
         return res[1]
     end
     function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding)
         else
             fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding)
@@ -30239,7 +30239,7 @@ begin
         return res
     end
     function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type)
         else
             quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type)
@@ -30282,7 +30282,7 @@ begin
         return res[1]
     end
     function gather_nd(params_, indices_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             gather_nd_eager(params_, indices_; name=name)
         else
             gather_nd_graph(params_, indices_; name=name)
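All of these wrappers share one behavior: with the mode off they add nodes to the graph, with it on they execute immediately through the TFE_* C API. A hedged usage sketch, reusing the illustrative `set_eager_mode` toggle from the sketch near the top of this diff (the package's real entry point for switching modes may be named differently):

    # Hypothetical session; one_hot is one of the generated wrappers in this diff.
    set_eager_mode(false)                    # assumed toggle from the earlier sketch
    node = one_hot([1, 2, 3], 4, 1.0, 0.0)   # dispatches to one_hot_graph -> graph node

    set_eager_mode(true)
    value = one_hot([1, 2, 3], 4, 1.0, 0.0)  # dispatches to one_hot_eager -> runs now

Because the branch is taken inside the wrapper at call time, user code is identical in both modes; only the kind of object returned differs.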
@@ -30324,7 +30324,7 @@ begin
         return res[1]
     end
     function placeholder(; name=nothing, dtype=nothing, shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             placeholder_eager(; name=name, dtype=dtype, shape=shape)
         else
             placeholder_graph(; name=name, dtype=dtype, shape=shape)
@@ -30370,7 +30370,7 @@ begin
         return res[1]
     end
     function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -30416,7 +30416,7 @@ begin
         return res[1]
     end
     function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name)
         else
             clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name)
@@ -30468,7 +30468,7 @@ begin
         return res[1]
     end
     function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color)
         else
             image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color)
@@ -30527,7 +30527,7 @@ begin
         return res
     end
     function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -30573,7 +30573,7 @@ begin
         return res[1]
     end
     function string_join(inputs_; name=nothing, N=nothing, separator=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             string_join_eager(inputs_; name=name, N=N, separator=separator)
         else
             string_join_graph(inputs_; name=name, N=N, separator=separator)
@@ -30626,7 +30626,7 @@ begin
         return res[1]
     end
     function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
         else
             resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
@@ -30670,7 +30670,7 @@ begin
         return res[1]
     end
     function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams)
         else
             boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams)
@@ -30711,7 +30711,7 @@ begin
         return res[1]
     end
     function left_shift(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             left_shift_eager(x_, y_; name=name)
         else
             left_shift_graph(x_, y_; name=name)
@@ -30774,7 +30774,7 @@ begin
         return res
     end
     function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type)
         else
             requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type)
@@ -30822,7 +30822,7 @@ begin
         return res[1]
     end
     function tensor_scatter_add(tensor_, indices_, updates_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_scatter_add_eager(tensor_, indices_, updates_; name=name)
         else
             tensor_scatter_add_graph(tensor_, indices_, updates_; name=name)
@@ -30887,7 +30887,7 @@ begin
         return res
     end
     function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes)
         else
             _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes)
@@ -30923,7 +30923,7 @@ begin
         return res[1]
     end
     function ifft3d(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ifft3d_eager(input_; name=name)
         else
             ifft3d_graph(input_; name=name)
@@ -30972,7 +30972,7 @@ begin
         return res[1]
     end
     function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             euclidean_norm_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims)
         else
             euclidean_norm_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims)
@@ -31018,7 +31018,7 @@ begin
         return res[1]
     end
     function ref_select(index_, inputs_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ref_select_eager(index_, inputs_; name=name, N=N)
         else
             ref_select_graph(index_, inputs_; name=name, N=N)
@@ -31062,7 +31062,7 @@ begin
         return res[1]
     end
     function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name)
         else
             sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name)
@@ -31121,7 +31121,7 @@ begin
         return res
     end
     function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -31155,7 +31155,7 @@ begin
         return res[1]
     end
     function batch_ifft2d(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             batch_ifft2d_eager(input_; name=name)
         else
             batch_ifft2d_graph(input_; name=name)
@@ -31209,7 +31209,7 @@ begin
         return res[1]
     end
     function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
         else
             tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
@@ -31262,7 +31262,7 @@ begin
         return res[1]
     end
     function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
         else
             sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
@@ -31304,7 +31304,7 @@ begin
         return res[1]
     end
     function ensure_shape(input_; name=nothing, shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ensure_shape_eager(input_; name=name, shape=shape)
         else
             ensure_shape_graph(input_; name=name, shape=shape)
@@ -31366,7 +31366,7 @@ begin
         return res[1]
     end
     function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
         else
             apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking)
@@ -31444,7 +31444,7 @@ begin
         return res[1]
     end
     function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for)
         else
             collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for)
@@ -31480,7 +31480,7 @@ begin
         return res[1]
     end
     function is_nan(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             is_nan_eager(x_; name=name)
         else
             is_nan_graph(x_; name=name)
@@ -31562,7 +31562,7 @@ begin
         return res[1]
     end
     function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
         else
             apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking)
@@ -31636,7 +31636,7 @@ begin
         return res[1]
     end
     function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
         else
             decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method)
@@ -31718,7 +31718,7 @@ begin
         return res[1]
     end
     function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
         else
             apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking)
@@ -31787,7 +31787,7 @@ begin
         return res[1]
     end
     function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
         else
             conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
@@ -31840,7 +31840,7 @@ begin
         return res[1]
     end
     function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
         else
             matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint)
@@ -31874,7 +31874,7 @@ begin
         return res[1]
     end
     function reader_num_work_units_completed(reader_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_num_work_units_completed_eager(reader_handle_; name=name)
         else
             reader_num_work_units_completed_graph(reader_handle_; name=name)
@@ -31930,7 +31930,7 @@ begin
         return res[1]
     end
     function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
         else
             write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs)
@@ -31968,7 +31968,7 @@ begin
         return res[1]
     end
     function sharded_filespec(basename_, num_shards_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sharded_filespec_eager(basename_, num_shards_; name=name)
         else
             sharded_filespec_graph(basename_, num_shards_; name=name)
@@ -32009,7 +32009,7 @@ begin
         return res[1]
     end
     function div_no_nan(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             div_no_nan_eager(x_, y_; name=name)
         else
             div_no_nan_graph(x_, y_; name=name)
@@ -32073,7 +32073,7 @@ begin
         return res[1]
     end
     function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape)
         else
             sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape)
@@ -32124,7 +32124,7 @@ begin
         return res
     end
     function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK)
         else
             ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK)
@@ -32178,7 +32178,7 @@ begin
         return res[1]
     end
     function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding)
         else
             extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding)
@@ -32234,7 +32234,7 @@ begin
         return res[1]
     end
     function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index)
         else
             barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index)
@@ -32276,7 +32276,7 @@ begin
         return res[1]
     end
     function const_(; name=nothing, value=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             const__eager(; name=name, value=value, dtype=dtype)
         else
             const__graph(; name=name, value=value, dtype=dtype)
@@ -32324,7 +32324,7 @@ begin
         return res[1]
     end
     function space_to_batch(input_, paddings_; name=nothing, block_size=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             space_to_batch_eager(input_, paddings_; name=name, block_size=block_size)
         else
             space_to_batch_graph(input_, paddings_; name=name, block_size=block_size)
@@ -32384,7 +32384,7 @@ begin
         return res[1]
     end
     function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -32436,7 +32436,7 @@ begin
         return res[1]
     end
     function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
         else
             empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
@@ -32531,7 +32531,7 @@ begin
         return res
     end
     function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
         else
             quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
@@ -32578,7 +32578,7 @@ begin
         return res
     end
     function lu(input_; name=nothing, output_idx_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             lu_eager(input_; name=name, output_idx_type=output_idx_type)
         else
             lu_graph(input_; name=name, output_idx_type=output_idx_type)
@@ -32618,7 +32618,7 @@ begin
         return res[1]
     end
     function decode_compressed(bytes_; name=nothing, compression_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             decode_compressed_eager(bytes_; name=name, compression_type=compression_type)
         else
             decode_compressed_graph(bytes_; name=name, compression_type=compression_type)
@@ -32658,7 +32658,7 @@ begin
         return res[1]
     end
     function get_session_tensor(handle_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             get_session_tensor_eager(handle_; name=name, dtype=dtype)
         else
             get_session_tensor_graph(handle_; name=name, dtype=dtype)
@@ -32712,7 +32712,7 @@ begin
         return res[1]
     end
     function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
         else
             tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
@@ -32782,7 +32782,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -32822,7 +32822,7 @@ begin
         return res[1]
     end
     function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
         else
             destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error)
@@ -32870,7 +32870,7 @@ begin
         return res[1]
     end
     function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
         else
             text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name)
@@ -32920,7 +32920,7 @@ begin
         return res[1]
     end
     function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
         else
             create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name)
@@ -32961,7 +32961,7 @@ begin
         return res[1]
     end
     function tanh_grad(y_, dy_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tanh_grad_eager(y_, dy_; name=name)
         else
             tanh_grad_graph(y_, dy_; name=name)
@@ -32995,7 +32995,7 @@ begin
         return res[1]
     end
     function decode_base64(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             decode_base64_eager(input_; name=name)
         else
             decode_base64_graph(input_; name=name)
@@ -33061,7 +33061,7 @@ begin
         return res[1]
     end
     function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
         else
             max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
@@ -33109,7 +33109,7 @@ begin
         return res[1]
     end
     function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
max_outputs=max_outputs) else audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) @@ -33179,7 +33179,7 @@ begin return res[1] end function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) else stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) @@ -33250,7 +33250,7 @@ begin return res[1] end function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) else _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) @@ -33312,7 +33312,7 @@ begin return res[1] end function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - if tf.eager_mode + if tf.in_eager_mode() fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) else fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) @@ -33365,7 +33365,7 @@ begin return res end function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) else batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) @@ -33443,7 +33443,7 @@ begin return res[1] end function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) else map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) @@ -33524,7 +33524,7 @@ begin return res[1] end function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) else resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) @@ -33570,7 +33570,7 @@ begin return res[1] end function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners) else resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners) @@ -33648,7 +33648,7 @@ begin return res[1] end 
function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -33702,7 +33702,7 @@ begin return res end function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) else _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) @@ -33738,7 +33738,7 @@ begin return res[1] end function batch_matrix_diag(diagonal_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_diag_eager(diagonal_; name=name) else batch_matrix_diag_graph(diagonal_; name=name) @@ -33774,7 +33774,7 @@ begin return res[1] end function is_inf(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() is_inf_eager(x_; name=name) else is_inf_graph(x_; name=name) @@ -33885,7 +33885,7 @@ begin return res end function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) else fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) @@ -33974,7 +33974,7 @@ begin return res[1] end function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) else sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) @@ -34017,7 +34017,7 @@ begin return res[1] end function unravel_index(indices_, dims_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unravel_index_eager(indices_, dims_; name=name) else unravel_index_graph(indices_, dims_; name=name) @@ -34066,7 +34066,7 @@ begin return res[1] end function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) else max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) @@ -34102,7 +34102,7 @@ begin return res[1] end function ifft2d(input_; 
name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ifft2d_eager(input_; name=name) else ifft2d_graph(input_; name=name) @@ -34169,7 +34169,7 @@ begin return res end function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) else sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) @@ -34209,7 +34209,7 @@ begin return res[1] end function histogram_summary(tag_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() histogram_summary_eager(tag_, values_; name=name) else histogram_summary_graph(tag_, values_; name=name) @@ -34252,7 +34252,7 @@ begin return res[1] end function segment_sum(data_, segment_ids_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() segment_sum_eager(data_, segment_ids_; name=name) else segment_sum_graph(data_, segment_ids_; name=name) @@ -34288,7 +34288,7 @@ begin return res[1] end function exp(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() exp_eager(x_; name=name) else exp_graph(x_; name=name) @@ -34336,7 +34336,7 @@ begin return res[1] end function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - if tf.eager_mode + if tf.in_eager_mode() configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) else configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) @@ -34389,7 +34389,7 @@ begin return res[1] end function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -34445,7 +34445,7 @@ begin return res[1] end function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - if tf.eager_mode + if tf.in_eager_mode() _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) else _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) @@ -34481,7 +34481,7 @@ begin return res[1] end function get_session_handle_v2(value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() get_session_handle_v2_eager(value_; name=name) else get_session_handle_v2_graph(value_; name=name) @@ -34522,7 +34522,7 @@ begin return res[1] end function relu_grad(gradients_, features_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() relu_grad_eager(gradients_, features_; name=name) else relu_grad_graph(gradients_, features_; name=name) @@ -34571,7 +34571,7 @@ begin return res[1] end function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name) else unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name) @@ -34656,7 +34656,7 @@ begin return res end function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, 
@@ -34706,7 +34706,7 @@ begin
         return res[1]
     end
     function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
         else
             queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms)
@@ -34760,7 +34760,7 @@ begin
         return res[1]
     end
     function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
         else
             scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
@@ -34794,7 +34794,7 @@ begin
         return res[1]
     end
     function reader_num_records_produced_v2(reader_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_num_records_produced_v2_eager(reader_handle_; name=name)
         else
             reader_num_records_produced_v2_graph(reader_handle_; name=name)
@@ -34864,7 +34864,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -34911,7 +34911,7 @@ begin
         return res[1]
     end
     function assign_sub(ref_, value_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             assign_sub_eager(ref_, value_; name=name, use_locking=use_locking)
         else
             assign_sub_graph(ref_, value_; name=name, use_locking=use_locking)
@@ -34960,7 +34960,7 @@ begin
         return res[1]
     end
     function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name)
         else
             unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name)
@@ -35039,7 +35039,7 @@ begin
         return res
     end
     function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
         else
             fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training)
@@ -35105,7 +35105,7 @@ begin
         return res[1]
     end
     function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
         else
             max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format)
@@ -35196,7 +35196,7 @@ begin
         return res
     end
     function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
         else
             quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
@@ -35238,7 +35238,7 @@ begin
         return res[1]
     end
     function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
         else
             boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name)
@@ -35298,7 +35298,7 @@ begin
         return res[1]
     end
     function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -35363,7 +35363,7 @@ begin
         return res
     end
     function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample)
         else
             skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample)
@@ -35412,7 +35412,7 @@ begin
         return res[1]
     end
     function arg_min(input_, dimension_; name=nothing, output_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             arg_min_eager(input_, dimension_; name=name, output_type=output_type)
         else
             arg_min_graph(input_, dimension_; name=name, output_type=output_type)
@@ -35462,7 +35462,7 @@ begin
         return res[1]
     end
     function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
         else
             queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms)
@@ -35501,7 +35501,7 @@ begin
         return res
     end
     function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name)
         else
             boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name)
@@ -35542,7 +35542,7 @@ begin
         return res[1]
     end
     function minimum(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             minimum_eager(x_, y_; name=name)
         else
             minimum_graph(x_, y_; name=name)
@@ -35593,7 +35593,7 @@ begin
         return res[1]
     end
     function substr(input_, pos_, len_; name=nothing, unit=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             substr_eager(input_, pos_, len_; name=name, unit=unit)
         else
             substr_graph(input_, pos_, len_; name=name, unit=unit)
@@ -35627,7 +35627,7 @@ begin
         return res[1]
     end
     function queue_size(handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             queue_size_eager(handle_; name=name)
         else
             queue_size_graph(handle_; name=name)
@@ -35709,7 +35709,7 @@ begin
         return res[1]
     end
     function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
         else
             apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking)
@@ -35771,7 +35771,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -35818,7 +35818,7 @@ begin
         return res[1]
     end
     function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name)
         else
             sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name)
@@ -35883,7 +35883,7 @@ begin
         return res[1]
     end
     function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
         else
             resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking)
@@ -35937,7 +35937,7 @@ begin
         return res[1]
     end
     function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
         else
             tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape)
@@ -35978,7 +35978,7 @@ begin
         return res[1]
     end
     function less(x_, y_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             less_eager(x_, y_; name=name)
         else
             less_graph(x_, y_; name=name)
@@ -36020,7 +36020,7 @@ begin
         return res[1]
     end
     function host_const(; name=nothing, value=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             host_const_eager(; name=name, value=value, dtype=dtype)
         else
             host_const_graph(; name=name, value=value, dtype=dtype)
@@ -36067,7 +36067,7 @@ begin
         return res[1]
     end
     function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type)
         else
             upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type)
@@ -36115,7 +36115,7 @@ begin
         return res[1]
     end
     function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=name, element_dtype=element_dtype)
         else
             tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=name, element_dtype=element_dtype)
@@ -36169,7 +36169,7 @@ begin
         return res[1]
     end
     function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
         else
             fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
@@ -36203,7 +36203,7 @@ begin
         return res[1]
     end
     function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name)
         else
             is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name)
@@ -36250,7 +36250,7 @@ begin
         return res
     end
     function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name)
         else
             reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name)
@@ -36291,7 +36291,7 @@ begin
         return res[1]
    end
     function complex(real_, imag_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             complex_eager(real_, imag_; name=name)
         else
             complex_graph(real_, imag_; name=name)
@@ -36343,7 +36343,7 @@ begin
         return res[1]
     end
     function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
         else
             tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
@@ -36385,7 +36385,7 @@ begin
         return res[1]
     end
     function bitcast(input_; name=nothing, type_=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             bitcast_eager(input_; name=name, type_=type_)
         else
             bitcast_graph(input_; name=name, type_=type_)
@@ -36445,7 +36445,7 @@ begin
         return res[1]
     end
     function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
         else
             priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name)
@@ -36564,7 +36564,7 @@ begin
         return res
     end
     function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
         else
             quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization)
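Every hunk in this generated file makes the same mechanical change: each op's public wrapper stops reading the binding `tf.eager_mode` and instead calls the predicate `tf.in_eager_mode()` to pick between its `_eager` and `_graph` implementations. A minimal, self-contained sketch of that three-function dispatch pattern (the `Sketch` module, its `eager` flag, and the toy `add_*` backends below are illustrative stand-ins, not TensorFlow.jl's actual definitions):

module Sketch

const eager = Ref(false)           # stands in for the package's execution-mode state
in_eager_mode() = eager[]          # predicate form, as used by the generated wrappers

# Toy backends: the real generator emits a graph-building body and an
# immediately-executing eager body for every op.
add_graph(x, y; name=nothing) = "graph node $(name === nothing ? "Add" : name)"
add_eager(x, y; name=nothing) = x .+ y

# The thin dispatcher is the only part these hunks touch.
function add(x, y; name=nothing)
    if in_eager_mode()
        add_eager(x, y; name=name)
    else
        add_graph(x, y; name=name)
    end
end

end # module

Sketch.eager[] = true
@assert Sketch.add([1, 2], [3, 4]) == [4, 6]

Because the generator emits a `_graph` body, an `_eager` body, and this thin dispatcher for each op, the public name stays mode-agnostic and callers never choose a backend themselves.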
@@ -36600,7 +36600,7 @@ begin
         return res[1]
     end
     function cos(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             cos_eager(x_; name=name)
         else
             cos_graph(x_; name=name)
@@ -36655,7 +36655,7 @@ begin
         return res
     end
     function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type)
         else
             quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type)
@@ -36705,7 +36705,7 @@ begin
         return res[1]
     end
     function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -36765,7 +36765,7 @@ begin
         return res[1]
     end
     function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
         else
             rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms)
@@ -36880,7 +36880,7 @@ begin
         return res
     end
     function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
         else
             quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
@@ -36914,7 +36914,7 @@ begin
         return res[1]
     end
     function tensor_list_length(input_handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_length_eager(input_handle_; name=name)
         else
             tensor_list_length_graph(input_handle_; name=name)
@@ -36974,7 +36974,7 @@ begin
         return res[1]
     end
     function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -37026,7 +37026,7 @@ begin
         return res[1]
     end
     function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stateless_while_eager(input_; name=name, T=T, cond=cond, body=body)
         else
             stateless_while_graph(input_; name=name, T=T, cond=cond, body=body)
@@ -37086,7 +37086,7 @@ begin
         return res[1]
     end
     function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
         else
             sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type)
@@ -37129,7 +37129,7 @@ begin
         return res[1]
     end
     function segment_min(data_, segment_ids_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             segment_min_eager(data_, segment_ids_; name=name)
         else
             segment_min_graph(data_, segment_ids_; name=name)
@@ -37171,7 +37171,7 @@ begin
         return res[1]
     end
     function write_graph_summary(writer_, step_, tensor_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             write_graph_summary_eager(writer_, step_, tensor_; name=name)
         else
             write_graph_summary_graph(writer_, step_, tensor_; name=name)
@@ -37212,7 +37212,7 @@ begin
         return res[1]
     end
     function cholesky_grad(l_, grad_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             cholesky_grad_eager(l_, grad_; name=name)
         else
             cholesky_grad_graph(l_, grad_; name=name)
@@ -37287,7 +37287,7 @@ begin
         return res
     end
     function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
         else
             log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
@@ -37337,7 +37337,7 @@ begin
         return res[1]
     end
     function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
         else
             serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type)
@@ -37385,7 +37385,7 @@ begin
         return res[1]
     end
     function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name)
         else
             scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name)
@@ -37432,7 +37432,7 @@ begin
         return res
     end
     function ref_merge(inputs_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ref_merge_eager(inputs_; name=name, N=N)
         else
             ref_merge_graph(inputs_; name=name, N=N)
@@ -37483,7 +37483,7 @@ begin
         return res
     end
     function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape)
         else
             tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape)
@@ -37578,7 +37578,7 @@ begin
         return res[1]
     end
     function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
         else
             cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2)
@@ -37657,7 +37657,7 @@ begin
         return res[1]
     end
     function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
         else
             sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
@@ -37691,7 +37691,7 @@ begin
         return res[1]
     end
     function tensor_array_close(handle_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_close_eager(handle_; name=name)
         else
             tensor_array_close_graph(handle_; name=name)
@@ -37732,7 +37732,7 @@ begin
         return res[1]
     end
     function selu_grad(gradients_, outputs_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             selu_grad_eager(gradients_, outputs_; name=name)
         else
             selu_grad_graph(gradients_, outputs_; name=name)
@@ -37784,7 +37784,7 @@ begin
         return res[1]
     end
     function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method)
         else
             crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method)
@@ -37822,7 +37822,7 @@ begin
         return res[1]
     end
     function rfft(input_, fft_length_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             rfft_eager(input_, fft_length_; name=name)
         else
             rfft_graph(input_, fft_length_; name=name)
@@ -37876,7 +37876,7 @@ begin
         return res[1]
     end
     function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -37946,7 +37946,7 @@ begin
         return res[1]
     end
     function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
         else
             resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking)
@@ -37982,7 +37982,7 @@ begin
         return res[1]
     end
     function matrix_determinant(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             matrix_determinant_eager(input_; name=name)
         else
             matrix_determinant_graph(input_; name=name)
@@ -38034,7 +38034,7 @@ begin
         return res[1]
     end
     function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global)
         else
             static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global)
@@ -38094,7 +38094,7 @@ begin
         return res[1]
     end
     function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
         else
             avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format)
@@ -38143,7 +38143,7 @@ begin
         return res[1]
     end
     function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
         else
             sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name)
@@ -38184,7 +38184,7 @@ begin
         return res[1]
     end
     function bias_add_v1(value_, bias_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             bias_add_v1_eager(value_, bias_; name=name)
         else
             bias_add_v1_graph(value_, bias_; name=name)
@@ -38220,7 +38220,7 @@ begin
         return res[1]
     end
     function invert_permutation(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             invert_permutation_eager(x_; name=name)
         else
             invert_permutation_graph(x_; name=name)
@@ -38280,7 +38280,7 @@ begin
         return res[1]
     end
     function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
         else
             hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype)
@@ -38355,7 +38355,7 @@ begin
         return res[1]
     end
     function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
         else
             sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
@@ -38415,7 +38415,7 @@ begin
         return res[1]
     end
     function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal)
         else
             infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal)
@@ -38474,7 +38474,7 @@ begin
         return res[1]
     end
     function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype)
         else
             stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype)
@@ -38544,7 +38544,7 @@ begin
         return res[1]
     end
     function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -38610,7 +38610,7 @@ begin
         return res[1]
     end
     function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
         else
             _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated)
@@ -38678,7 +38678,7 @@ begin
         return res[1]
     end
     function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -38726,7 +38726,7 @@ begin
         return res[1]
     end
     function write_scalar_summary(writer_, step_, tag_, value_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             write_scalar_summary_eager(writer_, step_, tag_, value_; name=name)
         else
             write_scalar_summary_graph(writer_, step_, tag_, value_; name=name)
@@ -38795,7 +38795,7 @@ begin
         return res
     end
     function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
         else
             ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name)
@@ -38884,7 +38884,7 @@ begin
         return res[1]
     end
     function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
         else
             sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking)
@@ -38946,7 +38946,7 @@ begin
         return res[1]
     end
     function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
         else
             tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type)
@@ -39017,7 +39017,7 @@ begin
         return res[1]
     end
     function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
         else
             conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations)
@@ -39076,7 +39076,7 @@ begin
         return res
     end
     function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -39124,7 +39124,7 @@ begin
         return res[1]
     end
     function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2)
         else
             random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2)
@@ -39199,7 +39199,7 @@ begin
         return res
     end
     function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
         else
             uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
@@ -39247,7 +39247,7 @@ begin
         return res[1]
     end
     function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name)
         else
             tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name)
@@ -39336,7 +39336,7 @@ begin
         return res[1]
     end
     function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
         else
             mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor)
@@ -39376,7 +39376,7 @@ begin
         return res[1]
     end
     function draw_bounding_boxes(images_, boxes_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             draw_bounding_boxes_eager(images_, boxes_; name=name)
         else
             draw_bounding_boxes_graph(images_, boxes_; name=name)
@@ -39450,7 +39450,7 @@ begin
         return res[1]
     end
     function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
         else
             sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking)
@@ -39504,7 +39504,7 @@ begin
         return res[1]
     end
     function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
         else
             range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes)
@@ -39542,7 +39542,7 @@ begin
         return res[1]
     end
     function reader_restore_state_v2(reader_handle_, state_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             reader_restore_state_v2_eager(reader_handle_, state_; name=name)
         else
             reader_restore_state_v2_graph(reader_handle_, state_; name=name)
@@ -39593,7 +39593,7 @@ begin
         return res
     end
     function top_kv2(input_, k_; name=nothing, sorted=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             top_kv2_eager(input_, k_; name=name, sorted=sorted)
         else
             top_kv2_graph(input_, k_; name=name, sorted=sorted)
@@ -39629,7 +39629,7 @@ begin
         return res[1]
     end
     function atanh(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             atanh_eager(x_; name=name)
         else
             atanh_graph(x_; name=name)
@@ -39665,7 +39665,7 @@ begin
         return res[1]
     end
     function debug_gradient_identity(input_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             debug_gradient_identity_eager(input_; name=name)
         else
             debug_gradient_identity_graph(input_; name=name)
@@ -39718,7 +39718,7 @@ begin
         return res
     end
     function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
         else
             sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name)
@@ -39771,7 +39771,7 @@ begin
         return res[1]
     end
     function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype)
         else
             resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype)
@@ -39807,7 +39807,7 @@ begin
         return res[1]
     end
     function ceil(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             ceil_eager(x_; name=name)
         else
             ceil_graph(x_; name=name)
@@ -39855,7 +39855,7 @@ begin
         return res[1]
     end
     function save(filename_, tensor_names_, data_; name=nothing, T=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             save_eager(filename_, tensor_names_, data_; name=name, T=T)
         else
             save_graph(filename_, tensor_names_, data_; name=name, T=T)
@@ -39914,7 +39914,7 @@ begin
         return res
     end
     function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
         else
             retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
@@ -39973,7 +39973,7 @@ begin
         return res
     end
     function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
         else
             quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N)
@@ -40009,7 +40009,7 @@ begin
         return res[1]
     end
     function zeros_like(x_; name=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             zeros_like_eager(x_; name=name)
         else
             zeros_like_graph(x_; name=name)
@@ -40086,7 +40086,7 @@ begin
         return res
     end
     function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
         else
             fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
@@ -40149,7 +40149,7 @@ begin
         return res[1]
     end
     function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
         else
             edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize)
@@ -40202,7 +40202,7 @@ begin
         return res
     end
     function unique_v2(x_, axis_; name=nothing, out_idx=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             unique_v2_eager(x_, axis_; name=name, out_idx=out_idx)
         else
             unique_v2_graph(x_, axis_; name=name, out_idx=out_idx)
@@ -40272,7 +40272,7 @@ begin
         return res[1]
     end
     function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
         else
             quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode)
@@ -40338,7 +40338,7 @@ begin
         return res[1]
     end
     function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing)
-        if tf.eager_mode
+        if tf.in_eager_mode()
             quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
         else
             quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max)
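The repeated `tf.eager_mode` → `tf.in_eager_mode()` rewrite suggests the execution mode became a queryable function rather than a plain binding. Its definition is not shown in this patch; a plausible shape, assuming a `Ref`-backed module-level switch (the names `eager_mode` and `enable_eager_execution` here are assumptions, not the package's confirmed API):

# Assumed sketch only -- the package's real in_eager_mode() may differ.
const eager_mode = Ref(false)          # hypothetical module-level state
in_eager_mode() = eager_mode[]         # cheap predicate, re-evaluated on each call

enable_eager_execution() = (eager_mode[] = true; nothing)  # hypothetical toggle

Going through a function keeps the check dynamic: wrappers compiled against `in_eager_mode()` see mode changes made after they were defined, whereas code that read a plain `eager_mode` global would either capture a stale value (if `const`) or pay for a type-unstable global lookup.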
signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) else quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) @@ -40387,7 +40387,7 @@ begin return res end function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_pop_back_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype) else tensor_list_pop_back_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype) @@ -40447,7 +40447,7 @@ begin return res[1] end function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.eager_mode + if tf.in_eager_mode() debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) else debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) @@ -40523,7 +40523,7 @@ begin return res[1] end function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) else apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) @@ -40588,7 +40588,7 @@ begin return res[1] end function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) else depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) @@ -40622,7 +40622,7 @@ begin return res[1] end function serialize_iterator(resource_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() serialize_iterator_eager(resource_handle_; name=name) else serialize_iterator_graph(resource_handle_; name=name) @@ -40656,7 +40656,7 @@ begin return res[1] end function dataset_to_graph(input_dataset_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() dataset_to_graph_eager(input_dataset_; name=name) else dataset_to_graph_graph(input_dataset_; name=name) @@ -40709,7 +40709,7 @@ begin return res end function top_k(input_; name=nothing, k=nothing, sorted=nothing) - if tf.eager_mode + if tf.in_eager_mode() top_k_eager(input_; name=name, k=k, sorted=sorted) else top_k_graph(input_; name=name, k=k, sorted=sorted) @@ -40788,7 +40788,7 @@ begin return res[1] end function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, 
use_locking=use_locking) else resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) @@ -40834,7 +40834,7 @@ begin return res[1] end function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name) else _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name) @@ -40868,7 +40868,7 @@ begin return res[1] end function queue_is_closed(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_is_closed_eager(handle_; name=name) else queue_is_closed_graph(handle_; name=name) @@ -40932,7 +40932,7 @@ begin return res[1] end function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) else shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) @@ -40979,7 +40979,7 @@ begin return res end function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype) else deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype) @@ -41039,7 +41039,7 @@ begin return res[1] end function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) else priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) @@ -41075,7 +41075,7 @@ begin return res[1] end function _device_arg(; name=nothing, index=nothing) - if tf.eager_mode + if tf.in_eager_mode() _device_arg_eager(; name=name, index=index) else _device_arg_graph(; name=name, index=index) @@ -41129,7 +41129,7 @@ begin return res[1] end function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) else truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) @@ -41173,7 +41173,7 @@ begin return res[1] end function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) else tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) @@ -41219,7 +41219,7 @@ begin return res[1] end function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) else stack_v2_graph(max_size_; name=name, 
elem_type=elem_type, stack_name=stack_name) @@ -41253,7 +41253,7 @@ begin return res[1] end function accumulator_num_accumulated(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() accumulator_num_accumulated_eager(handle_; name=name) else accumulator_num_accumulated_graph(handle_; name=name) @@ -41287,7 +41287,7 @@ begin return res[1] end function reader_reset_v2(reader_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_reset_v2_eager(reader_handle_; name=name) else reader_reset_v2_graph(reader_handle_; name=name) @@ -41359,7 +41359,7 @@ begin return res[1] end function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) else apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) @@ -41418,7 +41418,7 @@ begin return res end function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -41454,7 +41454,7 @@ begin return res[1] end function rint(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rint_eager(x_; name=name) else rint_graph(x_; name=name) @@ -41513,7 +41513,7 @@ begin return res end function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -41579,7 +41579,7 @@ begin return res[1] end function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) - if tf.eager_mode + if tf.in_eager_mode() extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise) else extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise) @@ -41625,7 +41625,7 @@ begin return res[1] end function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key) else string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key) @@ -41685,7 +41685,7 @@ begin return res[1] end function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, 
output_shapes=output_shapes, container=container, shared_name=shared_name) else one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) @@ -41758,7 +41758,7 @@ begin return res[1] end function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) else resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) @@ -41810,7 +41810,7 @@ begin return res[1] end function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - if tf.eager_mode + if tf.in_eager_mode() save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) else save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) @@ -41844,7 +41844,7 @@ begin return res[1] end function experimental_dataset_cardinality(input_dataset_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_dataset_cardinality_eager(input_dataset_; name=name) else experimental_dataset_cardinality_graph(input_dataset_; name=name) @@ -41880,7 +41880,7 @@ begin return res[1] end function is_finite(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() is_finite_eager(x_; name=name) else is_finite_graph(x_; name=name) @@ -41960,7 +41960,7 @@ begin return res[1] end function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) else experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) @@ -42018,7 +42018,7 @@ begin return res[1] end function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - if tf.eager_mode + if tf.in_eager_mode() all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) else all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) @@ -42075,7 +42075,7 @@ begin return res end function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) else take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) @@ -42111,7
+42111,7 @@ begin return res[1] end function batch_matrix_diag_part(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_diag_part_eager(input_; name=name) else batch_matrix_diag_part_graph(input_; name=name) @@ -42161,7 +42161,7 @@ begin return res[1] end function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) else fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) @@ -42207,7 +42207,7 @@ begin return res[1] end function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) else stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) @@ -42249,7 +42249,7 @@ begin return res[1] end function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) else placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) @@ -42291,7 +42291,7 @@ begin return res[1] end function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) else multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) @@ -42333,7 +42333,7 @@ begin return res[1] end function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) else gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) @@ -42379,7 +42379,7 @@ begin return res[1] end function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) else queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) @@ -42438,7 +42438,7 @@ begin return res end function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -42480,7 +42480,7 @@ begin return res[1] end function transpose(x_, perm_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() transpose_eager(x_, perm_; name=name) else transpose_graph(x_, perm_; name=name) @@ -42516,7 +42516,7 @@ begin return res[1] end function ifft(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ifft_eager(input_; name=name) else ifft_graph(input_; name=name) @@ -42569,7 +42569,7 @@ begin return res[1] end function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.eager_mode + if 
tf.in_eager_mode() sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) else sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) @@ -42603,7 +42603,7 @@ begin return res[1] end function queue_is_closed_v2(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_is_closed_v2_eager(handle_; name=name) else queue_is_closed_v2_graph(handle_; name=name) @@ -42678,7 +42678,7 @@ begin return res[1] end function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) else parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) @@ -42714,7 +42714,7 @@ begin return res[1] end function diag_part(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() diag_part_eager(input_; name=name) else diag_part_graph(input_; name=name) @@ -42760,7 +42760,7 @@ begin return res[1] end function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) else kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) @@ -42808,7 +42808,7 @@ begin return res[1] end function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) - if tf.eager_mode + if tf.in_eager_mode() regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global) else regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global) @@ -42872,7 +42872,7 @@ begin return res[1] end function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) else sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) @@ -42940,7 +42940,7 @@ begin return res[1] end function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) - if tf.eager_mode + if tf.in_eager_mode() map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) else map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) @@ -43015,7 +43015,7 @@ begin return res end function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) else thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, 
num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) @@ -43074,7 +43074,7 @@ begin return res end function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -43122,7 +43122,7 @@ begin return res[1] end function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() parallel_concat_eager(values_; name=name, N=N, shape=shape) else parallel_concat_graph(values_; name=name, N=N, shape=shape) @@ -43168,7 +43168,7 @@ begin return res[1] end function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name) else lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name) @@ -43206,7 +43206,7 @@ begin return res[1] end function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name) else tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name) @@ -43265,7 +43265,7 @@ begin return res end function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -43323,7 +43323,7 @@ begin return res[1] end function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - if tf.eager_mode + if tf.in_eager_mode() fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) else fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) @@ -43374,7 +43374,7 @@ begin return res[1] end function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) else resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) @@ -43432,7 +43432,7 @@ begin return res[1] end function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, 
output_shapes=output_shapes) @@ -43478,7 +43478,7 @@ begin return res[1] end function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) - if tf.eager_mode + if tf.in_eager_mode() decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian) else decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian) @@ -43541,7 +43541,7 @@ begin return res end function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.eager_mode + if tf.in_eager_mode() fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) else fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) @@ -43594,7 +43594,7 @@ begin return res end function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) - if tf.eager_mode + if tf.in_eager_mode() unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx) else unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx) @@ -43644,7 +43644,7 @@ begin return res[1] end function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -43691,7 +43691,7 @@ begin return res end function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) - if tf.eager_mode + if tf.in_eager_mode() tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas) else tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas) @@ -43738,7 +43738,7 @@ begin return res[1] end function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) else lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) @@ -43774,7 +43774,7 @@ begin return res[1] end function tan(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tan_eager(x_; name=name) else tan_graph(x_; name=name) @@ -43828,7 +43828,7 @@ begin return res[1] end function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - if tf.eager_mode + if tf.in_eager_mode() enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) else enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) @@ -43886,7 +43886,7 @@ begin return res[1] end function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) - if tf.eager_mode + if tf.in_eager_mode() infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) else infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) @@ -43920,7 +43920,7 @@ begin return res[1] end function _set_global_tpu_array(topology_; name=nothing) - if 
tf.eager_mode + if tf.in_eager_mode() _set_global_tpu_array_eager(topology_; name=name) else _set_global_tpu_array_graph(topology_; name=name) @@ -43956,7 +43956,7 @@ begin return res[1] end function square(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() square_eager(x_; name=name) else square_graph(x_; name=name) @@ -43992,7 +43992,7 @@ begin return res[1] end function debug_gradient_ref_identity(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() debug_gradient_ref_identity_eager(input_; name=name) else debug_gradient_ref_identity_graph(input_; name=name) @@ -44064,7 +44064,7 @@ begin return res[1] end function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) else apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -44158,7 +44158,7 @@ begin return res[1] end function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) else experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) @@ -44208,7 +44208,7 @@ begin return res[1] end function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) - if tf.eager_mode + if tf.in_eager_mode() audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) else audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) @@ -44249,7 +44249,7 @@ begin return res[1] end function squared_difference(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() squared_difference_eager(x_, y_; name=name) else squared_difference_graph(x_, y_; name=name) @@ -44311,7 +44311,7 @@ begin return res[1] end function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) else 
experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) @@ -44365,7 +44365,7 @@ begin return res[1] end function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -44411,7 +44411,7 @@ begin return res[1] end function dynamic_stitch(indices_, data_; name=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() dynamic_stitch_eager(indices_, data_; name=name, N=N) else dynamic_stitch_graph(indices_, data_; name=name, N=N) @@ -44447,7 +44447,7 @@ begin return res[1] end function ones_like(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ones_like_eager(x_; name=name) else ones_like_graph(x_; name=name) @@ -44507,7 +44507,7 @@ begin return res[1] end function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - if tf.eager_mode + if tf.in_eager_mode() fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) else fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) @@ -44563,7 +44563,7 @@ begin return res[1] end function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.eager_mode + if tf.in_eager_mode() remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) else remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) @@ -44612,7 +44612,7 @@ begin return res[1] end function gather(params_, indices_; name=nothing, validate_indices=nothing) - if tf.eager_mode + if tf.in_eager_mode() gather_eager(params_, indices_; name=name, validate_indices=validate_indices) else gather_graph(params_, indices_; name=name, validate_indices=validate_indices) @@ -44687,7 +44687,7 @@ begin return res end function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) else quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) @@ -44750,7 +44750,7 @@ begin return res end function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.eager_mode + if tf.in_eager_mode() unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) else unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) @@ -44820,7 +44820,7 @@ begin return res[1] end function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, 
combiners=nothing, table_ids=nothing) - if tf.eager_mode + if tf.in_eager_mode() enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) else enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) @@ -44870,7 +44870,7 @@ begin return res[1] end function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) else accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) @@ -44922,7 +44922,7 @@ begin return res[1] end function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name) else write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name) @@ -45009,7 +45009,7 @@ begin return res end function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) @@ -45075,7 +45075,7 @@ begin return res[1] end function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) else resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) @@ -45111,7 +45111,7 @@ begin return res[1] end function log1p(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() log1p_eager(x_; name=name) else log1p_graph(x_; name=name) @@ -45171,7 +45171,7 @@ begin return res[1] end function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -45224,7 +45224,7 @@ begin return res[1] end function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) else resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) @@ -45291,7 +45291,7 @@ begin return res end function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, 
allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) else barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) @@ -45357,7 +45357,7 @@ begin return res[1] end function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) else resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) @@ -45429,7 +45429,7 @@ begin return res[1] end function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - if tf.eager_mode + if tf.in_eager_mode() generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) else generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) @@ -45481,7 +45481,7 @@ begin return res[1] end function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - if tf.eager_mode + if tf.in_eager_mode() _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) else _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) @@ -45548,7 +45548,7 @@ begin return res end function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) else quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) @@ -45636,7 +45636,7 @@ begin return res[1] end function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) else resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -45674,7 +45674,7 @@ begin return res[1] end function tensor_list_resize(input_handle_, size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_resize_eager(input_handle_, size_; name=name) else tensor_list_resize_graph(input_handle_, size_; name=name) @@ -45740,7 +45740,7 @@ begin return 
res[1] end function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.eager_mode + if tf.in_eager_mode() _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) else _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) @@ -45790,7 +45790,7 @@ begin return res[1] end function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) else boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) @@ -45824,7 +45824,7 @@ begin return res[1] end function lookup_table_size_v2(table_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_size_v2_eager(table_handle_; name=name) else lookup_table_size_v2_graph(table_handle_; name=name) @@ -45862,7 +45862,7 @@ begin return res[1] end function irfft(input_, fft_length_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() irfft_eager(input_, fft_length_; name=name) else irfft_graph(input_, fft_length_; name=name) @@ -45907,7 +45907,7 @@ begin return res[1] end function inplace_add(x_, i_, v_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() inplace_add_eager(x_, i_, v_; name=name) else inplace_add_graph(x_, i_, v_; name=name) @@ -45954,7 +45954,7 @@ begin return res[1] end function bias_add(value_, bias_; name=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() bias_add_eager(value_, bias_; name=name, data_format=data_format) else bias_add_graph(value_, bias_; name=name, data_format=data_format) @@ -46024,7 +46024,7 @@ begin return res[1] end function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -46055,7 +46055,7 @@ begin return res[1] end function _disconnect_host_from_distributed_tpu_system(; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _disconnect_host_from_distributed_tpu_system_eager(; name=name) else _disconnect_host_from_distributed_tpu_system_graph(; name=name) @@ -46106,7 +46106,7 @@ begin return res end function ragged_range(starts_, limits_, deltas_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ragged_range_eager(starts_, limits_, deltas_; name=name) else ragged_range_graph(starts_, limits_, deltas_; name=name) @@ -46168,7 +46168,7 @@ begin return res[1] end function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; 
name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) else window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -46204,7 +46204,7 @@ begin return res[1] end function diag(diagonal_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() diag_eager(diagonal_; name=name) else diag_graph(diagonal_; name=name) @@ -46246,7 +46246,7 @@ begin return res[1] end function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) else infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) @@ -46296,7 +46296,7 @@ begin return res[1] end function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -46352,7 +46352,7 @@ begin return res[1] end function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) else add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) @@ -46416,7 +46416,7 @@ begin return res end function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - if tf.eager_mode + if tf.in_eager_mode() ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) else ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) @@ -46452,7 +46452,7 @@ begin return res[1] end function rgb_to_hsv(images_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() rgb_to_hsv_eager(images_; name=name) else rgb_to_hsv_graph(images_; name=name) @@ -46486,7 +46486,7 @@ begin return res[1] end function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) else multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) @@ -46544,7 +46544,7 @@ begin return res[1] end function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - if tf.eager_mode + if tf.in_eager_mode() for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) else for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) @@ -46603,7 +46603,7 @@ begin return res end function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) else sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) @@ -46652,7 +46652,7 @@ begin return res end function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() concat_offset_eager(concat_dim_, shape_; name=name, N=N) else concat_offset_graph(concat_dim_, shape_; name=name, N=N) @@ -46716,7 +46716,7 @@ begin return res[1] end function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -46761,7 +46761,7 @@ begin return res end function switch(data_, pred_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() switch_eager(data_, pred_; name=name) else switch_graph(data_, pred_; name=name) @@ -46811,7 +46811,7 @@ begin return res[1] end function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) else queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) @@ -46854,7 +46854,7 @@ begin return res[1] end function segment_prod(data_, segment_ids_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() segment_prod_eager(data_, segment_ids_; name=name) else segment_prod_graph(data_, segment_ids_; name=name) @@ -46901,7 +46901,7 @@ begin return res[1] end function approximate_equal(x_, y_; name=nothing, tolerance=nothing) - if tf.eager_mode + if tf.in_eager_mode() approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) else approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) @@ -46978,7 +46978,7 @@ begin return res[1] end function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) else conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) @@ -47018,7 +47018,7 @@ begin return res[1] end function cross_replica_sum(input_, group_assignment_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() cross_replica_sum_eager(input_, group_assignment_; name=name) else cross_replica_sum_graph(input_, group_assignment_; name=name) @@ -47084,7 +47084,7 @@ begin return res[1] end function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) else sparse_mat_mul_graph(a_, b_; 
name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) @@ -47154,7 +47154,7 @@ begin return res end function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) else _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) @@ -47195,7 +47195,7 @@ begin return res[1] end function igammac(a_, x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() igammac_eager(a_, x_; name=name) else igammac_graph(a_, x_; name=name) @@ -47248,7 +47248,7 @@ begin return res[1] end function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) else batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) @@ -47312,7 +47312,7 @@ begin return res[1] end function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - if tf.eager_mode + if tf.in_eager_mode() enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) else enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) @@ -47352,7 +47352,7 @@ begin return res[1] end function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.eager_mode + if tf.in_eager_mode() queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) else queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) @@ -47402,7 +47402,7 @@ begin return res[1] end function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) else tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) @@ -47440,7 +47440,7 @@ begin return res[1] end function reader_restore_state(reader_handle_, state_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_restore_state_eager(reader_handle_, state_; name=name) else reader_restore_state_graph(reader_handle_, state_; name=name) @@ -47534,7 +47534,7 @@ begin return res[1] end function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) - if tf.eager_mode + if tf.in_eager_mode() _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, fused_ops=fused_ops, epsilon=epsilon) else _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, fused_ops=fused_ops, epsilon=epsilon) @@ -47580,7 +47580,7 @@ begin return res[1] end function 
_read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) - if tf.eager_mode + if tf.in_eager_mode() _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) else _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) @@ -47646,7 +47646,7 @@ begin return res[1] end function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) else mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) @@ -47680,7 +47680,7 @@ begin return res[1] end function read_file(filename_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() read_file_eager(filename_; name=name) else read_file_graph(filename_; name=name) @@ -47750,7 +47750,7 @@ begin return res[1] end function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -47804,7 +47804,7 @@ begin return res[1] end function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - if tf.eager_mode + if tf.in_eager_mode() fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) else fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) @@ -47870,7 +47870,7 @@ begin return res[1] end function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -47926,7 +47926,7 @@ begin return res[1] end function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) else stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=name, 
dtype=dtype, shape_dtype=shape_dtype) @@ -47970,7 +47970,7 @@ begin return res[1] end function bincount(arr_, size_, weights_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() bincount_eager(arr_, size_, weights_; name=name) else bincount_graph(arr_, size_, weights_; name=name) @@ -48006,7 +48006,7 @@ begin return res[1] end function inv(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() inv_eager(x_; name=name) else inv_graph(x_; name=name) @@ -48073,7 +48073,7 @@ begin return res[1] end function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) else apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) @@ -48122,7 +48122,7 @@ begin return res[1] end function gather_v2(params_, indices_, axis_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() gather_v2_eager(params_, indices_, axis_; name=name) else gather_v2_graph(params_, indices_, axis_; name=name) @@ -48160,7 +48160,7 @@ begin return res[1] end function write_file(filename_, contents_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() write_file_eager(filename_, contents_; name=name) else write_file_graph(filename_, contents_; name=name) @@ -48199,7 +48199,7 @@ begin return res end function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) else boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) @@ -48252,7 +48252,7 @@ begin return res[1] end function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) else resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) @@ -48313,7 +48313,7 @@ begin return res[1] end function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) else resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) @@ -48354,7 +48354,7 @@ begin return res[1] end function truncate_mod(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() truncate_mod_eager(x_, y_; name=name) else truncate_mod_graph(x_, y_; name=name) @@ -48395,7 +48395,7 @@ begin return res end function log_matrix_determinant(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() log_matrix_determinant_eager(input_; name=name) else log_matrix_determinant_graph(input_; name=name) @@ -48433,7 +48433,7 @@ begin return res[1] end function irfft2d(input_, fft_length_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() irfft2d_eager(input_, fft_length_; name=name) else irfft2d_graph(input_, fft_length_; name=name) @@ -48496,7 +48496,7 @@ begin return res end function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) else boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) @@ -48543,7 +48543,7 @@ begin return res end function nearest_neighbors(points_, centers_, k_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() nearest_neighbors_eager(points_, centers_, k_; name=name) else nearest_neighbors_graph(points_, centers_, k_; name=name) @@ -48579,7 +48579,7 @@ begin return res[1] end function floor(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() floor_eager(x_; name=name) else floor_graph(x_; name=name) @@ -48645,7 +48645,7 @@ begin return res[1] end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -48703,7 +48703,7 @@ begin return res[1] end function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - if tf.eager_mode + if tf.in_eager_mode() write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) else write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) @@ -48743,7 +48743,7 @@ begin return res[1] end function tile_grad(input_, multiples_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tile_grad_eager(input_, multiples_; name=name) else tile_grad_graph(input_, multiples_; name=name) @@ -48792,7 +48792,7 @@ begin return res end function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source) else tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) @@ -48842,7 +48842,7 @@ begin return res[1] end function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - if tf.eager_mode + if tf.in_eager_mode() enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) else enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) @@ -48921,7 +48921,7 @@ begin return res end function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.eager_mode + if tf.in_eager_mode() fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) else fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) @@
-48959,7 +48959,7 @@ begin return res[1] end function logical_and(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() logical_and_eager(x_, y_; name=name) else logical_and_graph(x_, y_; name=name) @@ -49007,7 +49007,7 @@ begin return res[1] end function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) else tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) @@ -49055,7 +49055,7 @@ begin return res[1] end function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) else text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) @@ -49101,7 +49101,7 @@ begin return res[1] end function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) else tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) @@ -49149,7 +49149,7 @@ begin return res[1] end function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name) else tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) @@ -49195,7 +49195,7 @@ begin return res[1] end function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) else resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) @@ -49267,7 +49267,7 @@ begin return res[1] end function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) else apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) @@ -49317,7 +49317,7 @@ begin return res[1] end function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -49365,7 +49365,7 @@ begin return res[1] end function mirror_pad(input_, paddings_; name=nothing, mode=nothing) - if tf.eager_mode + if tf.in_eager_mode() mirror_pad_eager(input_, paddings_; name=name, mode=mode) else mirror_pad_graph(input_, paddings_; name=name, mode=mode) @@ -49399,7 +49399,7 @@ begin return res[1] end function logical_not(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() logical_not_eager(x_; name=name) else logical_not_graph(x_; name=name) @@ -49433,7 +49433,7 @@ begin return res[1] end function 
batch_ifft(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_ifft_eager(input_; name=name) else batch_ifft_graph(input_; name=name) @@ -49488,7 +49488,7 @@ begin return res end function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) else tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) @@ -49537,7 +49537,7 @@ begin return res[1] end function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) else sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) @@ -49587,7 +49587,7 @@ begin return res[1] end function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) else boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) @@ -49688,7 +49688,7 @@ begin return res end function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) @@ -49756,7 +49756,7 @@ begin return res[1] end function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) else resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) @@ -49803,7 +49803,7 @@ begin return res[1] end function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) - if tf.eager_mode + if tf.in_eager_mode() leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) else leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) @@ -49845,7 +49845,7 @@ begin return res[1] end function _device_retval(input_; name=nothing, index=nothing) - if tf.eager_mode + if tf.in_eager_mode() _device_retval_eager(input_; name=name, index=index) else _device_retval_graph(input_; name=name, index=index) @@ -49887,7 +49887,7 @@ begin return res[1] end function pad(input_, paddings_; name=nothing) - if tf.eager_mode + if 
tf.in_eager_mode() pad_eager(input_, paddings_; name=name) else pad_graph(input_, paddings_; name=name) @@ -49943,7 +49943,7 @@ begin return res[1] end function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) else add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) @@ -49992,7 +49992,7 @@ begin return res end function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) else sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) @@ -50033,7 +50033,7 @@ begin return res[1] end function bitwise_xor(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() bitwise_xor_eager(x_, y_; name=name) else bitwise_xor_graph(x_, y_; name=name) @@ -50074,7 +50074,7 @@ begin return res[1] end function batch_matrix_set_diag(input_, diagonal_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_matrix_set_diag_eager(input_, diagonal_; name=name) else batch_matrix_set_diag_graph(input_, diagonal_; name=name) @@ -50120,7 +50120,7 @@ begin return res[1] end function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) else lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name) @@ -50174,7 +50174,7 @@ begin return res[1] end function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -50255,7 +50255,7 @@ begin return res[1] end function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) else resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) @@ -50307,7 +50307,7 @@ begin return res[1] end function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) else random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) @@ -50353,7 +50353,7 @@ begin return res[1] end function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) else lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) @@ -50406,7 +50406,7 @@ begin return res[1] end function resource_scatter_nd_update(ref_, 
indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -50446,7 +50446,7 @@ begin return res[1] end function static_regex_full_match(input_; name=nothing, pattern=nothing) - if tf.eager_mode + if tf.in_eager_mode() static_regex_full_match_eager(input_; name=name, pattern=pattern) else static_regex_full_match_graph(input_; name=name, pattern=pattern) @@ -50480,7 +50480,7 @@ begin return res[1] end function gcs_configure_credentials(json_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() gcs_configure_credentials_eager(json_; name=name) else gcs_configure_credentials_graph(json_; name=name) @@ -50518,7 +50518,7 @@ begin return res[1] end function tensor_array_size_v3(handle_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_size_v3_eager(handle_, flow_in_; name=name) else tensor_array_size_v3_graph(handle_, flow_in_; name=name) @@ -50571,7 +50571,7 @@ begin return res[1] end function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) else sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) @@ -50681,7 +50681,7 @@ begin return res[1] end function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) else experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) @@ -50762,7 +50762,7 @@ begin return res[1] end function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) else conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) @@ -50832,7 +50832,7 @@ begin return res[1] end function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -50866,7 +50866,7 @@ begin return res[1] end function _initialize_host_for_distributed_tpu(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _initialize_host_for_distributed_tpu_eager(input_; name=name) else _initialize_host_for_distributed_tpu_graph(input_; name=name) @@ -50930,7 +50930,7 @@ begin return res[1] end function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -50977,7 +50977,7 @@ begin return res[1] end function pad_v2(input_, paddings_, constant_values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() pad_v2_eager(input_, paddings_, constant_values_; name=name) else pad_v2_graph(input_, paddings_, constant_values_; name=name) @@ -51019,7 +51019,7 @@ begin return res[1] end function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) else _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) @@ -51059,7 +51059,7 @@ begin return res[1] end function print_v2(input_; name=nothing, output_stream=nothing) - if tf.eager_mode + if tf.in_eager_mode() print_v2_eager(input_; name=name, output_stream=output_stream) else print_v2_graph(input_; name=name, output_stream=output_stream) @@ -51105,7 +51105,7 @@ begin return res[1] end function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) else optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -51171,7 +51171,7 @@ begin return res[1] end function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else 
load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -51228,7 +51228,7 @@ begin return res end function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) else sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) @@ -51281,7 +51281,7 @@ begin return res end function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) else boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) @@ -51328,7 +51328,7 @@ begin return res[1] end function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.eager_mode + if tf.in_eager_mode() matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) else matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) @@ -51368,7 +51368,7 @@ begin return res[1] end function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) - if tf.eager_mode + if tf.in_eager_mode() _configure_distributed_tpu_eager(inputs_; name=name, N=N) else _configure_distributed_tpu_graph(inputs_; name=name, N=N) @@ -51408,7 +51408,7 @@ begin return res[1] end function adjust_contrastv2(images_, contrast_factor_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() adjust_contrastv2_eager(images_, contrast_factor_; name=name) else adjust_contrastv2_graph(images_, contrast_factor_; name=name) @@ -51462,7 +51462,7 @@ begin return res end function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) else _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) @@ -51546,7 +51546,7 @@ begin return res[1] end function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.eager_mode + if tf.in_eager_mode() cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) else cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) @@ -51590,7 +51590,7 @@ begin return res[1] end function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) else boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) @@ -51624,7 +51624,7 @@ begin return res[1] end function batch_ifft3d(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_ifft3d_eager(input_; name=name) else batch_ifft3d_graph(input_; name=name) @@ -51660,7 +51660,7 @@ begin return res[1] end function sigmoid(x_; 
name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sigmoid_eager(x_; name=name) else sigmoid_graph(x_; name=name) @@ -51703,7 +51703,7 @@ begin return res[1] end function segment_mean(data_, segment_ids_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() segment_mean_eager(data_, segment_ids_; name=name) else segment_mean_graph(data_, segment_ids_; name=name) @@ -51737,7 +51737,7 @@ begin return res[1] end function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) else is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) @@ -51775,7 +51775,7 @@ begin return res[1] end function tensor_array_size_v2(handle_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_size_v2_eager(handle_, flow_in_; name=name) else tensor_array_size_v2_graph(handle_, flow_in_; name=name) @@ -51829,7 +51829,7 @@ begin return res end function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) else _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) @@ -51885,7 +51885,7 @@ begin return res[1] end function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - if tf.eager_mode + if tf.in_eager_mode() send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) else send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) @@ -51945,7 +51945,7 @@ begin return res[1] end function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -51994,7 +51994,7 @@ begin return res[1] end function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.eager_mode + if tf.in_eager_mode() prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) else prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) @@ -52028,7 +52028,7 @@ begin return res[1] end function experimental_identity_indexed_dataset(size_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_identity_indexed_dataset_eager(size_; name=name) else experimental_identity_indexed_dataset_graph(size_; name=name) @@ -52074,7 +52074,7 @@ begin return res[1] end function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) else tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) @@ -52184,7 +52184,7 @@ begin return res[1] end function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, 
num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) else batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) @@ -52238,7 +52238,7 @@ begin return res end function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) else sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) @@ -52285,7 +52285,7 @@ begin return res end function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.eager_mode + if tf.in_eager_mode() self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) else self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) @@ -52344,7 +52344,7 @@ begin return res end function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -52424,7 +52424,7 @@ begin return res[1] end function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) else resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) @@ -52472,7 +52472,7 @@ begin return res[1] end function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) else temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) @@ -52542,7 +52542,7 @@ begin return res[1] end function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) else resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) @@ -52590,7 +52590,7 @@ begin return res[1] end function roll(input_, shift_, axis_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() roll_eager(input_, shift_, axis_; name=name) else roll_graph(input_, shift_, axis_; 
name=name) @@ -52631,7 +52631,7 @@ begin return res[1] end function xdivy(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() xdivy_eager(x_, y_; name=name) else xdivy_graph(x_, y_; name=name) @@ -52701,7 +52701,7 @@ begin return res[1] end function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) else max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) @@ -52761,7 +52761,7 @@ begin return res[1] end function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - if tf.eager_mode + if tf.in_eager_mode() crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) else crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) @@ -52830,7 +52830,7 @@ begin return res end function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) else quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) @@ -52868,7 +52868,7 @@ begin return res[1] end function kmc2chain_initialization(distances_, seed_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() kmc2chain_initialization_eager(distances_, seed_; name=name) else kmc2chain_initialization_graph(distances_, seed_; name=name) @@ -52937,7 +52937,7 @@ begin return res end function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -52991,7 +52991,7 @@ begin return res[1] end function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -53037,7 +53037,7 @@ begin return res[1] end function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) else resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) @@ -53105,7 +53105,7 @@ begin return res[1] end function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, 
shared_name=shared_name) else ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -53169,7 +53169,7 @@ begin return res[1] end function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) else tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) @@ -53214,7 +53214,7 @@ begin return res[1] end function inplace_sub(x_, i_, v_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() inplace_sub_eager(x_, i_, v_; name=name) else inplace_sub_graph(x_, i_, v_; name=name) @@ -53255,7 +53255,7 @@ begin return res[1] end function pow(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() pow_eager(x_, y_; name=name) else pow_graph(x_, y_; name=name) @@ -53307,7 +53307,7 @@ begin return res[1] end function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() stateful_standard_normal_eager(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) else stateful_standard_normal_graph(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) @@ -53343,7 +53343,7 @@ begin return res[1] end function ref_next_iteration(data_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() ref_next_iteration_eager(data_; name=name) else ref_next_iteration_graph(data_; name=name) @@ -53383,7 +53383,7 @@ begin return res[1] end function scalar_summary(tags_, values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() scalar_summary_eager(tags_, values_; name=name) else scalar_summary_graph(tags_, values_; name=name) @@ -53432,7 +53432,7 @@ begin return res end function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) else string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit) @@ -53468,7 +53468,7 @@ begin return res[1] end function bessel_i0e(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() bessel_i0e_eager(x_; name=name) else bessel_i0e_graph(x_; name=name) @@ -53515,7 +53515,7 @@ begin return res end function unique(x_; name=nothing, out_idx=nothing) - if tf.eager_mode + if tf.in_eager_mode() unique_eager(x_; name=name, out_idx=out_idx) else unique_graph(x_; name=name, out_idx=out_idx) @@ -53581,7 +53581,7 @@ begin return res[1] end function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -53623,7 +53623,7 @@ begin return res[1] end function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if 
tf.in_eager_mode() whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) else whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) @@ -53675,7 +53675,7 @@ begin return res[1] end function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.eager_mode + if tf.in_eager_mode() eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) else eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) @@ -53711,7 +53711,7 @@ begin return res[1] end function next_iteration(data_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() next_iteration_eager(data_; name=name) else next_iteration_graph(data_; name=name) @@ -53773,7 +53773,7 @@ begin return res[1] end function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() case_eager(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, output_shapes=output_shapes) else case_graph(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, output_shapes=output_shapes) @@ -53821,7 +53821,7 @@ begin return res[1] end function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) else tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) @@ -53875,7 +53875,7 @@ begin return res[1] end function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -53911,7 +53911,7 @@ begin return res[1] end function sqrt(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sqrt_eager(x_; name=name) else sqrt_graph(x_; name=name) @@ -53955,7 +53955,7 @@ begin return res[1] end function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) else accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) @@ -54009,7 +54009,7 @@ begin return res end function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) else _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) @@ -54045,7 +54045,7 @@ begin return res[1] end function reciprocal(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reciprocal_eager(x_; name=name) else reciprocal_graph(x_; name=name) @@ -54085,7 +54085,7 @@ begin return res[1] end function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) - if tf.eager_mode + if tf.in_eager_mode() outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) else outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) @@ -54119,7 +54119,7 @@ begin return res[1] end function string_strip(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_strip_eager(input_; name=name) else string_strip_graph(input_; name=name) @@ -54173,7 +54173,7 @@ begin return res[1] end function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.eager_mode + if tf.in_eager_mode() 
fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) else fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) @@ -54207,7 +54207,7 @@ begin return res[1] end function barrier_ready_size(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() barrier_ready_size_eager(handle_; name=name) else barrier_ready_size_graph(handle_; name=name) @@ -54247,7 +54247,7 @@ begin return res[1] end function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) - if tf.eager_mode + if tf.in_eager_mode() string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) else string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) @@ -54302,7 +54302,7 @@ begin return res end function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) else tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) @@ -54344,7 +54344,7 @@ begin return res[1] end function sharded_filename(basename_, shard_, num_shards_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sharded_filename_eager(basename_, shard_, num_shards_; name=name) else sharded_filename_graph(basename_, shard_, num_shards_; name=name) @@ -54396,7 +54396,7 @@ begin return res[1] end function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.eager_mode + if tf.in_eager_mode() py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) else py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) @@ -54445,7 +54445,7 @@ begin return res[1] end function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) else unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) @@ -54487,7 +54487,7 @@ begin return res[1] end function count_up_to(ref_; name=nothing, limit=nothing) - if tf.eager_mode + if tf.in_eager_mode() count_up_to_eager(ref_; name=name, limit=limit) else count_up_to_graph(ref_; name=name, limit=limit) @@ -54547,7 +54547,7 @@ begin return res[1] end function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) else random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) @@ -54591,7 +54591,7 @@ begin return res[1] end function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) else tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) @@ -54650,7 +54650,7 @@ begin return res[1] end function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) else dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) @@ -54712,7 +54712,7 @@ begin return res[1] end function unbatch(batched_tensor_, 
batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) else unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) @@ -54748,7 +54748,7 @@ begin return res[1] end function get_session_handle(value_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() get_session_handle_eager(value_; name=name) else get_session_handle_graph(value_; name=name) @@ -54807,7 +54807,7 @@ begin return res end function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -54873,7 +54873,7 @@ begin return res[1] end function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.eager_mode + if tf.in_eager_mode() mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) else mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) @@ -54957,7 +54957,7 @@ begin return res[1] end function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) else sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) @@ -55011,7 +55011,7 @@ begin return res[1] end function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) else batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -55073,7 +55073,7 @@ begin return res end function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) else sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) @@ -55116,7 +55116,7 @@ begin return res[1] end function reverse_v2(tensor_, axis_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reverse_v2_eager(tensor_, axis_; name=name) else reverse_v2_graph(tensor_, axis_; name=name) @@ -55237,7 +55237,7 @@ begin return res[1] 
end function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.eager_mode + if tf.in_eager_mode() strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) else strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) @@ -55271,7 +55271,7 @@ begin return res[1] end function matching_files(pattern_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() matching_files_eager(pattern_; name=name) else matching_files_graph(pattern_; name=name) @@ -55311,7 +55311,7 @@ begin return res[1] end function encode_base64(input_; name=nothing, pad=nothing) - if tf.eager_mode + if tf.in_eager_mode() encode_base64_eager(input_; name=name, pad=pad) else encode_base64_graph(input_; name=name, pad=pad) @@ -55357,7 +55357,7 @@ begin return res[1] end function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) else iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -55417,7 +55417,7 @@ begin return res[1] end function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) else padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) @@ -55451,7 +55451,7 @@ begin return res[1] end function iterator_to_string_handle(resource_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_to_string_handle_eager(resource_handle_; name=name) else iterator_to_string_handle_graph(resource_handle_; name=name) @@ -55516,7 +55516,7 @@ begin return res[1] end function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.eager_mode + if tf.in_eager_mode() max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) else max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) @@ -55564,7 +55564,7 @@ begin return res[1] end function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=name, element_dtype=element_dtype) else tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=name, element_dtype=element_dtype) @@ -55622,7 +55622,7 @@ begin return res[1] end function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() multinomial_eager(logits_, num_samples_; name=name, seed=seed, 
seed2=seed2, output_dtype=output_dtype) else multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) @@ -55670,7 +55670,7 @@ begin return res[1] end function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) else tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) @@ -55720,7 +55720,7 @@ begin return res[1] end function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -55776,7 +55776,7 @@ begin return res[1] end function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.eager_mode + if tf.in_eager_mode() tpu_partitioned_call_eager(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) else tpu_partitioned_call_graph(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) @@ -55871,7 +55871,7 @@ begin return res end function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) @@ -55917,7 +55917,7 @@ begin return res[1] end function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) else iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -55958,7 +55958,7 @@ begin return res[1] end function bitwise_or(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() bitwise_or_eager(x_, y_; name=name) else bitwise_or_graph(x_, y_; name=name) @@ -56007,7 +56007,7 @@ begin return res[1] end function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) else unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) @@ -56061,7 +56061,7 @@ begin return res end function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) else _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) @@ -56125,7 +56125,7 @@ begin return res[1] end function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, 
strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) else conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) @@ -56195,7 +56195,7 @@ begin return res[1] end function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) else if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) @@ -56257,7 +56257,7 @@ begin return res[1] end function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) else flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) @@ -56315,7 +56315,7 @@ begin return res[1] end function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) else tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) @@ -56356,7 +56356,7 @@ begin return res[1] end function softsign_grad(gradients_, features_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() softsign_grad_eager(gradients_, features_; name=name) else softsign_grad_graph(gradients_, features_; name=name) @@ -56404,7 +56404,7 @@ begin return res[1] end function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - if tf.eager_mode + if tf.in_eager_mode() copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) else copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) @@ -56452,7 +56452,7 @@ begin return res[1] end function lin_space(start_, stop_, num_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lin_space_eager(start_, stop_, num_; name=name) else lin_space_graph(start_, stop_, num_; name=name) @@ -56499,7 +56499,7 @@ begin return res[1] end function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) - if tf.eager_mode + if tf.in_eager_mode() _parallel_concat_update_eager(value_, update_; name=name, loc=loc) else _parallel_concat_update_graph(value_, update_; name=name, loc=loc) @@ -56541,7 +56541,7 @@ begin return res[1] end function stack(; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) else stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) @@ -56587,7 +56587,7 @@ begin return res[1] end function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.eager_mode + if tf.in_eager_mode() stack_push_v2_eager(handle_, 
elem_; name=name, swap_memory=swap_memory) else stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) @@ -56633,7 +56633,7 @@ begin return res[1] end function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) else assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) @@ -56693,7 +56693,7 @@ begin return res end function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) else sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) @@ -56737,7 +56737,7 @@ begin return res[1] end function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) else tensor_array_unpack_graph(handle_, value_, flow_in_; name=name) @@ -56787,7 +56787,7 @@ begin return res[1] end function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_stack_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) else tensor_list_stack_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) @@ -56821,7 +56821,7 @@ begin return res[1] end function barrier_incomplete_size(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() barrier_incomplete_size_eager(handle_; name=name) else barrier_incomplete_size_graph(handle_; name=name) @@ -56871,7 +56871,7 @@ begin return res[1] end function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) - if tf.eager_mode + if tf.in_eager_mode() restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) else restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) @@ -56946,7 +56946,7 @@ begin return res end function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) else tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) @@ -56996,7 +56996,7 @@ begin return res[1] end function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) else experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -57042,7 +57042,7 @@ begin return res[1] end function in_top_k(predictions_, targets_; name=nothing, k=nothing) 
- if tf.eager_mode + if tf.in_eager_mode() in_top_k_eager(predictions_, targets_; name=name, k=k) else in_top_k_graph(predictions_, targets_; name=name, k=k) @@ -57096,7 +57096,7 @@ begin return res[1] end function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -57132,7 +57132,7 @@ begin return res[1] end function acosh(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() acosh_eager(x_; name=name) else acosh_graph(x_; name=name) @@ -57201,7 +57201,7 @@ begin return res[1] end function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) else depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) @@ -57255,7 +57255,7 @@ begin return res[1] end function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - if tf.eager_mode + if tf.in_eager_mode() cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) else cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) @@ -57314,7 +57314,7 @@ begin return res end function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) else quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) @@ -57404,7 +57404,7 @@ begin return res[1] end function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) else generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) @@ -57438,7 +57438,7 @@ begin return res[1] end function tensor_forest_tree_serialize(tree_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_forest_tree_serialize_eager(tree_handle_; name=name) else tensor_forest_tree_serialize_graph(tree_handle_; name=name) @@ -57479,7 +57479,7 @@ begin return res[1] end function next_after(x1_, x2_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() next_after_eager(x1_, x2_; name=name) else next_after_graph(x1_, 
x2_; name=name) @@ -57513,7 +57513,7 @@ begin return res[1] end function tensor_array_close_v2(handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_array_close_v2_eager(handle_; name=name) else tensor_array_close_v2_graph(handle_; name=name) @@ -57591,7 +57591,7 @@ begin return res[1] end function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - if tf.eager_mode + if tf.in_eager_mode() big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) else big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) @@ -57634,7 +57634,7 @@ begin return res end function reader_read_v2(reader_handle_, queue_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() reader_read_v2_eager(reader_handle_, queue_handle_; name=name) else reader_read_v2_graph(reader_handle_, queue_handle_; name=name) @@ -57675,7 +57675,7 @@ begin return res[1] end function mod(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() mod_eager(x_, y_; name=name) else mod_graph(x_, y_; name=name) @@ -57716,7 +57716,7 @@ begin return res[1] end function add_v2(x_, y_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() add_v2_eager(x_, y_; name=name) else add_v2_graph(x_, y_; name=name) @@ -57764,7 +57764,7 @@ begin return res[1] end function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) else stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) @@ -57890,7 +57890,7 @@ begin return res[1] end function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.eager_mode + if tf.in_eager_mode() strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) else strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) @@ -57944,7 +57944,7 @@ begin return res[1] end function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) else scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) @@ -58069,7 +58069,7 @@ begin return res[1] end function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, 
ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) else resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) @@ -58110,7 +58110,7 @@ begin return res[1] end function random_gamma_grad(alpha_, sample_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_gamma_grad_eager(alpha_, sample_; name=name) else random_gamma_grad_graph(alpha_, sample_; name=name) @@ -58183,7 +58183,7 @@ begin return res[1] end function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) else resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) @@ -58231,7 +58231,7 @@ begin return res[1] end function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - if tf.eager_mode + if tf.in_eager_mode() boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) else boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) @@ -58286,7 +58286,7 @@ begin return res end function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) else quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) @@ -58348,7 +58348,7 @@ begin return res end function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) else sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) @@ -58416,7 +58416,7 @@ begin return res[1] end function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) else batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) @@ -58461,7 +58461,7 @@ begin return res[1] end function in_top_kv2(predictions_, targets_, k_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() in_top_kv2_eager(predictions_, targets_, k_; name=name) else in_top_kv2_graph(predictions_, targets_, k_; name=name) @@ -58497,7 +58497,7 @@ begin return res[1] end function cholesky(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() cholesky_eager(input_; name=name) else cholesky_graph(input_; name=name) @@ -58575,7 +58575,7 @@ 
begin return res[1] end function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) else resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) @@ -58636,7 +58636,7 @@ begin return res[1] end function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) else resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) @@ -58718,7 +58718,7 @@ begin return res[1] end function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) else experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) @@ -58764,7 +58764,7 @@ begin return res[1] end function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) else resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) @@ -58800,7 +58800,7 @@ begin return res[1] end function batch_self_adjoint_eig(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_self_adjoint_eig_eager(input_; name=name) else batch_self_adjoint_eig_graph(input_; name=name) @@ -58844,7 +58844,7 @@ begin return res[1] end function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) else sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name) @@ -58880,7 +58880,7 @@ begin return res[1] end function asinh(x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() asinh_eager(x_; name=name) else asinh_graph(x_; name=name) @@ -58967,7 +58967,7 @@ begin return res end function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.eager_mode + if tf.in_eager_mode() quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) else quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, 
dilations=dilations) @@ -59009,7 +59009,7 @@ begin return res[1] end function matrix_inverse(input_; name=nothing, adjoint=nothing) - if tf.eager_mode + if tf.in_eager_mode() matrix_inverse_eager(input_; name=name, adjoint=adjoint) else matrix_inverse_graph(input_; name=name, adjoint=adjoint) @@ -59053,7 +59053,7 @@ begin return res[1] end function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) else tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) @@ -59116,7 +59116,7 @@ begin return res end function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) else requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) @@ -59152,7 +59152,7 @@ begin return res[1] end function fft(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() fft_eager(input_; name=name) else fft_graph(input_; name=name) @@ -59194,7 +59194,7 @@ begin return res[1] end function conjugate_transpose(x_, perm_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() conjugate_transpose_eager(x_, perm_; name=name) else conjugate_transpose_graph(x_, perm_; name=name) @@ -59254,7 +59254,7 @@ begin return res[1] end function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.eager_mode + if tf.in_eager_mode() unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) else unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) @@ -59295,7 +59295,7 @@ begin return res[1] end function relu6grad(gradients_, features_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() relu6grad_eager(gradients_, features_; name=name) else relu6grad_graph(gradients_, features_; name=name) @@ -59350,7 +59350,7 @@ begin return res[1] end function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=name, kernel_type=kernel_type) else scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=name, kernel_type=kernel_type) @@ -59398,7 +59398,7 @@ begin return res[1] end function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) - if tf.eager_mode + if tf.in_eager_mode() _array_to_list_eager(input_; name=name, N=N, out_types=out_types) else _array_to_list_graph(input_; name=name, N=N, out_types=out_types) @@ -59500,7 +59500,7 @@ begin return res end function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.eager_mode + if tf.in_eager_mode() cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, 
is_training=is_training) else cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) @@ -59543,7 +59543,7 @@ begin return res[1] end function expand_dims(input_, dim_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() expand_dims_eager(input_, dim_; name=name) else expand_dims_graph(input_, dim_; name=name) @@ -59584,7 +59584,7 @@ begin return res[1] end function inv_grad(y_, dy_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() inv_grad_eager(y_, dy_; name=name) else inv_grad_graph(y_, dy_; name=name) @@ -59632,7 +59632,7 @@ begin return res[1] end function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - if tf.eager_mode + if tf.in_eager_mode() non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) else non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) @@ -59668,7 +59668,7 @@ begin return res[1] end function l2loss(t_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() l2loss_eager(t_; name=name) else l2loss_graph(t_; name=name) @@ -59714,7 +59714,7 @@ begin return res[1] end function resize_area(images_, size_; name=nothing, align_corners=nothing) - if tf.eager_mode + if tf.in_eager_mode() resize_area_eager(images_, size_; name=name, align_corners=align_corners) else resize_area_graph(images_, size_; name=name, align_corners=align_corners) @@ -59813,7 +59813,7 @@ begin return res end function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) else sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) @@ -59847,7 +59847,7 @@ begin return res[1] end function batch_fft3d(input_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() batch_fft3d_eager(input_; name=name) else batch_fft3d_graph(input_; name=name) @@ -59901,7 +59901,7 @@ begin return res[1] end function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) else random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) @@ -59954,7 +59954,7 @@ begin return res[1] end function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.eager_mode + if tf.in_eager_mode() resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) else resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype) @@ -60083,7 +60083,7 @@ begin return res end function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, 
sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.eager_mode + if tf.in_eager_mode() sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) else sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) @@ -60124,7 +60124,7 @@ begin return res[1] end function zeta(x_, q_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() zeta_eager(x_, q_; name=name) else zeta_graph(x_, q_; name=name) @@ -60211,7 +60211,7 @@ begin return res end function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - if tf.eager_mode + if tf.in_eager_mode() sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) else sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) @@ -60252,7 +60252,7 @@ begin return res[1] end function igamma_grad_a(a_, x_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() igamma_grad_a_eager(a_, x_; name=name) else igamma_grad_a_graph(a_, x_; name=name) @@ -60295,7 +60295,7 @@ begin return res[1] end function segment_max(data_, segment_ids_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() segment_max_eager(data_, segment_ids_; name=name) else segment_max_graph(data_, segment_ids_; name=name) @@ -60341,7 +60341,7 @@ begin return res[1] end function range(start_, limit_, delta_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() range_eager(start_, limit_, delta_; name=name) else range_graph(start_, limit_, delta_; name=name) @@ -60400,7 +60400,7 @@ begin return res end function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.eager_mode + if tf.in_eager_mode() retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, 
table_name=table_name, num_shards=num_shards, shard_id=shard_id) else retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) @@ -60434,7 +60434,7 @@ begin return res[1] end function flush_summary_writer(writer_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() flush_summary_writer_eager(writer_; name=name) else flush_summary_writer_graph(writer_; name=name) @@ -60484,7 +60484,7 @@ begin return res[1] end function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) - if tf.eager_mode + if tf.in_eager_mode() dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) else dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) @@ -60529,7 +60529,7 @@ begin return res end function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) else sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) @@ -60575,7 +60575,7 @@ begin return res[1] end function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.eager_mode + if tf.in_eager_mode() iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) else iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) @@ -60629,7 +60629,7 @@ begin return res[1] end function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) else sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) @@ -60671,7 +60671,7 @@ begin return res[1] end function prevent_gradient(input_; name=nothing, message=nothing) - if tf.eager_mode + if tf.in_eager_mode() prevent_gradient_eager(input_; name=name, message=message) else prevent_gradient_graph(input_; name=name, message=message) @@ -60710,7 +60710,7 @@ begin return res end function lookup_table_export(table_handle_; name=nothing) - if tf.eager_mode + if tf.in_eager_mode() lookup_table_export_eager(table_handle_; name=name) else lookup_table_export_graph(table_handle_; name=name) diff --git a/src/ops/math.jl b/src/ops/math.jl index 56bfa457..8aadfcc4 100644 --- a/src/ops/math.jl +++ b/src/ops/math.jl @@ -202,7 +202,7 @@ for reduction in [:sum, :prod, :min, :max, :all, :any, :mean] if name === nothing name = get_name("reduce") end - if eager_mode + if in_eager_mode() if axis === nothing n_value = Array(n) # TODO use shape functions instead num_axis = length(size(n_value)) diff --git a/src/ops/sequences.jl b/src/ops/sequences.jl index f6a61822..eb1fd9a7 100644 --- a/src/ops/sequences.jl +++ b/src/ops/sequences.jl @@ -25,7 +25,7 @@ convert_eltype(x, dtype) = x else value = convert_eltype(value, dtype) end - if eager_mode + if in_eager_mode() EagerTensor(value) else Ops.const_(; value = value, dtype = dtype, kwargs...) diff --git a/src/ops/summaries.jl b/src/ops/summaries.jl index a9673434..3ca0a34b 100644 --- a/src/ops/summaries.jl +++ b/src/ops/summaries.jl @@ -20,7 +20,7 @@ for (jl_func, op) in [ ] @eval @tf.op function $jl_func(args...; collections=[:Summaries], step=0, kwargs...) res = tf.Ops.$op(args...; kwargs...) 
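The hand-written wrappers in src/ops/math.jl and src/ops/sequences.jl above get the same treatment as the generated ones, and the sequences.jl hunk shows the eager constant path: in eager mode a constant is materialized immediately as an `EagerTensor` instead of emitting a graph `Const` node. A schematic of that branch, assuming the names exactly as in the hunk (`make_constant` is a hypothetical stand-in for the real wrapper, whose full signature is not shown here):

    # Schematic of the eager/graph split for constants; `make_constant` is a
    # placeholder name, and `convert_eltype` is the helper from sequences.jl.
    function make_constant(value; dtype, kwargs...)
        value = convert_eltype(value, dtype)
        if in_eager_mode()
            EagerTensor(value)        # materialized immediately, no graph node
        else
            Ops.const_(; value=value, dtype=dtype, kwargs...)   # graph-mode Const
        end
    end

The src/ops/summaries.jl hunk continues below.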
-        if tf.eager_mode
+        if tf.in_eager_mode()
             tf.summary.record_summary(tf.item(res), step=step)
         else
             foreach(c->tf.add_to_collection(c, res), collections)
diff --git a/src/summary_writer.jl b/src/summary_writer.jl
index 0621f405..e325806b 100644
--- a/src/summary_writer.jl
+++ b/src/summary_writer.jl
@@ -29,7 +29,7 @@ Arguments:
 * graph: A `Graph` object.
 """
 function FileWriter(log_dir::AbstractString; graph=nothing)
-    if !tf.eager_mode && graph===nothing
+    if !tf.in_eager_mode() && graph===nothing
         graph = get_def_graph()
     end
     mkpath(log_dir)
@@ -64,9 +64,9 @@ function Base.write(writer::FileWriter, event::tensorflow.Event)
     proto_length_bytes = read(buffer)
     proto_length_bytes_rev = reverse(proto_length_bytes)
     write(file, proto_length_bytes)
-    write(file, CRC.masked_crc(proto_length_bytes))
+    write(file, masked_crc(proto_length_bytes))
     write(file, proto)
-    write(file, CRC.masked_crc(proto))
+    write(file, masked_crc(proto))
     flush(file)
     nothing
 end
@@ -100,15 +100,26 @@ function Base.write(writer::FileWriter, graph::Graph)
     write(writer, event)
 end
 
-default_file_writer = nothing
 function set_default(writer::FileWriter)
-    # todo use context
-    global default_file_writer = writer
+    context = tf.Context()
+    context.attrs["default_file_writer"] = writer
+    push!(tf.global_context, context)
+end
+
+function with_default(writer::FileWriter, block)
+    context = tf.Context()
+    context.attrs["default_file_writer"] = writer
+    tf.with_context(context, block)
+end
+
+function get_default_file_writer()
+    return tf.global_context["default_file_writer"]
 end
 
 function record_summary(summary_pb; step=0)
-    default_file_writer === nothing && return
-    write(default_file_writer, summary_pb, step)
+    writer = get_default_file_writer()
+    writer === nothing && return
+    write(writer, summary_pb, step)
 end

From c42d1424b1f994ee924a0d12111a4e6c7d4a98fd Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Tue, 26 Feb 2019 21:18:00 -0500
Subject: [PATCH 27/49] Move tape to context system

---
 src/eager.jl            |   14 +-
 src/generate_ops.jl     |    6 +-
 src/ops/imported_ops.jl | 6890 ++++++++++++++++++++++++++-------------
 src/tape.jl             |   24 +-
 4 files changed, 4621 insertions(+), 2313 deletions(-)

diff --git a/src/eager.jl b/src/eager.jl
index 873d29f7..544301f1 100644
--- a/src/eager.jl
+++ b/src/eager.jl
@@ -145,6 +145,12 @@ function add_input(op::EagerOp, h::TensorHandle)
     return
 end
 
+function add_input(op::EagerOp, hs::Vector{TensorHandle})
+    for h in hs
+        add_input(op, h)
+    end
+end
+
 function execute(op::EagerOp)
     op_desc = get_op_def(op.op_name)
     n_outputs = length(op_desc.output_arg)
@@ -348,9 +354,8 @@ end
 function Base.getindex(c::ContextStack, name)
     value = nothing
     for context in c.contexts
-        new_value = get(context.attrs, name, nothing)
-        if new_value !== nothing
-            value = new_value
+        if name in keys(context.attrs)
+            value = context.attrs[name]
         end
     end
     return value
@@ -366,8 +371,9 @@ end
 function with_context(ctx, block)
     push!(global_context, ctx)
-    block()
+    res = block()
     pop!(global_context)
+    return res
 end
 
 function get_eager_context()
diff --git a/src/generate_ops.jl b/src/generate_ops.jl
index c561df86..25cf9cda 100644
--- a/src/generate_ops.jl
+++ b/src/generate_ops.jl
@@ -256,8 +256,10 @@ function to_function(op::tensorflow.OpDef)
             $(t_block...)
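Two details of the context plumbing above are easy to miss. `Base.getindex(::ContextStack, name)` scans the whole stack and lets later entries overwrite earlier ones, so the innermost (most recently pushed) context that defines an attribute wins; and `with_context` now returns the block's result, so scoped code no longer loses its value. Note also that the `pop!` is not in a `try`/`finally`, so an error inside the block leaves the context pushed. A hypothetical usage sketch of the context-scoped summary writer, assuming the `tf.Context`/`tf.global_context` API exactly as in the hunks above (`writer` is a FileWriter; `summary_pb` stands for a previously built Summary protobuf):

    ctx = tf.Context()
    ctx.attrs["default_file_writer"] = writer
    result = tf.with_context(ctx, () -> begin
        # The innermost binding wins during lookup:
        @assert tf.global_context["default_file_writer"] === writer
        record_summary(summary_pb, step=1)    # routed to `writer`
    end)
    # `ctx` is popped on the way out, restoring any outer default writer.

`with_default(writer, block)` packages exactly this push/run/pop; because the block is its second argument, Julia's do-syntax (which passes the function first) does not apply to it, and callers pass a closure explicitly. The src/generate_ops.jl hunk resumes below.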
res = tf.execute(desc) node = tf.TapeNode($jl_name, [$(inputs[2:end]...)], $(inputs[1].args...), res) - tf.add_node(res[1], node) - $eager_output_block + if length(res) >= 1 + tf.add_node(res[1], node) + $eager_output_block + end end end diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index fb22f952..0cab1070 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-02-24T21:47:31.178 +# Autogenerated on 2019-02-26T20:52:08.497 module Ops import TensorFlow @@ -40,8 +40,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) if tf.in_eager_mode() @@ -118,8 +120,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) if tf.in_eager_mode() @@ -172,8 +176,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -212,8 +218,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) if tf.in_eager_mode() @@ -265,8 +273,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) if tf.in_eager_mode() @@ -311,8 +321,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -365,8 +377,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + 
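The src/generate_ops.jl hunk above is the source of every regenerated hunk that follows: `to_function` now wraps the tape bookkeeping in `if length(res) >= 1`, so ops that produce no outputs never evaluate `res[1]` (a `BoundsError` on an empty result vector). Hand-expanded, the emitted eager wrapper now has this shape (illustrative; `example_op` and "ExampleOp" are placeholders, with the dispatcher defined as in the pattern sketched earlier):

    # Illustrative hand-expansion of a regenerated one-output wrapper.
    function example_op_eager(x_; name=nothing)
        desc = tf.EagerOp("ExampleOp")           # placeholder op name
        tf.add_input(desc, x_)
        res = tf.execute(desc)                   # Vector of output tensor handles
        node = tf.TapeNode(example_op, [x_], name=name, res)
        if length(res) >= 1                      # some ops legitimately have no outputs
            tf.add_node(res[1], node)            # record on the gradient tape
            return res[1]                        # single-output ops unwrap the vector
        end                                      # zero-output ops return nothing
    end

Multi-output ops return the whole `res` vector under the same guard, as in the `svd` and `lookup_table_export_v2` hunks nearby. The regenerated `ref_enter` hunk continues below.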
return res[1] + end end function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) if tf.in_eager_mode() @@ -401,8 +415,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(erf, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function erf(x_; name=nothing) if tf.in_eager_mode() @@ -440,8 +456,10 @@ begin tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function lookup_table_export_v2(table_handle_; name=nothing) if tf.in_eager_mode() @@ -476,8 +494,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(round, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function round(x_; name=nothing) if tf.in_eager_mode() @@ -524,8 +544,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -558,8 +580,10 @@ begin tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) if tf.in_eager_mode() @@ -605,8 +629,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function merge(inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -656,8 +682,10 @@ begin desc["T"] = tf.data_type(value_range_) res = tf.execute(desc) node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -692,8 +720,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(asin, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function asin(x_; name=nothing) if tf.in_eager_mode() @@ -739,8 +769,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -780,8 +812,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rsqrt_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -828,8 +862,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -879,8 +915,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) if tf.in_eager_mode() @@ -929,8 +967,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -963,8 +1003,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_serialize_state(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -1004,8 +1046,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(right_shift, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function right_shift(x_, y_; name=nothing) if tf.in_eager_mode() @@ -1064,8 +1108,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -1106,8 +1152,10 @@ begin desc["T"] = tf.data_type(image_) res = tf.execute(desc) node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function encode_png(image_; name=nothing, compression=nothing) if tf.in_eager_mode() @@ -1166,8 +1214,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) if tf.in_eager_mode() @@ -1202,8 +1252,10 @@ begin 
desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(imag, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function imag(input_; name=nothing) if tf.in_eager_mode() @@ -1288,8 +1340,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -1348,8 +1402,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -1384,8 +1440,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(sign, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sign(x_; name=nothing) if tf.in_eager_mode() @@ -1420,8 +1478,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(population_count, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function population_count(x_; name=nothing) if tf.in_eager_mode() @@ -1456,8 +1516,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(neg, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function neg(x_; name=nothing) if tf.in_eager_mode() @@ -1498,8 +1560,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -1552,8 +1616,10 @@ begin desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -1614,8 +1680,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function filter_dataset(input_dataset_, 
other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -1654,8 +1722,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_length(input_; name=nothing, unit=nothing) if tf.in_eager_mode() @@ -1719,8 +1789,10 @@ begin desc["T"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -1778,8 +1850,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -1812,8 +1886,10 @@ begin tf.add_input(desc, optional_) res = tf.execute(desc) node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function optional_has_value(optional_; name=nothing) if tf.in_eager_mode() @@ -1905,8 +1981,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -2000,8 +2078,10 @@ begin desc["T"] = tf.data_type(params_) res = tf.execute(desc) node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -2038,8 +2118,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function irfft3d(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -2074,8 +2156,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(angle, [input_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function angle(input_; name=nothing) if tf.in_eager_mode() @@ -2116,8 +2200,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -2191,8 +2277,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -2227,8 +2315,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_arg, [], name=nothing, index=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _arg(; name=nothing, index=nothing) if tf.in_eager_mode() @@ -2263,8 +2353,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_square_root, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_square_root(input_; name=nothing) if tf.in_eager_mode() @@ -2312,8 +2404,10 @@ begin desc["T"] = tf.data_type(dense_) res = tf.execute(desc) node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) if tf.in_eager_mode() @@ -2367,8 +2461,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.in_eager_mode() @@ -2401,8 +2497,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(unicode_script, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unicode_script(input_; name=nothing) if tf.in_eager_mode() @@ -2442,8 +2540,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_cholesky_grad(l_, grad_; name=nothing) if tf.in_eager_mode() @@ -2491,8 +2591,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -2525,8 +2627,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_fft(input_; name=nothing) if tf.in_eager_mode() @@ -2561,8 +2665,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(sin, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sin(x_; name=nothing) if tf.in_eager_mode() @@ -2603,8 +2709,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -2670,8 +2778,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -2748,8 +2858,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -2818,8 +2930,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) if tf.in_eager_mode() @@ -2888,8 +3002,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.in_eager_mode() @@ -2963,8 +3079,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, 
message_format=nothing, sanitize=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) if tf.in_eager_mode() @@ -3009,8 +3127,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function betainc(a_, b_, x_; name=nothing) if tf.in_eager_mode() @@ -3045,8 +3165,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(guarantee_const, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function guarantee_const(input_; name=nothing) if tf.in_eager_mode() @@ -3085,8 +3207,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_bmp(contents_; name=nothing, channels=nothing) if tf.in_eager_mode() @@ -3134,8 +3258,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) if tf.in_eager_mode() @@ -3165,8 +3291,10 @@ begin desc = tf.EagerOp("ShutdownDistributedTPU") res = tf.execute(desc) node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function shutdown_distributed_tpu(; name=nothing) if tf.in_eager_mode() @@ -3199,8 +3327,10 @@ begin tf.add_input(desc, iterator_) res = tf.execute(desc) node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_stats_aggregator_summary(iterator_; name=nothing) if tf.in_eager_mode() @@ -3230,8 +3360,10 @@ begin desc = tf.EagerOp("Timestamp") res = tf.execute(desc) node = tf.TapeNode(timestamp, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function timestamp(; name=nothing) if tf.in_eager_mode() @@ -3266,8 +3398,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_exponential, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_exponential(input_; name=nothing) if tf.in_eager_mode() @@ -3308,8 +3442,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function size(input_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -3350,8 
 desc["T"] = tf.data_type(inputs_)
 res = tf.execute(desc)
 node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function add_n(inputs_; name=nothing, N=nothing)
 if tf.in_eager_mode()
@@ -3397,8 +3535,10 @@ begin
 desc["Tidx"] = tf.data_type(indices_)
 res = tf.execute(desc)
 node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing)
 if tf.in_eager_mode()
@@ -3447,8 +3587,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -3519,8 +3661,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing)
 if tf.in_eager_mode()
@@ -3569,8 +3713,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
 if tf.in_eager_mode()
@@ -3628,8 +3774,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
 if tf.in_eager_mode()
@@ -3698,8 +3846,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
 if tf.in_eager_mode()
@@ -3734,8 +3884,10 @@ begin
 desc["T"] = tf.data_type(tensor_)
 res = tf.execute(desc)
 node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function serialize_tensor(tensor_; name=nothing)
 if tf.in_eager_mode()
@@ -3775,8 +3927,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(mul, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function mul(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -3821,8 +3975,10 @@ begin
 desc["T"] = tf.data_type(labels_)
 res = tf.execute(desc)
 node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function softmax_cross_entropy_with_logits(features_, labels_; name=nothing)
 if tf.in_eager_mode()
@@ -3874,8 +4030,10 @@ begin
 desc["dtype"] = tf.data_type(updates_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing)
 if tf.in_eager_mode()
@@ -3928,8 +4086,10 @@ begin
 tf.add_input(desc, compression_type_)
 res = tf.execute(desc)
 node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing)
 if tf.in_eager_mode()
@@ -3978,8 +4138,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -4014,8 +4176,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(cosh, [x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function cosh(x_; name=nothing)
 if tf.in_eager_mode()
@@ -4100,8 +4264,10 @@ begin
 desc["U"] = tf.data_type(variance_)
 res = tf.execute(desc)
 node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
 if tf.in_eager_mode()
@@ -4148,8 +4314,10 @@ begin
 desc["T"] = tf.data_type(value_)
 res = tf.execute(desc)
 node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing)
 if tf.in_eager_mode()
@@ -4217,8 +4385,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing)
 if tf.in_eager_mode()
@@ -4272,8 +4442,10 @@ begin
 desc["Tshape"] = tf.data_type(shape_)
 res = tf.execute(desc)
 node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing)
 if tf.in_eager_mode()
@@ -4313,8 +4485,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(floor_div, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function floor_div(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -4377,8 +4551,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing)
 if tf.in_eager_mode()
@@ -4417,8 +4593,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing)
 if tf.in_eager_mode()
@@ -4457,8 +4635,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function read_variable_op(resource_; name=nothing, dtype=nothing)
 if tf.in_eager_mode()
@@ -4520,8 +4700,10 @@ begin
 desc["T2"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing)
 if tf.in_eager_mode()
@@ -4556,8 +4738,10 @@ begin
 desc["T"] = tf.data_type(features_)
 res = tf.execute(desc)
 node = tf.TapeNode(selu, [features_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function selu(features_; name=nothing)
 if tf.in_eager_mode()
@@ -4691,8 +4875,10 @@ begin
 desc["T"] = tf.data_type(reserve_space_)
 res = tf.execute(desc)
 node = tf.TapeNode(cudnn_rnn_backprop_v3, [input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
 if tf.in_eager_mode()
@@ -4737,8 +4923,10 @@ begin
 desc["Tout"] = tf.data_type(values_)
 res = tf.execute(desc)
 node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function lookup_table_insert(table_handle_, keys_, values_; name=nothing)
 if tf.in_eager_mode()
@@ -4773,8 +4961,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(complex_abs, [x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function complex_abs(x_; name=nothing)
 if tf.in_eager_mode()
@@ -4814,8 +5004,10 @@ begin
 desc["T"] = tf.data_type(rhs_)
 res = tf.execute(desc)
 node = tf.TapeNode(tridiagonal_solve, [diagonals_, rhs_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tridiagonal_solve(diagonals_, rhs_; name=nothing)
 if tf.in_eager_mode()
@@ -4860,8 +5052,10 @@ begin
 desc["Tout"] = tf.data_type(values_)
 res = tf.execute(desc)
 node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function lookup_table_import(table_handle_, keys_, values_; name=nothing)
 if tf.in_eager_mode()
@@ -4896,8 +5090,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(abs, [x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function abs(x_; name=nothing)
 if tf.in_eager_mode()
@@ -4986,8 +5182,10 @@ begin
 desc["T"] = tf.data_type(grad_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing)
 if tf.in_eager_mode()
@@ -5034,8 +5232,10 @@ begin
 desc["T"] = tf.data_type(values_)
 res = tf.execute(desc)
 node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function write_histogram_summary(writer_, step_, tag_, values_; name=nothing)
 if tf.in_eager_mode()
@@ -5072,8 +5272,10 @@ begin
 tf.add_input(desc, materialized_)
 res = tf.execute(desc)
 node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing)
 if tf.in_eager_mode()
@@ -5138,8 +5340,10 @@ begin
 desc["T"] = tf.data_type(tensor_)
 res = tf.execute(desc)
 node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing)
 if tf.in_eager_mode()
@@ -5179,8 +5383,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(greater, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function greater(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -5221,8 +5427,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function nccl_broadcast(input_; name=nothing, shape=nothing)
 if tf.in_eager_mode()
@@ -5267,8 +5475,10 @@ begin
 desc["element_dtype"] = tf.data_type(tensor_)
 res = tf.execute(desc)
 node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing)
 if tf.in_eager_mode()
@@ -5320,8 +5530,10 @@ begin
 desc["dtype"] = tf.data_type(updates_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing)
 if tf.in_eager_mode()
@@ -5374,8 +5586,10 @@ begin
 desc["Index"] = tf.data_type(size_)
 res = tf.execute(desc)
 node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function slice(input_, begin_, size_; name=nothing, Index=nothing)
 if tf.in_eager_mode()
@@ -5437,8 +5651,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
 if tf.in_eager_mode()
@@ -5487,8 +5703,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -5551,8 +5769,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing)
 if tf.in_eager_mode()
@@ -5620,8 +5840,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing)
 if tf.in_eager_mode()
@@ -5701,8 +5923,10 @@ begin
 desc["T"] = tf.data_type(out_backprop_)
 res = tf.execute(desc)
 node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing)
 if tf.in_eager_mode()
@@ -5747,8 +5971,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -5797,8 +6023,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -5860,8 +6088,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
 if tf.in_eager_mode()
@@ -5933,8 +6163,10 @@ begin
 desc["T"] = tf.data_type(filter_)
 res = tf.execute(desc)
 node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing)
 if tf.in_eager_mode()
@@ -6032,8 +6264,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing)
 if tf.in_eager_mode()
@@ -6086,8 +6320,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
 if tf.in_eager_mode()
@@ -6134,8 +6370,10 @@ begin
 desc["Tcrops"] = tf.data_type(crops_)
 res = tf.execute(desc)
 node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function batch_to_space_nd(input_, block_shape_, crops_; name=nothing)
 if tf.in_eager_mode()
@@ -6168,8 +6406,10 @@ begin
 tf.add_input(desc, input_)
 res = tf.execute(desc)
 node = tf.TapeNode(loop_cond, [input_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function loop_cond(input_; name=nothing)
 if tf.in_eager_mode()
@@ -6216,8 +6456,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing)
 if tf.in_eager_mode()
@@ -6258,8 +6500,10 @@ begin
 desc["T"] = tf.data_type(ref_)
 res = tf.execute(desc)
 node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function destroy_temporary_variable(ref_; name=nothing, var_name=nothing)
 if tf.in_eager_mode()
@@ -6356,8 +6600,10 @@ begin
 desc["T"] = tf.data_type(params_)
 res = tf.execute(desc)
 node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing)
 if tf.in_eager_mode()
@@ -6392,8 +6638,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(ref_identity, [input_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function ref_identity(input_; name=nothing)
 if tf.in_eager_mode()
@@ -6463,8 +6711,10 @@ begin
 desc["T"] = tf.data_type(grad_)
 res = tf.execute(desc)
 node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
 if tf.in_eager_mode()
@@ -6529,8 +6779,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
 if tf.in_eager_mode()
@@ -6589,8 +6841,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing)
 if tf.in_eager_mode()
@@ -6653,8 +6907,10 @@ begin
 desc["T"] = tf.data_type(out_backprop_)
 res = tf.execute(desc)
 node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing)
 if tf.in_eager_mode()
@@ -6689,8 +6945,10 @@ begin
 desc["T"] = tf.data_type(data_)
 res = tf.execute(desc)
 node = tf.TapeNode(ref_exit, [data_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function ref_exit(data_; name=nothing)
 if tf.in_eager_mode()
@@ -6749,8 +7007,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
 if tf.in_eager_mode()
@@ -6787,8 +7047,10 @@ begin
 tf.add_input(desc, sample_rate_)
 res = tf.execute(desc)
 node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function encode_wav(audio_, sample_rate_; name=nothing)
 if tf.in_eager_mode()
@@ -6831,8 +7093,10 @@ begin
 desc["T"] = tf.data_type(tensor_)
 res = tf.execute(desc)
 node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing)
 if tf.in_eager_mode()
@@ -6881,8 +7145,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing)
 if tf.in_eager_mode()
@@ -6928,8 +7194,10 @@ begin
 desc["Tindex"] = tf.data_type(num_upper_)
 res = tf.execute(desc)
 node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function matrix_band_part(input_, num_lower_, num_upper_; name=nothing)
 if tf.in_eager_mode()
@@ -6976,8 +7244,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing)
 if tf.in_eager_mode()
@@ -7029,8 +7299,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function shape_n(input_; name=nothing, N=nothing, out_type=nothing)
 if tf.in_eager_mode()
@@ -7119,8 +7391,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing)
 if tf.in_eager_mode()
@@ -7165,8 +7439,10 @@ begin
 desc["T"] = tf.data_type(values_)
 res = tf.execute(desc)
 node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function concat(concat_dim_, values_; name=nothing, N=nothing)
 if tf.in_eager_mode()
@@ -7213,8 +7489,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing)
 if tf.in_eager_mode()
@@ -7255,8 +7533,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function identity_reader(; name=nothing, container=nothing, shared_name=nothing)
 if tf.in_eager_mode()
@@ -7291,8 +7571,10 @@ begin
 desc["T"] = tf.data_type(features_)
 res = tf.execute(desc)
 node = tf.TapeNode(softplus, [features_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function softplus(features_; name=nothing)
 if tf.in_eager_mode()
@@ -7363,8 +7645,10 @@ begin
 desc["Tindices"] = tf.data_type(indices_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
 if tf.in_eager_mode()
@@ -7490,8 +7774,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing)
 if tf.in_eager_mode()
@@ -7526,8 +7812,10 @@ begin
 desc["T"] = tf.data_type(diagonal_)
 res = tf.execute(desc)
 node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function matrix_diag(diagonal_; name=nothing)
 if tf.in_eager_mode()
@@ -7557,8 +7845,10 @@ begin
 desc = tf.EagerOp("Fact")
 res = tf.execute(desc)
 node = tf.TapeNode(fact, [], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function fact(; name=nothing)
 if tf.in_eager_mode()
@@ -7611,8 +7901,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(shard_dataset, [input_dataset_, num_shards_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -7681,8 +7973,10 @@ begin
 desc["T"] = tf.data_type(grad_)
 res = tf.execute(desc)
 node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
 if tf.in_eager_mode()
@@ -7727,8 +8021,10 @@ begin
 desc["T"] = tf.data_type(original_image_)
 res = tf.execute(desc)
 node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing)
 if tf.in_eager_mode()
@@ -7776,8 +8072,10 @@ begin
 desc["Tidx"] = tf.data_type(crops_)
 res = tf.execute(desc)
 node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function batch_to_space(input_, crops_; name=nothing, block_size=nothing)
 if tf.in_eager_mode()
@@ -7816,8 +8114,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function optional_from_value(components_; name=nothing, Toutput_types=nothing)
 if tf.in_eager_mode()
@@ -7857,8 +8157,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(xlogy, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function xlogy(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -7898,8 +8200,10 @@ begin
 desc["T"] = tf.data_type(b_)
 res = tf.execute(desc)
 node = tf.TapeNode(cross, [a_, b_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function cross(a_, b_; name=nothing)
 if tf.in_eager_mode()
@@ -7939,8 +8243,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function bitwise_and(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -7981,8 +8287,10 @@ begin
 desc["Tidx"] = tf.data_type(shape_)
 res = tf.execute(desc)
 node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function broadcast_to(input_, shape_; name=nothing)
 if tf.in_eager_mode()
@@ -8022,8 +8330,10 @@ begin
 desc["T"] = tf.data_type(outputs_)
 res = tf.execute(desc)
 node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function elu_grad(gradients_, outputs_; name=nothing)
 if tf.in_eager_mode()
@@ -8149,8 +8459,10 @@ begin
 desc["T"] = tf.data_type(reserve_space_)
 res = tf.execute(desc)
 node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing)
 if tf.in_eager_mode()
@@ -8189,8 +8501,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing)
 if tf.in_eager_mode()
@@ -8249,8 +8563,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing)
 if tf.in_eager_mode()
@@ -8285,8 +8601,10 @@ begin
 desc["T"] = tf.data_type(features_)
 res = tf.execute(desc)
 node = tf.TapeNode(relu, [features_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function relu(features_; name=nothing)
 if tf.in_eager_mode()
@@ -8331,8 +8649,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function nth_element(input_, n_; name=nothing, reverse=nothing)
 if tf.in_eager_mode()
@@ -8367,8 +8687,10 @@ begin
 desc["T"] = tf.data_type(features_)
 res = tf.execute(desc)
 node = tf.TapeNode(softsign, [features_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function softsign(features_; name=nothing)
 if tf.in_eager_mode()
@@ -8451,8 +8773,10 @@ begin
 desc["key_dtype"] = tf.data_type(empty_key_)
 res = tf.execute(desc)
 node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing)
 if tf.in_eager_mode()
@@ -8482,8 +8806,10 @@ begin
 desc = tf.EagerOp("_ShutdownDistributedTPU")
 res = tf.execute(desc)
 node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function _shutdown_distributed_tpu(; name=nothing)
 if tf.in_eager_mode()
@@ -8523,8 +8849,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(polygamma, [a_, x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function polygamma(a_, x_; name=nothing)
 if tf.in_eager_mode()
@@ -8571,8 +8899,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing)
 if tf.in_eager_mode()
@@ -8620,8 +8950,10 @@ begin
 desc["Tidx"] = tf.data_type(dimension_)
 res = tf.execute(desc)
 node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function arg_max(input_, dimension_; name=nothing, output_type=nothing)
 if tf.in_eager_mode()
@@ -8661,8 +8993,10 @@ begin
 desc["T"] = tf.data_type(diagonal_)
 res = tf.execute(desc)
 node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function matrix_set_diag(input_, diagonal_; name=nothing)
 if tf.in_eager_mode()
@@ -8709,8 +9043,10 @@ begin
 desc["Tpaddings"] = tf.data_type(paddings_)
 res = tf.execute(desc)
 node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing)
 if tf.in_eager_mode()
@@ -8756,8 +9092,10 @@ begin
 tf.add_input(desc, new_shape_)
 res = tf.execute(desc)
 node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing)
 if tf.in_eager_mode()
@@ -8806,8 +9144,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -8855,8 +9195,10 @@ begin
 desc["Tidx"] = tf.data_type(axis_)
 res = tf.execute(desc)
 node = tf.TapeNode(concat_v2, [values_, axis_], name=nothing, N=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function concat_v2(values_, axis_; name=nothing, N=nothing)
 if tf.in_eager_mode()
@@ -8931,8 +9273,10 @@ begin
 desc["Tindices"] = tf.data_type(indices_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing)
 if tf.in_eager_mode()
@@ -8973,8 +9317,10 @@ begin
 desc["Tmultiples"] = tf.data_type(multiples_)
 res = tf.execute(desc)
 node = tf.TapeNode(tile, [input_, multiples_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tile(input_, multiples_; name=nothing)
 if tf.in_eager_mode()
@@ -9015,8 +9361,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(mutex_v2, [], name=nothing, container=nothing, shared_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function mutex_v2(; name=nothing, container=nothing, shared_name=nothing)
 if tf.in_eager_mode()
@@ -9065,8 +9413,10 @@ begin
 desc["T"] = tf.data_type(sparse_values_)
 res = tf.execute(desc)
 node = tf.TapeNode(serialize_many_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing)
 if tf.in_eager_mode()
@@ -9115,8 +9465,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(tpu_embedding_activations, [embedding_variable_, sliced_activations_], name=nothing, table_id=nothing, lookup_id=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing)
 if tf.in_eager_mode()
@@ -9166,8 +9518,10 @@ begin
 desc["T"] = tf.data_type(rhs_)
 res = tf.execute(desc)
 node = tf.TapeNode(batch_matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing)
 if tf.in_eager_mode()
@@ -9207,8 +9561,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(not_equal, [x_, y_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function not_equal(x_, y_; name=nothing)
 if tf.in_eager_mode()
@@ -9243,8 +9599,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(lgamma, [x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function lgamma(x_; name=nothing)
 if tf.in_eager_mode()
@@ -9327,8 +9685,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing)
 if tf.in_eager_mode()
@@ -9387,8 +9747,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(experimental_thread_pool_handle, [], name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing)
 if tf.in_eager_mode()
@@ -9423,8 +9785,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(self_adjoint_eig, [input_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function self_adjoint_eig(input_; name=nothing)
 if tf.in_eager_mode()
@@ -9468,8 +9832,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(boosted_trees_quantile_stream_resource_get_bucket_boundaries, [quantile_stream_resource_handle_], name=nothing, num_features=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing)
 if tf.in_eager_mode()
@@ -9517,8 +9883,10 @@ begin
 desc["T"] = tf.data_type(dense_)
 res = tf.execute(desc)
 node = tf.TapeNode(sparse_dense_cwise_div, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing)
 if tf.in_eager_mode()
@@ -9553,8 +9921,10 @@ begin
 desc["T"] = tf.data_type(x_)
 res = tf.execute(desc)
 node = tf.TapeNode(acos, [x_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function acos(x_; name=nothing)
 if tf.in_eager_mode()
@@ -9600,8 +9970,10 @@ begin
 desc["Tidx"] = tf.data_type(reduction_indices_)
 res = tf.execute(desc)
 node = tf.TapeNode(all, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function all(input_, reduction_indices_; name=nothing, keep_dims=nothing)
 if tf.in_eager_mode()
@@ -9641,8 +10013,10 @@ begin
 desc["T"] = tf.data_type(threshold_)
 res = tf.execute(desc)
 node = tf.TapeNode(compare_and_bitpack, [input_, threshold_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function compare_and_bitpack(input_, threshold_; name=nothing)
 if tf.in_eager_mode()
@@ -9695,8 +10069,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(var_handle_op, [], name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing)
 if tf.in_eager_mode()
@@ -9741,8 +10117,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(experimental_unique_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
 if tf.in_eager_mode()
@@ -9836,8 +10214,10 @@ begin
 desc["Tfilter"] = tf.data_type(filter_)
 res = tf.execute(desc)
 node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
 if tf.in_eager_mode()
@@ -9888,8 +10268,10 @@ begin
 desc["T"] = tf.data_type(y_)
 res = tf.execute(desc)
 node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function list_diff(x_, y_; name=nothing, out_idx=nothing)
 if tf.in_eager_mode()
@@ -9938,8 +10320,10 @@ begin
 tf.add_input(desc, filename_suffix_)
 res = tf.execute(desc)
 node = tf.TapeNode(create_summary_file_writer, [writer_, logdir_, max_queue_, flush_millis_, filename_suffix_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing)
 if tf.in_eager_mode()
@@ -9999,8 +10383,10 @@ begin
 end
 res = tf.execute(desc)
 node = tf.TapeNode(generate_vocab_remapping, [new_vocab_file_, old_vocab_file_], name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing)
 if tf.in_eager_mode()
@@ -10041,8 +10427,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(batch_matrix_inverse, [input_], name=nothing, adjoint=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function batch_matrix_inverse(input_; name=nothing, adjoint=nothing)
 if tf.in_eager_mode()
@@ -10072,8 +10460,10 @@ begin
 desc = tf.EagerOp("ControlTrigger")
 res = tf.execute(desc)
 node = tf.TapeNode(control_trigger, [], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function control_trigger(; name=nothing)
 if tf.in_eager_mode()
@@ -10103,8 +10493,10 @@ begin
 desc = tf.EagerOp("TPUOrdinalSelector")
 res = tf.execute(desc)
 node = tf.TapeNode(tpu_ordinal_selector, [], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tpu_ordinal_selector(; name=nothing)
 if tf.in_eager_mode()
@@ -10139,8 +10531,10 @@ begin
 desc["T"] = tf.data_type(input_)
 res = tf.execute(desc)
 node = tf.TapeNode(stop_gradient, [input_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function stop_gradient(input_; name=nothing)
 if tf.in_eager_mode()
@@ -10191,8 +10585,10 @@ begin
 desc["T"] = tf.data_type(value_)
 res = tf.execute(desc)
 node = tf.TapeNode(split, [split_dim_, value_], name=nothing, num_split=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function split(split_dim_, value_; name=nothing, num_split=nothing)
 if tf.in_eager_mode()
@@ -10250,8 +10646,10 @@ begin
 desc["T"] = tf.data_type(value_)
 res = tf.execute(desc)
 node = tf.TapeNode(unpack, [value_], name=nothing, num=nothing, axis=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function unpack(value_; name=nothing, num=nothing, axis=nothing)
 if tf.in_eager_mode()
@@ -10303,8 +10701,10 @@ begin
 desc["dtype"] = tf.data_type(updates_)
 res = tf.execute(desc)
 node = tf.TapeNode(resource_scatter_max, [resource_, indices_, updates_], name=nothing, dtype=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing)
 if tf.in_eager_mode()
@@ -10351,8 +10751,10 @@ begin
 desc["T"] = tf.data_type(value_)
 res = tf.execute(desc)
 node = tf.TapeNode(tensor_array_write, [handle_, index_, value_, flow_in_], name=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing)
 if tf.in_eager_mode()
@@ -10399,8 +10801,10 @@ begin
 desc["T"] = tf.data_type(value_)
 res = tf.execute(desc)
 node = tf.TapeNode(fill, [dims_, value_], name=nothing, index_type=nothing, res)
- tf.add_node(res[1], node)
- return res[1]
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 function fill(dims_, value_; name=nothing, index_type=nothing)
 if tf.in_eager_mode()
@@ -10500,8 +10904,10 @@ begin
 desc["Tbias"] = tf.data_type(bias_)
 res = tf.execute(desc)
 node = tf.TapeNode(quantized_conv2d_with_bias_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res)
- tf.add_node(res[1], node)
- return res
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
 if tf.in_eager_mode()
@@ -10536,8 +10942,10 @@ begin
 desc["T"] = tf.data_type(logits_)
 res = tf.execute(desc)
tf.TapeNode(softmax, [logits_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function softmax(logits_; name=nothing) if tf.in_eager_mode() @@ -10582,8 +10990,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(resize_bicubic, [images_, size_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -10624,8 +11034,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(infeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) if tf.in_eager_mode() @@ -10684,8 +11096,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator, [], name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -10752,8 +11166,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_csv, [records_, record_defaults_], name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) if tf.in_eager_mode() @@ -10798,8 +11214,10 @@ begin desc["Tout"] = tf.data_type(default_value_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_find, [table_handle_, keys_, default_value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) if tf.in_eager_mode() @@ -10860,8 +11278,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(shuffle_and_repeat_dataset, [input_dataset_, buffer_size_, seed_, seed2_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -10915,8 +11335,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(requantization_range_per_channel, [input_, input_min_, input_max_], name=nothing, clip_value_max=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) if tf.in_eager_mode() @@ -10961,8 +11383,10 @@ begin end res = tf.execute(desc) node = 
tf.TapeNode(experimental_unbatch_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -11025,8 +11449,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(avg_pool3d_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -11073,8 +11499,10 @@ begin desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(placeholder_with_default, [input_], name=nothing, dtype=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) if tf.in_eager_mode() @@ -11119,8 +11547,10 @@ begin desc["Tval"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(initialize_table_v2, [table_handle_, keys_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function initialize_table_v2(table_handle_, keys_, values_; name=nothing) if tf.in_eager_mode() @@ -11169,8 +11599,10 @@ begin desc["T"] = tf.data_type(set_values_) res = tf.execute(desc) node = tf.TapeNode(set_size, [set_indices_, set_values_, set_shape_], name=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -11219,8 +11651,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(assert, [condition_, data_], name=nothing, T=nothing, summarize=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) if tf.in_eager_mode() @@ -11283,8 +11717,10 @@ begin desc["T"] = tf.data_type(scores_) res = tf.execute(desc) node = tf.TapeNode(non_max_suppression_v4, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, pad_to_max_output_size=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) if tf.in_eager_mode() @@ -11368,8 +11804,10 @@ begin desc["T"] = tf.data_type(image_size_) res = tf.execute(desc) node = tf.TapeNode(sample_distorted_bounding_box_v2, [image_size_, bounding_boxes_, min_object_covered_], name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function 
sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) if tf.in_eager_mode() @@ -11430,8 +11868,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(initialize_table_from_text_file, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) if tf.in_eager_mode() @@ -11464,8 +11904,10 @@ begin tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_size(table_handle_; name=nothing) if tf.in_eager_mode() @@ -11547,8 +11989,10 @@ begin desc["T"] = tf.data_type(l2_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -11593,8 +12037,10 @@ begin desc["T"] = tf.data_type(s1_) res = tf.execute(desc) node = tf.TapeNode(broadcast_gradient_args, [s0_, s1_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function broadcast_gradient_args(s0_, s1_; name=nothing) if tf.in_eager_mode() @@ -11635,8 +12081,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(summary_writer, [], name=nothing, shared_name=nothing, container=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function summary_writer(; name=nothing, shared_name=nothing, container=nothing) if tf.in_eager_mode() @@ -11682,8 +12130,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) if tf.in_eager_mode() @@ -11734,8 +12184,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) if tf.in_eager_mode() @@ -11780,8 +12232,10 @@ begin desc["Tval"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end 
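Every hunk in this part of the patch applies the same mechanical change to the generated eager wrappers: the unconditional tf.add_node(res[1], node) / return is wrapped in a length guard, so zero-output ops (NoOp, ControlTrigger, TPUOrdinalSelector, ...) fall through and return nothing instead of erroring on res[1]. Below is a minimal, self-contained sketch of the resulting wrapper shape; TAPE, guarded_wrapper, and the NamedTuple node are hypothetical stand-ins for the package's tape, the generated wrappers, and tf.TapeNode / tf.add_node, not the actual TensorFlow.jl internals.

# Stand-in tape: in TensorFlow.jl this role is played by tf.add_node.
const TAPE = IdDict{Any,Any}()

function guarded_wrapper(res::Vector)
    # Stand-in for `node = tf.TapeNode(op, inputs, ..., res)` in the diff.
    node = (op=:guarded_wrapper, results=res)
    if length(res) >= 1
        TAPE[res[1]] = node   # stand-in for tf.add_node(res[1], node)
        return res[1]         # single-output wrappers return the first result
    end                       # zero-output ops: the `if` yields `nothing`
end

guarded_wrapper([10, 20])     # => 10, and res[1] is now on the tape
guarded_wrapper(Int[])        # => nothing, nothing recorded

Wrappers for multi-output ops (list_diff, split, unpack, reader_read, qr, ...) use the same guard but `return res` whole inside the branch, as the corresponding hunks show; the guard condition is unchanged because the tape node is still attached to res[1].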
function initialize_table(table_handle_, keys_, values_; name=nothing) if tf.in_eager_mode() @@ -11858,8 +12312,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) if tf.in_eager_mode() @@ -11917,8 +12373,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -11953,8 +12411,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(tanh, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tanh(x_; name=nothing) if tf.in_eager_mode() @@ -12005,8 +12465,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if tf.in_eager_mode() @@ -12083,8 +12545,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) if tf.in_eager_mode() @@ -12151,8 +12615,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -12194,8 +12660,10 @@ begin tf.add_input(desc, queue_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function reader_read(reader_handle_, queue_handle_; name=nothing) if tf.in_eager_mode() @@ -12240,8 +12708,10 @@ 
begin end res = tf.execute(desc) node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) if tf.in_eager_mode() @@ -12274,8 +12744,10 @@ begin tf.add_input(desc, mutex_) res = tf.execute(desc) node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mutex_lock(mutex_; name=nothing) if tf.in_eager_mode() @@ -12312,8 +12784,10 @@ begin tf.add_input(desc, new_global_step_) res = tf.execute(desc) node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function accumulator_set_global_step(handle_, new_global_step_; name=nothing) if tf.in_eager_mode() @@ -12375,8 +12849,10 @@ begin desc["T2"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) if tf.in_eager_mode() @@ -12417,8 +12893,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function squeeze(input_; name=nothing, squeeze_dims=nothing) if tf.in_eager_mode() @@ -12451,8 +12929,10 @@ begin tf.add_input(desc, patterns_) res = tf.execute(desc) node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_matching_files_dataset(patterns_; name=nothing) if tf.in_eager_mode() @@ -12493,8 +12973,10 @@ begin tf.add_input(desc, compression_type_) res = tf.execute(desc) node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) if tf.in_eager_mode() @@ -12551,8 +13033,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -12582,8 +13066,10 @@ begin desc = tf.EagerOp("NoOp") res = tf.execute(desc) node = tf.TapeNode(no_op, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function no_op(; 
name=nothing) if tf.in_eager_mode() @@ -12634,8 +13120,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) if tf.in_eager_mode() @@ -12676,8 +13164,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -12718,8 +13208,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -12772,8 +13264,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -12814,8 +13308,10 @@ begin tf.add_input(desc, buffer_size_) res = tf.execute(desc) node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) if tf.in_eager_mode() @@ -12866,8 +13362,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) if tf.in_eager_mode() @@ -12914,8 +13412,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) if tf.in_eager_mode() @@ -12960,8 +13460,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -13028,8 +13530,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, 
batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) if tf.in_eager_mode() @@ -13090,8 +13594,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -13128,8 +13634,10 @@ begin tf.add_input(desc, flow_in_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_size(handle_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -13188,8 +13696,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -13236,8 +13746,10 @@ begin desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -13310,8 +13822,10 @@ begin desc["T"] = tf.data_type(set2_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -13364,8 +13878,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) if tf.in_eager_mode() @@ -13416,8 +13932,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, 
serialized_remote_fused_graph_execute_info=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) if tf.in_eager_mode() @@ -13464,8 +13982,10 @@ begin desc["T"] = tf.data_type(backprop_val_grad_) res = tf.execute(desc) node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) if tf.in_eager_mode() @@ -13519,8 +14039,10 @@ begin desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) if tf.in_eager_mode() @@ -13592,8 +14114,10 @@ begin desc["T"] = tf.data_type(backprop_) res = tf.execute(desc) node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if tf.in_eager_mode() @@ -13656,8 +14180,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -13704,8 +14230,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) if tf.in_eager_mode() @@ -13740,8 +14268,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(relu6, [features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function relu6(features_; name=nothing) if tf.in_eager_mode() @@ -13824,8 +14354,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ 
-13890,8 +14422,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.in_eager_mode() @@ -13950,8 +14484,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -13986,8 +14522,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(invert, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function invert(x_; name=nothing) if tf.in_eager_mode() @@ -14028,8 +14566,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _unary_ops_composition(x_; name=nothing, op_names=nothing) if tf.in_eager_mode() @@ -14102,8 +14642,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -14168,8 +14710,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -14208,8 +14752,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parse_tensor(serialized_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -14262,8 +14808,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) 
- return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -14316,8 +14864,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -14375,8 +14925,10 @@ begin desc["Tout"] = tf.data_type(maxval_) res = tf.execute(desc) node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -14422,8 +14974,10 @@ begin desc["Tlabels"] = tf.data_type(labels_) res = tf.execute(desc) node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) if tf.in_eager_mode() @@ -14470,8 +15024,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -14517,8 +15073,10 @@ begin tf.add_input(desc, num_records_) res = tf.execute(desc) node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) if tf.in_eager_mode() @@ -14579,8 +15137,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) if tf.in_eager_mode() @@ -14705,8 +15265,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, 
ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.in_eager_mode() @@ -14759,8 +15321,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -14823,8 +15387,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) if tf.in_eager_mode() @@ -14871,8 +15437,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) if tf.in_eager_mode() @@ -14929,8 +15497,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) if tf.in_eager_mode() @@ -14995,8 +15565,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) if tf.in_eager_mode() @@ -15045,8 +15617,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -15087,8 +15661,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_param(; name=nothing, dtype=nothing, shape=nothing) if tf.in_eager_mode() @@ -15150,8 +15726,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], 
node) + return res[1] + end end function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.in_eager_mode() @@ -15184,8 +15762,10 @@ begin tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_iterator_get_device(resource_; name=nothing) if tf.in_eager_mode() @@ -15232,8 +15812,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) if tf.in_eager_mode() @@ -15292,8 +15874,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.in_eager_mode() @@ -15346,8 +15930,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(scale_and_translate, [images_, size_, scale_, translation_], name=nothing, kernel_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) if tf.in_eager_mode() @@ -15377,8 +15963,10 @@ begin desc = tf.EagerOp("OptionalNone") res = tf.execute(desc) node = tf.TapeNode(optional_none, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function optional_none(; name=nothing) if tf.in_eager_mode() @@ -15431,8 +16019,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -15467,8 +16057,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(elu, [features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function elu(features_; name=nothing) if tf.in_eager_mode() @@ -15521,8 +16113,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -15562,8 +16156,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(floor_mod, [x_, y_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function floor_mod(x_, y_; name=nothing) if tf.in_eager_mode() @@ -15608,8 +16204,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -15666,8 +16264,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -15727,8 +16327,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -15767,8 +16369,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_to_number(string_tensor_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -15803,8 +16407,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(snapshot, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function snapshot(input_; name=nothing) if tf.in_eager_mode() @@ -15841,8 +16447,10 @@ begin tf.add_input(desc, serialized_) res = tf.execute(desc) node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function deserialize_iterator(resource_handle_, serialized_; name=nothing) if tf.in_eager_mode() @@ -15877,8 +16485,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(atan, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function atan(x_; name=nothing) if tf.in_eager_mode() @@ -15930,8 +16540,10 @@ begin desc["T"] = tf.data_type(b_) res = tf.execute(desc) node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) if tf.in_eager_mode() @@ -15966,8 +16578,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = 
tf.TapeNode(erfc, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function erfc(x_; name=nothing) if tf.in_eager_mode() @@ -16007,8 +16621,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sigmoid_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -16079,8 +16695,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) if tf.in_eager_mode() @@ -16132,8 +16750,10 @@ begin desc["T"] = tf.data_type(scores_) res = tf.execute(desc) node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) if tf.in_eager_mode() @@ -16196,8 +16816,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.in_eager_mode() @@ -16234,8 +16856,10 @@ begin tf.add_input(desc, y_) res = tf.execute(desc) node = tf.TapeNode(logical_or, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function logical_or(x_, y_; name=nothing) if tf.in_eager_mode() @@ -16303,8 +16927,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -16369,8 +16995,10 @@ begin desc["T"] = tf.data_type(set2_values_) res = tf.execute(desc) node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -16403,8 +17031,10 @@ begin 
tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_num_records_produced(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -16443,8 +17073,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function adjust_hue(images_, delta_; name=nothing) if tf.in_eager_mode() @@ -16487,8 +17119,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) if tf.in_eager_mode() @@ -16567,8 +17201,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -16608,8 +17244,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(real_div, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function real_div(x_, y_; name=nothing) if tf.in_eager_mode() @@ -16662,8 +17300,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) if tf.in_eager_mode() @@ -16702,8 +17342,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_pop_v2(handle_; name=nothing, elem_type=nothing) if tf.in_eager_mode() @@ -16742,8 +17384,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reverse(tensor_, dims_; name=nothing) if tf.in_eager_mode() @@ -16788,8 +17432,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) if tf.in_eager_mode() @@ -16837,8 +17483,10 @@ begin desc["T"] = tf.data_type(scores_) res = tf.execute(desc) node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) if tf.in_eager_mode() @@ -16878,8 +17526,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(igamma, [a_, x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function igamma(a_, x_; name=nothing) if tf.in_eager_mode() @@ -16914,8 +17564,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(digamma, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function digamma(x_; name=nothing) if tf.in_eager_mode() @@ -16993,8 +17645,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -17041,8 +17695,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) if tf.in_eager_mode() @@ -17082,8 +17738,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sqrt_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -17150,8 +17808,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -17197,8 +17857,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function qr(input_; name=nothing, full_matrices=nothing) if tf.in_eager_mode() @@ -17268,8 +17930,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, 
[node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) if tf.in_eager_mode() @@ -17329,8 +17993,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -17365,8 +18031,10 @@ begin desc["T"] = tf.data_type(logits_) res = tf.execute(desc) node = tf.TapeNode(log_softmax, [logits_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function log_softmax(logits_; name=nothing) if tf.in_eager_mode() @@ -17405,8 +18073,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_count_up_to(resource_; name=nothing, limit=nothing) if tf.in_eager_mode() @@ -17453,8 +18123,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) if tf.in_eager_mode() @@ -17537,8 +18209,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -17591,8 +18265,10 @@ begin desc["T"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.in_eager_mode() @@ -17655,8 +18331,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + 
return res[1] + end end function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) if tf.in_eager_mode() @@ -17689,8 +18367,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_reset(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -17737,8 +18417,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -17773,8 +18455,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_determinant(input_; name=nothing) if tf.in_eager_mode() @@ -17814,8 +18498,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(less_equal, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function less_equal(x_, y_; name=nothing) if tf.in_eager_mode() @@ -17866,8 +18552,10 @@ begin desc["T"] = tf.data_type(delta_) res = tf.execute(desc) node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -17913,8 +18601,10 @@ begin desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -17949,8 +18639,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_logarithm, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_logarithm(input_; name=nothing) if tf.in_eager_mode() @@ -18003,8 +18695,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -18073,8 +18767,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) if tf.in_eager_mode() @@ -18151,8 +18847,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -18201,8 +18899,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -18286,8 +18986,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -18356,8 +19058,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -18396,8 +19100,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_pop(handle_; name=nothing, elem_type=nothing) if tf.in_eager_mode() @@ -18452,8 +19158,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -18494,8 +19202,10 @@ begin tf.add_input(desc, tree_ensemble_serialized_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, 
res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) if tf.in_eager_mode() @@ -18562,8 +19272,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) if tf.in_eager_mode() @@ -18631,8 +19343,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -18683,8 +19397,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.in_eager_mode() @@ -18719,8 +19435,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(where, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function where(input_; name=nothing) if tf.in_eager_mode() @@ -18781,8 +19499,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) if tf.in_eager_mode() @@ -18823,8 +19543,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function check_numerics(tensor_; name=nothing, message=nothing) if tf.in_eager_mode() @@ -18854,8 +19576,10 @@ begin desc = tf.EagerOp("TPUCompilationResult") res = tf.execute(desc) node = tf.TapeNode(tpu_compilation_result, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tpu_compilation_result(; name=nothing) if tf.in_eager_mode() @@ -18908,8 +19632,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], 
name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -18959,8 +19685,10 @@ begin desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) if tf.in_eager_mode() @@ -19024,8 +19752,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) if tf.in_eager_mode() @@ -19077,8 +19807,10 @@ begin desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) if tf.in_eager_mode() @@ -19119,8 +19851,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _retval(input_; name=nothing, index=nothing) if tf.in_eager_mode() @@ -19166,8 +19900,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function unique_with_counts(x_; name=nothing, out_idx=nothing) if tf.in_eager_mode() @@ -19207,8 +19943,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(add, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function add(x_, y_; name=nothing) if tf.in_eager_mode() @@ -19285,8 +20023,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -19331,8 +20071,10 @@ begin desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) node = 
tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -19389,8 +20131,10 @@ begin desc["Tlen"] = tf.data_type(size_splits_) res = tf.execute(desc) node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) if tf.in_eager_mode() @@ -19442,8 +20186,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -19501,8 +20247,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -19560,8 +20308,10 @@ begin desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -19638,8 +20388,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -19674,8 +20426,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(fft2d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fft2d(input_; name=nothing) if tf.in_eager_mode() @@ -19724,8 +20478,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -19780,8 +20536,10 @@ begin end res = 
tf.execute(desc) node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) if tf.in_eager_mode() @@ -19831,8 +20589,10 @@ begin desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) if tf.in_eager_mode() @@ -19867,8 +20627,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(real, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function real(input_; name=nothing) if tf.in_eager_mode() @@ -19935,8 +20697,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -19973,8 +20737,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rfft2d(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -20007,8 +20773,10 @@ begin tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function var_is_initialized_op(resource_; name=nothing) if tf.in_eager_mode() @@ -20049,8 +20817,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -20090,8 +20860,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(atan2, [y_, x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function atan2(y_, x_; name=nothing) if tf.in_eager_mode() @@ -20156,8 +20928,10 @@ begin desc["dtype"] = tf.data_type(rate_) res = tf.execute(desc) node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end end function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) if tf.in_eager_mode() @@ -20210,8 +20984,10 @@ begin desc["Tlen"] = tf.data_type(seq_lengths_) res = tf.execute(desc) node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) if tf.in_eager_mode() @@ -20252,8 +21028,10 @@ begin desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function outfeed_enqueue(input_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -20293,8 +21071,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(sub, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sub(x_, y_; name=nothing) if tf.in_eager_mode() @@ -20342,8 +21122,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) if tf.in_eager_mode() @@ -20397,8 +21179,10 @@ begin desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) if tf.in_eager_mode() @@ -20456,8 +21240,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -20535,8 +21321,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) if tf.in_eager_mode() @@ -20577,8 +21365,10 @@ begin desc["dtype"] = tf.data_type(ref_) res = tf.execute(desc) node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function is_variable_initialized(ref_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -20619,8 +21409,10 @@ begin end 
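
(For orientation, here is the eager branch of one wrapper, `sub`, pieced together from the fragments visible in these hunks; treat it as a hedged reconstruction rather than the verbatim generated source. Every call shown, `tf.EagerOp`, `tf.add_input`, `tf.data_type`, `tf.execute`, `tf.TapeNode`, `tf.add_node`, appears in the surrounding diff. The `desc["T"] = tf.data_type(...)` lines record the element dtype as the op's "T" attribute so the runtime dispatches the right kernel.)

function sub_eager(x_, y_; name=nothing)
    desc = tf.EagerOp("Sub")
    tf.add_input(desc, x_)          # operands become op inputs
    tf.add_input(desc, y_)
    desc["T"] = tf.data_type(x_)    # infer the "T" attr from the operands
    desc["T"] = tf.data_type(y_)
    res = tf.execute(desc)          # vector of output tensor handles
    node = tf.TapeNode(sub, [x_, y_], name=nothing, res)
    if length(res) >= 1             # the guard this patch introduces
        tf.add_node(res[1], node)
        return res[1]
    end
end
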
res = tf.execute(desc) node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -20680,8 +21472,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_concat_v2, [input_handle_, element_shape_, leading_dims_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -20778,8 +21572,10 @@ begin desc["T"] = tf.data_type(params_) res = tf.execute(desc) node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) if tf.in_eager_mode() @@ -20831,8 +21627,10 @@ begin desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -20878,8 +21676,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign_add(ref_, value_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -20924,8 +21724,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -20966,8 +21768,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bucketize(input_; name=nothing, boundaries=nothing) if tf.in_eager_mode() @@ -21020,8 +21824,10 @@ begin desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_reduce_max(input_indices_, 
input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -21079,8 +21885,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -21132,8 +21940,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) if tf.in_eager_mode() @@ -21166,8 +21976,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_close_v3(handle_; name=nothing) if tf.in_eager_mode() @@ -21216,8 +22028,10 @@ begin tf.add_input(desc, score_threshold_) res = tf.execute(desc) node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) if tf.in_eager_mode() @@ -21270,8 +22084,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function pack(values_; name=nothing, N=nothing, axis=nothing) if tf.in_eager_mode() @@ -21314,8 +22130,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) if tf.in_eager_mode() @@ -21360,8 +22178,10 @@ begin desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -21394,8 +22214,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft2d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_fft2d(input_; name=nothing) if tf.in_eager_mode() @@ -21428,8 +22250,10 @@ begin tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function close_summary_writer(writer_; name=nothing) if tf.in_eager_mode() @@ -21464,8 +22288,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(rank, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rank(input_; name=nothing) if tf.in_eager_mode() @@ -21500,8 +22326,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(fft3d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fft3d(input_; name=nothing) if tf.in_eager_mode() @@ -21577,8 +22405,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -21619,8 +22449,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) if tf.in_eager_mode() @@ -21671,8 +22503,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) if tf.in_eager_mode() @@ -21711,8 +22545,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function variable_shape(input_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -21771,8 +22607,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -21825,8 +22663,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -21863,8 +22703,10 @@ begin tf.add_input(desc, tree_config_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, 
tree_config_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) if tf.in_eager_mode() @@ -21928,8 +22770,10 @@ begin desc["Targmax"] = tf.data_type(argmax_) res = tf.execute(desc) node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -21973,8 +22817,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ref_switch(data_, pred_; name=nothing) if tf.in_eager_mode() @@ -22007,8 +22853,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sdca_fprint(input_; name=nothing) if tf.in_eager_mode() @@ -22065,8 +22913,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_choose_fastest_dataset, [input_datasets_], name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -22107,8 +22957,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function leaky_relu(features_; name=nothing, alpha=nothing) if tf.in_eager_mode() @@ -22147,8 +22999,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function identity_n(input_; name=nothing, T=nothing) if tf.in_eager_mode() @@ -22278,8 +23132,10 @@ begin desc["T"] = tf.data_type(reserve_space_) res = tf.execute(desc) node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -22327,8 +23183,10 @@ begin 
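
(A rough pure-Julia model of the tape bookkeeping these `tf.TapeNode` / `tf.add_node` lines perform, assuming nothing beyond what the diff shows: each executed op records a tape node, and `add_node` maps the op's first output back to that node so a later backward pass can walk the recorded graph. `Node`, `TAPE`, and `record!` are names invented for this sketch, not TensorFlow.jl API.)

struct Node
    op::Symbol          # which primitive produced the output
    inputs::Vector{Any} # the operands, for the backward walk
end

const TAPE = IdDict{Any,Node}()
record!(output, node::Node) = (TAPE[output] = node; output)

y = record!([4.0, 6.0], Node(:Add, Any[[1.0, 2.0], [3.0, 4.0]]))
TAPE[y].op   # -> :Add
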
desc["Tinput"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function requantization_range(input_, input_min_, input_max_; name=nothing) if tf.in_eager_mode() @@ -22368,8 +23226,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(maximum, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function maximum(x_, y_; name=nothing) if tf.in_eager_mode() @@ -22410,8 +23270,10 @@ begin desc["Tshape"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reshape(tensor_, shape_; name=nothing) if tf.in_eager_mode() @@ -22461,8 +23323,10 @@ begin desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) if tf.in_eager_mode() @@ -22503,8 +23367,10 @@ begin tf.add_input(desc, buffer_size_) res = tf.execute(desc) node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) if tf.in_eager_mode() @@ -22553,8 +23419,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.in_eager_mode() @@ -22589,8 +23457,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function hsv_to_rgb(images_; name=nothing) if tf.in_eager_mode() @@ -22639,8 +23509,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -22693,8 +23565,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end end function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -22744,8 +23618,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) if tf.in_eager_mode() @@ -22780,8 +23656,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(log, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function log(x_; name=nothing) if tf.in_eager_mode() @@ -22832,8 +23710,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) if tf.in_eager_mode() @@ -22868,8 +23748,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(deep_copy, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function deep_copy(x_; name=nothing) if tf.in_eager_mode() @@ -22914,8 +23796,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -23051,8 +23935,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) if tf.in_eager_mode() @@ -23087,8 +23973,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(sinh, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) 
>= 1 + tf.add_node(res[1], node) + return res[1] + end end function sinh(x_; name=nothing) if tf.in_eager_mode() @@ -23141,8 +24029,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -23189,8 +24079,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -23229,8 +24121,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -23263,8 +24157,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_size_v2(handle_; name=nothing) if tf.in_eager_mode() @@ -23299,8 +24195,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(expm1, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function expm1(x_; name=nothing) if tf.in_eager_mode() @@ -23343,8 +24241,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) if tf.in_eager_mode() @@ -23393,8 +24293,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -23427,8 +24329,10 @@ begin tf.add_input(desc, contents_) res = tf.execute(desc) node = tf.TapeNode(decode_gif, [contents_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_gif(contents_; name=nothing) if tf.in_eager_mode() @@ -23557,8 +24461,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, 
Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) if tf.in_eager_mode() @@ -23604,8 +24510,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) if tf.in_eager_mode() @@ -23646,8 +24554,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function shape(input_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -23696,8 +24606,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -23750,8 +24662,10 @@ begin desc["T"] = tf.data_type(image_) res = tf.execute(desc) node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) if tf.in_eager_mode() @@ -23791,8 +24705,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reciprocal_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -23838,8 +24754,10 @@ begin desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) if tf.in_eager_mode() @@ -23898,8 +24816,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, 
use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.in_eager_mode() @@ -23934,8 +24854,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(exit, [data_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function exit(data_; name=nothing) if tf.in_eager_mode() @@ -23994,8 +24916,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) if tf.in_eager_mode() @@ -24058,8 +24982,10 @@ begin desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) if tf.in_eager_mode() @@ -24108,8 +25034,10 @@ begin desc["element_dtype"] = tf.data_type(item_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -24144,8 +25072,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(rsqrt, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rsqrt(x_; name=nothing) if tf.in_eager_mode() @@ -24259,8 +25189,10 @@ begin desc["Tsummand"] = tf.data_type(summand_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -24293,8 +25225,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function delete_session_tensor(handle_; name=nothing) if tf.in_eager_mode() @@ -24357,8 +25291,10 @@ begin desc["T"] = tf.data_type(off_value_) res = tf.execute(desc) node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing, res) - tf.add_node(res[1], node) - return res[1] + 
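
(Not every wrapper unwraps the result: multi-output ops in these hunks, e.g. `stateless_if` aside, the `quantized_conv2d_*` family, `qr`, `try_rpc`, `cudnn_rnnv2`, keep `return res`, while single-output ops return `res[1]`; both forms now sit behind the same guard, and in either case only the first output is registered on the tape. A stubbed illustration, with `fake_execute` a hypothetical stand-in:)

fake_execute(outputs...) = collect(outputs)

unwrap_single(res) = length(res) >= 1 ? res[1] : nothing
unwrap_multi(res)  = length(res) >= 1 ? res    : nothing

unwrap_multi(fake_execute(:q, :r))   # QR-style, two outputs -> [:q, :r]
unwrap_single(fake_execute(:sum))    # Add-style, one output  -> :sum
unwrap_single(fake_execute())        # output-free op         -> nothing
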
if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) if tf.in_eager_mode() @@ -24431,8 +25367,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -24560,8 +25498,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) if tf.in_eager_mode() @@ -24610,8 +25550,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -24670,8 +25612,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) if tf.in_eager_mode() @@ -24731,8 +25675,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) if tf.in_eager_mode() @@ -24773,8 +25719,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function whole_file_reader(; name=nothing, container=nothing, 
shared_name=nothing) if tf.in_eager_mode() @@ -24850,8 +25798,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -24890,8 +25840,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function adjust_saturation(images_, scale_; name=nothing) if tf.in_eager_mode() @@ -24930,8 +25882,10 @@ begin desc["Tin"] = tf.data_type(keys_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_remove_v2(table_handle_, keys_; name=nothing) if tf.in_eager_mode() @@ -24970,8 +25924,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) if tf.in_eager_mode() @@ -25020,8 +25976,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -25094,8 +26052,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -25185,8 +26145,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_with_bias, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -25233,8 +26195,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - 
return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -25269,8 +26233,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(identity, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function identity(input_; name=nothing) if tf.in_eager_mode() @@ -25333,8 +26299,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) if tf.in_eager_mode() @@ -25393,8 +26361,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) if tf.in_eager_mode() @@ -25444,8 +26414,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) if tf.in_eager_mode() @@ -25506,8 +26478,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) if tf.in_eager_mode() @@ -25540,8 +26514,10 @@ begin tf.add_input(desc, request_) res = tf.execute(desc) node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function worker_heartbeat(request_; name=nothing) if tf.in_eager_mode() @@ -25584,8 +26560,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) if tf.in_eager_mode() @@ -25624,8 +26602,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function collective_permute(input_, source_target_pairs_; name=nothing) if tf.in_eager_mode() @@ -25686,8 +26666,10 @@ begin desc["T"] = tf.data_type(input_max_) 
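# [editor's note, not part of the patch] At this point in each generated wrapper the op
# description is fully populated: the inputs were registered with tf.add_input(desc, ...)
# and type attributes such as desc["T"] were inferred from the runtime inputs via
# tf.data_type(...). The res = tf.execute(desc) that follows runs the op eagerly and
# yields its outputs; res can be empty for ops that produce none, which is the case the
# length(res) >= 1 guard introduced throughout this patch accounts for.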
res = tf.execute(desc) node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) if tf.in_eager_mode() @@ -25746,8 +26728,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.in_eager_mode() @@ -25787,8 +26771,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function softplus_grad(gradients_, features_; name=nothing) if tf.in_eager_mode() @@ -25853,8 +26839,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -25901,8 +26889,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -25935,8 +26925,10 @@ begin tf.add_input(desc, json_examples_) res = tf.execute(desc) node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_json_example(json_examples_; name=nothing) if tf.in_eager_mode() @@ -26020,8 +27012,10 @@ begin desc["U"] = tf.data_type(reserve_space_2_) res = tf.execute(desc) node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.in_eager_mode() @@ -26074,8 +27068,10 @@ begin desc["SrcT"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 
+ tf.add_node(res[1], node) + return res[1] + end end function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) if tf.in_eager_mode() @@ -26122,8 +27118,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) if tf.in_eager_mode() @@ -26186,8 +27184,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) if tf.in_eager_mode() @@ -26238,8 +27238,10 @@ begin desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) if tf.in_eager_mode() @@ -26292,8 +27294,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -26328,8 +27332,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(conj, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conj(input_; name=nothing) if tf.in_eager_mode() @@ -26374,8 +27380,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -26412,8 +27420,10 @@ begin tf.add_input(desc, iterator_) res = tf.execute(desc) node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function make_iterator(dataset_, iterator_; name=nothing) if tf.in_eager_mode() @@ -26450,8 +27460,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rfft3d(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -26509,8 +27521,10 @@ begin desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], 
name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -26569,8 +27583,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(collective_gather, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) if tf.in_eager_mode() @@ -26634,8 +27650,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(combined_non_max_suppression, [boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_], name=nothing, pad_per_class=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) if tf.in_eager_mode() @@ -26694,8 +27712,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) if tf.in_eager_mode() @@ -26760,8 +27780,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -26828,8 +27850,10 @@ begin desc["Treal"] = tf.data_type(thresh_) res = tf.execute(desc) node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) if tf.in_eager_mode() @@ -26877,8 +27901,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) if tf.in_eager_mode() @@ -26925,8 +27951,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + 
end end function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) if tf.in_eager_mode() @@ -26959,8 +27987,10 @@ begin tf.add_input(desc, mutex_lock_) res = tf.execute(desc) node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function consume_mutex_lock(mutex_lock_; name=nothing) if tf.in_eager_mode() @@ -27000,8 +28030,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(greater_equal, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function greater_equal(x_, y_; name=nothing) if tf.in_eager_mode() @@ -27062,8 +28094,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) if tf.in_eager_mode() @@ -27108,8 +28142,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -27149,8 +28185,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(equal, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function equal(x_, y_; name=nothing) if tf.in_eager_mode() @@ -27195,8 +28233,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -27253,8 +28293,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -27330,8 +28372,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, 
seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -27378,8 +28422,10 @@ begin desc["Tindices"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_nd(indices_, updates_, shape_; name=nothing) if tf.in_eager_mode() @@ -27428,8 +28474,10 @@ begin desc["element_dtype"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_scatter_into_existing_list, [input_handle_, tensor_, indices_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -27473,8 +28521,10 @@ begin desc["T"] = tf.data_type(e_) res = tf.execute(desc) node = tf.TapeNode(select, [condition_, t_, e_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function select(condition_, t_, e_; name=nothing) if tf.in_eager_mode() @@ -27522,8 +28572,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -27592,8 +28644,10 @@ begin desc["T"] = tf.data_type(output_image_) res = tf.execute(desc) node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) if tf.in_eager_mode() @@ -27664,8 +28718,10 @@ begin desc["R"] = tf.data_type(rate_) res = tf.execute(desc) node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) if tf.in_eager_mode() @@ -27724,8 +28780,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -27792,8 +28850,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end 
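[editor's note, not part of the patch] Every hunk in this file makes the same mechanical change: the generated eager wrappers used to end in an unconditional tf.add_node(res[1], node); return res[1] (or return res), and the patch wraps that tail in a length(res) >= 1 guard. A minimal sketch of the resulting wrapper shape, modelled on the rsqrt hunk earlier in this patch. rsqrt_eager is a hypothetical name used for illustration, and the desc construction is elided from these hunks, so the constructor shown is an assumption based on the EagerOp type in src/eager.jl:

function rsqrt_eager(x_; name=nothing)
    desc = tf.EagerOp("Rsqrt")           # assumed constructor; not shown in these hunks
    tf.add_input(desc, x_)               # register the input handle on the op
    desc["T"] = tf.data_type(x_)         # infer the dtype attribute from the input
    res = tf.execute(desc)               # run eagerly; res is the vector of outputs
    node = tf.TapeNode(rsqrt, [x_], name=nothing, res)
    if length(res) >= 1                  # the change this patch applies everywhere:
        tf.add_node(res[1], node)        # tape the node only when an output exists
        return res[1]                    # single-output wrappers return the first handle
    end                                  # otherwise fall through, returning nothing
end

Multi-output ops in this patch (lu, sparse_add, ctc_beam_search_decoder, fused_batch_norm_grad_v2, and others) differ only in the tail: they return the whole res, handing back every output, while still taping only res[1]. Either way, a wrapper whose op reports no outputs now returns nothing instead of raising a BoundsError on res[1].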
end function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -27838,8 +28898,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -27888,8 +28950,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -27952,8 +29016,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.in_eager_mode() @@ -28016,8 +29082,10 @@ begin desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) if tf.in_eager_mode() @@ -28058,8 +29126,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) if tf.in_eager_mode() @@ -28092,8 +29162,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_serialize_state_v2(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -28126,8 +29198,10 @@ begin tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function wrap_dataset_variant(input_handle_; name=nothing) if tf.in_eager_mode() @@ -28206,8 +29280,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) if tf.in_eager_mode() @@ -28275,8 +29351,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -28349,8 +29427,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -28398,8 +29478,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -28444,8 +29526,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -28478,8 +29562,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_close_v2(handle_; name=nothing) if tf.in_eager_mode() @@ -28538,8 +29624,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -28611,8 +29699,10 @@ begin desc["T"] = tf.data_type(l2_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_adagrad_da(var_, 
gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -28645,8 +29735,10 @@ begin tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_size(tree_handle_; name=nothing) if tf.in_eager_mode() @@ -28681,8 +29773,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_diag_part, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_diag_part(input_; name=nothing) if tf.in_eager_mode() @@ -28715,8 +29809,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_num_work_units_completed_v2(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -28763,8 +29859,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -28823,8 +29921,10 @@ begin desc["T"] = tf.data_type(default_value_) res = tf.execute(desc) node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -28865,8 +29965,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tpu_replicated_input(inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -28899,8 +30001,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(stack_close, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_close(handle_; name=nothing) if tf.in_eager_mode() @@ -28944,8 +30048,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -28998,8 +30104,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end end function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -29046,8 +30154,10 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) if tf.in_eager_mode() @@ -29087,8 +30197,10 @@ begin desc["T"] = tf.data_type(s1_) res = tf.execute(desc) node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function broadcast_args(s0_, s1_; name=nothing) if tf.in_eager_mode() @@ -29135,8 +30247,10 @@ begin desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -29173,8 +30287,10 @@ begin tf.add_input(desc, pattern_) res = tf.execute(desc) node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function regex_full_match(input_, pattern_; name=nothing) if tf.in_eager_mode() @@ -29207,8 +30323,10 @@ begin tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unwrap_dataset_variant(input_handle_; name=nothing) if tf.in_eager_mode() @@ -29253,8 +30371,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function empty(shape_; name=nothing, dtype=nothing, init=nothing) if tf.in_eager_mode() @@ -29301,8 +30421,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -29342,8 +30464,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(div, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function div(x_, y_; name=nothing) if tf.in_eager_mode() @@ -29402,8 +30526,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function barrier(; name=nothing, component_types=nothing, shapes=nothing, 
capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -29443,8 +30569,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(truncate_div, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function truncate_div(x_, y_; name=nothing) if tf.in_eager_mode() @@ -29499,8 +30627,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) if tf.in_eager_mode() @@ -29539,8 +30669,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function merge_summary(inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -29573,8 +30705,10 @@ begin tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(fake_queue, [resource_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_queue(resource_; name=nothing) if tf.in_eager_mode() @@ -29609,8 +30743,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_cholesky, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_cholesky(input_; name=nothing) if tf.in_eager_mode() @@ -29663,8 +30799,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -29699,8 +30837,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(bessel_i1e, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bessel_i1e(x_; name=nothing) if tf.in_eager_mode() @@ -29737,8 +30877,10 @@ begin tf.add_input(desc, event_) res = tf.execute(desc) node = tf.TapeNode(import_event, [writer_, event_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function import_event(writer_, event_; name=nothing) if tf.in_eager_mode() @@ -29816,8 +30958,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, 
given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) if tf.in_eager_mode() @@ -29878,8 +31022,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -29926,8 +31072,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -29984,8 +31132,10 @@ begin desc["T"] = tf.data_type(set2_) res = tf.execute(desc) node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -30072,8 +31222,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) if tf.in_eager_mode() @@ -30117,8 +31269,10 @@ begin desc["T"] = tf.data_type(v_) res = tf.execute(desc) node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function inplace_update(x_, i_, v_; name=nothing) if tf.in_eager_mode() @@ -30180,8 +31334,10 @@ begin desc["T"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -30235,8 +31391,10 @@ begin desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -30278,8 +31436,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = 
tf.execute(desc) node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function gather_nd(params_, indices_; name=nothing) if tf.in_eager_mode() @@ -30320,8 +31480,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function placeholder(; name=nothing, dtype=nothing, shape=nothing) if tf.in_eager_mode() @@ -30366,8 +31528,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -30412,8 +31576,10 @@ begin desc["T"] = tf.data_type(clip_value_max_) res = tf.execute(desc) node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) if tf.in_eager_mode() @@ -30464,8 +31630,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) if tf.in_eager_mode() @@ -30523,8 +31691,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -30569,8 +31739,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_join(inputs_; name=nothing, N=nothing, separator=nothing) if tf.in_eager_mode() @@ -30622,8 +31794,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -30666,8 +31840,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end end function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) if tf.in_eager_mode() @@ -30707,8 +31883,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(left_shift, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function left_shift(x_, y_; name=nothing) if tf.in_eager_mode() @@ -30770,8 +31948,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(requantize_per_channel, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -30818,8 +31998,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) if tf.in_eager_mode() @@ -30883,8 +32065,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) if tf.in_eager_mode() @@ -30919,8 +32103,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(ifft3d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ifft3d(input_; name=nothing) if tf.in_eager_mode() @@ -30968,8 +32154,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(euclidean_norm, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -31014,8 +32202,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ref_select(index_, inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -31058,8 +32248,10 @@ begin desc["Tvalues"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) if tf.in_eager_mode() @@ -31117,8 +32309,10 @@ begin end res = tf.execute(desc) node = 
tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -31151,8 +32345,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_ifft2d(input_; name=nothing) if tf.in_eager_mode() @@ -31205,8 +32401,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.in_eager_mode() @@ -31258,8 +32456,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -31300,8 +32500,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ensure_shape(input_; name=nothing, shape=nothing) if tf.in_eager_mode() @@ -31362,8 +32564,10 @@ begin desc["T"] = tf.data_type(delta_) res = tf.execute(desc) node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -31440,8 +32644,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) if tf.in_eager_mode() @@ -31476,8 +32682,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(is_nan, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function is_nan(x_; name=nothing) if tf.in_eager_mode() @@ -31558,8 +32766,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) 
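# [editor's note, not part of the patch] The TapeNode built on the next line captures the
# wrapper function itself (here apply_ada_max), its input handles, its keyword attributes,
# and the results res, presumably enough to replay the op when the gradient tape is walked.
# tf.add_node(res[1], node) then files the node under the primary output handle, and that
# is precisely the call this patch makes conditional on res being non-empty.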
node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -31632,8 +32842,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) if tf.in_eager_mode() @@ -31714,8 +32926,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -31783,8 +32997,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -31836,8 +33052,10 @@ begin desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) if tf.in_eager_mode() @@ -31870,8 +33088,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_num_work_units_completed(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -31926,8 +33146,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) if tf.in_eager_mode() @@ -31964,8 +33186,10 @@ begin tf.add_input(desc, num_shards_) res = tf.execute(desc) node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sharded_filespec(basename_, num_shards_; name=nothing) if tf.in_eager_mode() @@ -32005,8 +33229,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function div_no_nan(x_, y_; name=nothing) if tf.in_eager_mode() @@ -32069,8 +33295,10 @@ begin desc["dtype"] = tf.data_type(gradient_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) if tf.in_eager_mode() @@ -32120,8 +33348,10 @@ begin desc["T"] = tf.data_type(rt_dense_values_) res = tf.execute(desc) node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) if tf.in_eager_mode() @@ -32174,8 +33404,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -32230,8 +33462,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) if tf.in_eager_mode() @@ -32272,8 +33506,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function const_(; name=nothing, value=nothing, dtype=nothing) if tf.in_eager_mode() @@ -32320,8 +33556,10 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) if tf.in_eager_mode() @@ -32380,8 +33618,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end end function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -32432,8 +33672,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -32527,8 +33769,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -32574,8 +33818,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function lu(input_; name=nothing, output_idx_type=nothing) if tf.in_eager_mode() @@ -32614,8 +33860,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_compressed(bytes_; name=nothing, compression_type=nothing) if tf.in_eager_mode() @@ -32654,8 +33902,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function get_session_tensor(handle_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -32708,8 +33958,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.in_eager_mode() @@ -32778,8 +34030,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if 
tf.in_eager_mode() @@ -32818,8 +34072,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) if tf.in_eager_mode() @@ -32866,8 +34122,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -32916,8 +34174,10 @@ begin tf.add_input(desc, user_name_) res = tf.execute(desc) node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) if tf.in_eager_mode() @@ -32957,8 +34217,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tanh_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -32991,8 +34253,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(decode_base64, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_base64(input_; name=nothing) if tf.in_eager_mode() @@ -33057,8 +34321,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -33105,8 +34371,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) if tf.in_eager_mode() @@ -33175,8 +34443,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) if tf.in_eager_mode() @@ -33246,8 +34516,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], 
name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) if tf.in_eager_mode() @@ -33308,8 +34580,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) if tf.in_eager_mode() @@ -33361,8 +34635,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) if tf.in_eager_mode() @@ -33439,8 +34715,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -33520,8 +34798,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -33566,8 +34846,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -33644,8 +34926,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -33698,8 +34982,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) 
node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.in_eager_mode() @@ -33734,8 +35020,10 @@ begin desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_diag(diagonal_; name=nothing) if tf.in_eager_mode() @@ -33770,8 +35058,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(is_inf, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function is_inf(x_; name=nothing) if tf.in_eager_mode() @@ -33881,8 +35171,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -33970,8 +35262,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -34013,8 +35307,10 @@ begin desc["Tidx"] = tf.data_type(dims_) res = tf.execute(desc) node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unravel_index(indices_, dims_; name=nothing) if tf.in_eager_mode() @@ -34062,8 +35358,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -34098,8 +35396,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(ifft2d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ifft2d(input_; name=nothing) if tf.in_eager_mode() @@ -34165,8 +35465,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, 
N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) if tf.in_eager_mode() @@ -34205,8 +35507,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function histogram_summary(tag_, values_; name=nothing) if tf.in_eager_mode() @@ -34248,8 +35552,10 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function segment_sum(data_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -34284,8 +35590,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(exp, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function exp(x_; name=nothing) if tf.in_eager_mode() @@ -34332,8 +35640,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) if tf.in_eager_mode() @@ -34385,8 +35695,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -34441,8 +35753,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -34477,8 +35791,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function get_session_handle_v2(value_; name=nothing) if tf.in_eager_mode() @@ -34518,8 +35834,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function relu_grad(gradients_, features_; name=nothing) if tf.in_eager_mode() @@ -34567,8 +35885,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, 
num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -34652,8 +35972,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) if tf.in_eager_mode() @@ -34702,8 +36024,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -34756,8 +36080,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -34790,8 +36116,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_num_records_produced_v2(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -34860,8 +36188,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -34907,8 +36237,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign_sub(ref_, value_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -34956,8 +36288,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -35035,8 +36369,10 @@ begin desc["T"] = tf.data_type(reserve_space_2_) res = 
tf.execute(desc) node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.in_eager_mode() @@ -35101,8 +36437,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -35192,8 +36530,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_with_bias_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -35234,8 +36574,10 @@ begin tf.add_input(desc, tree_ensemble_serialized_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) if tf.in_eager_mode() @@ -35294,8 +36636,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -35359,8 +36703,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) if tf.in_eager_mode() @@ -35408,8 +36754,10 @@ begin desc["Tidx"] = tf.data_type(dimension_) res = tf.execute(desc) node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function arg_min(input_, dimension_; name=nothing, output_type=nothing) if tf.in_eager_mode() @@ 
-35458,8 +36806,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -35497,8 +36847,10 @@ begin tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) if tf.in_eager_mode() @@ -35538,8 +36890,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(minimum, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function minimum(x_, y_; name=nothing) if tf.in_eager_mode() @@ -35589,8 +36943,10 @@ begin desc["T"] = tf.data_type(len_) res = tf.execute(desc) node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function substr(input_, pos_, len_; name=nothing, unit=nothing) if tf.in_eager_mode() @@ -35623,8 +36979,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_size(handle_; name=nothing) if tf.in_eager_mode() @@ -35705,8 +37063,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -35767,8 +37127,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -35814,8 +37176,10 @@ begin desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -35879,8 +37243,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + 
if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -35933,8 +37299,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.in_eager_mode() @@ -35974,8 +37342,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(less, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function less(x_, y_; name=nothing) if tf.in_eager_mode() @@ -36016,8 +37386,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function host_const(; name=nothing, value=nothing, dtype=nothing) if tf.in_eager_mode() @@ -36063,8 +37435,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -36111,8 +37485,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_, element_shape_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -36165,8 +37541,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.in_eager_mode() @@ -36199,8 +37577,10 @@ begin tf.add_input(desc, quantile_stream_resource_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) if tf.in_eager_mode() @@ -36246,8 +37626,10 @@ begin tf.add_input(desc, num_records_) res = tf.execute(desc) node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) if tf.in_eager_mode() @@ -36287,8 +37669,10 @@ begin desc["T"] = 
tf.data_type(imag_) res = tf.execute(desc) node = tf.TapeNode(complex, [real_, imag_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function complex(real_, imag_; name=nothing) if tf.in_eager_mode() @@ -36339,8 +37723,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -36381,8 +37767,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bitcast(input_; name=nothing, type_=nothing) if tf.in_eager_mode() @@ -36441,8 +37829,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -36560,8 +37950,10 @@ begin desc["Tinput"] = tf.data_type(gamma_) res = tf.execute(desc) node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if tf.in_eager_mode() @@ -36596,8 +37988,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(cos, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cos(x_; name=nothing) if tf.in_eager_mode() @@ -36651,8 +38045,10 @@ begin desc["Tinput"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -36701,8 +38097,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_random_dataset(seed_, seed2_; 
name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -36761,8 +38159,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) if tf.in_eager_mode() @@ -36876,8 +38276,10 @@ begin desc["Tsummand"] = tf.data_type(summand_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -36910,8 +38312,10 @@ begin tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_length(input_handle_; name=nothing) if tf.in_eager_mode() @@ -36970,8 +38374,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -37022,8 +38428,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) if tf.in_eager_mode() @@ -37082,8 +38490,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) if tf.in_eager_mode() @@ -37125,8 +38535,10 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function segment_min(data_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -37167,8 
+38579,10 @@ begin tf.add_input(desc, tensor_) res = tf.execute(desc) node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_graph_summary(writer_, step_, tensor_; name=nothing) if tf.in_eager_mode() @@ -37208,8 +38622,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cholesky_grad(l_, grad_; name=nothing) if tf.in_eager_mode() @@ -37283,8 +38699,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -37333,8 +38751,10 @@ begin desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -37381,8 +38801,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) if tf.in_eager_mode() @@ -37428,8 +38850,10 @@ begin desc["T"] = tf.data_type(inputs_) res = tf.execute(desc) node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ref_merge(inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -37479,8 +38903,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) if tf.in_eager_mode() @@ -37574,8 +39000,10 @@ begin desc["T"] = tf.data_type(biases_) res = tf.execute(desc) node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, 
dropout=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -37653,8 +39081,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -37687,8 +39117,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_close(handle_; name=nothing) if tf.in_eager_mode() @@ -37728,8 +39160,10 @@ begin desc["T"] = tf.data_type(outputs_) res = tf.execute(desc) node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function selu_grad(gradients_, outputs_; name=nothing) if tf.in_eager_mode() @@ -37780,8 +39214,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) if tf.in_eager_mode() @@ -37818,8 +39254,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rfft(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -37872,8 +39310,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -37942,8 +39382,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -37978,8 +39420,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_determinant, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_determinant(input_; name=nothing) if tf.in_eager_mode() @@ -38030,8 +39474,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, 
replace_global=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) if tf.in_eager_mode() @@ -38090,8 +39536,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -38139,8 +39587,10 @@ begin desc["T"] = tf.data_type(dense_) res = tf.execute(desc) node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) if tf.in_eager_mode() @@ -38180,8 +39630,10 @@ begin desc["T"] = tf.data_type(bias_) res = tf.execute(desc) node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bias_add_v1(value_, bias_; name=nothing) if tf.in_eager_mode() @@ -38216,8 +39668,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(invert_permutation, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function invert_permutation(x_; name=nothing) if tf.in_eager_mode() @@ -38276,8 +39730,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) if tf.in_eager_mode() @@ -38351,8 +39807,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -38411,8 +39869,10 @@ begin desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -38470,8 +39930,10 @@ begin desc["dtype"] = tf.data_type(maxval_) res = tf.execute(desc) node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], 
name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -38540,8 +40002,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -38606,8 +40070,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.in_eager_mode() @@ -38674,8 +40140,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -38722,8 +40190,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) if tf.in_eager_mode() @@ -38791,8 +40261,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -38880,8 +40352,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -38942,8 +40416,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) 
res = tf.execute(desc) node = tf.TapeNode(tensor_list_scatter_v2, [tensor_, indices_, element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -39013,8 +40489,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -39072,8 +40550,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -39120,8 +40600,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -39195,8 +40677,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -39243,8 +40727,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -39332,8 +40818,10 @@ begin desc["key_dtype"] = tf.data_type(deleted_key_) res = tf.execute(desc) node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, 
key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) if tf.in_eager_mode() @@ -39372,8 +40860,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function draw_bounding_boxes(images_, boxes_; name=nothing) if tf.in_eager_mode() @@ -39446,8 +40936,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -39500,8 +40992,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -39538,8 +41032,10 @@ begin tf.add_input(desc, state_) res = tf.execute(desc) node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_restore_state_v2(reader_handle_, state_; name=nothing) if tf.in_eager_mode() @@ -39589,8 +41085,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function top_kv2(input_, k_; name=nothing, sorted=nothing) if tf.in_eager_mode() @@ -39625,8 +41123,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(atanh, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function atanh(x_; name=nothing) if tf.in_eager_mode() @@ -39661,8 +41161,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function debug_gradient_identity(input_; name=nothing) if tf.in_eager_mode() @@ -39714,8 +41216,10 @@ begin desc["T"] = tf.data_type(backprop_val_grad_) res = tf.execute(desc) node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) if tf.in_eager_mode() @@ -39767,8 +41271,10 @@ begin desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -39803,8 +41309,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(ceil, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ceil(x_; name=nothing) if tf.in_eager_mode() @@ -39851,8 +41359,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function save(filename_, tensor_names_, data_; name=nothing, T=nothing) if tf.in_eager_mode() @@ -39910,8 +41420,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -39969,8 +41481,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -40005,8 +41519,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(zeros_like, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function zeros_like(x_; name=nothing) if tf.in_eager_mode() @@ -40082,8 +41598,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -40145,8 +41663,10 @@ begin desc["T"] = tf.data_type(truth_values_) res = tf.execute(desc) node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) if tf.in_eager_mode() @@ -40198,8 +41718,10 @@ begin desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end 
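# Note on the pattern applied throughout this generated file: every hunk wraps
# the tape registration in the same guard. A minimal before/after sketch, using
# a hypothetical single-output op `foo` (the real bodies are emitted by the op
# generator; `desc`, `res`, `tf.TapeNode`, and `tf.add_node` follow the
# surrounding code):
#
# Before, the eager path assumed at least one output handle, so an op with no
# outputs would raise a BoundsError on `res[1]`:
#
#     res = tf.execute(desc)
#     node = tf.TapeNode(foo, [x_], name=nothing, res)
#     tf.add_node(res[1], node)
#     return res[1]
#
# After, registration and the indexed return are guarded; for a zero-output op
# the `if` falls through and the wrapper implicitly returns `nothing` (a Julia
# `if` with no `else` evaluates to `nothing`):
#
#     res = tf.execute(desc)
#     node = tf.TapeNode(foo, [x_], name=nothing, res)
#     if length(res) >= 1
#         tf.add_node(res[1], node)   # record the op on the gradient tape
#         return res[1]
#     end
#
# Multi-output ops (unique_v2 just above, top_kv2, switch, and the various
# retrieve_tpu_embedding_* ops) return the whole `res` collection instead of
# `res[1]`, but receive the same guard.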
function unique_v2(x_, axis_; name=nothing, out_idx=nothing) if tf.in_eager_mode() @@ -40268,8 +41790,10 @@ begin desc["T"] = tf.data_type(input_max_) res = tf.execute(desc) node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) if tf.in_eager_mode() @@ -40334,8 +41858,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) if tf.in_eager_mode() @@ -40383,8 +41909,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_pop_back, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -40443,8 +41971,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) if tf.in_eager_mode() @@ -40519,8 +42049,10 @@ begin desc["T"] = tf.data_type(l2_) res = tf.execute(desc) node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -40584,8 +42116,10 @@ begin desc["T"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -40618,8 +42152,10 @@ begin tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function serialize_iterator(resource_handle_; 
name=nothing) if tf.in_eager_mode() @@ -40652,8 +42188,10 @@ begin tf.add_input(desc, input_dataset_) res = tf.execute(desc) node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dataset_to_graph(input_dataset_; name=nothing) if tf.in_eager_mode() @@ -40705,8 +42243,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function top_k(input_; name=nothing, k=nothing, sorted=nothing) if tf.in_eager_mode() @@ -40784,8 +42324,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -40830,8 +42372,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -40864,8 +42408,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_is_closed(handle_; name=nothing) if tf.in_eager_mode() @@ -40928,8 +42474,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -40975,8 +42523,10 @@ begin desc["Tserialized"] = tf.data_type(serialized_sparse_) res = tf.execute(desc) node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -41035,8 +42585,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -41071,8 +42623,10 @@ begin end res = 
tf.execute(desc) node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _device_arg(; name=nothing, index=nothing) if tf.in_eager_mode() @@ -41125,8 +42679,10 @@ begin desc["T"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.in_eager_mode() @@ -41169,8 +42725,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) if tf.in_eager_mode() @@ -41215,8 +42773,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) if tf.in_eager_mode() @@ -41249,8 +42809,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function accumulator_num_accumulated(handle_; name=nothing) if tf.in_eager_mode() @@ -41283,8 +42845,10 @@ begin tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_reset_v2(reader_handle_; name=nothing) if tf.in_eager_mode() @@ -41355,8 +42919,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -41414,8 +42980,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -41450,8 +43018,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(rint, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rint(x_; name=nothing) if tf.in_eager_mode() @@ 
-41509,8 +43079,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -41575,8 +43147,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) if tf.in_eager_mode() @@ -41621,8 +43195,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) if tf.in_eager_mode() @@ -41681,8 +43257,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -41754,8 +43332,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -41806,8 +43386,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) if tf.in_eager_mode() @@ -41840,8 +43422,10 @@ begin tf.add_input(desc, input_dataset_) res = tf.execute(desc) node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_dataset_cardinality(input_dataset_; name=nothing) if tf.in_eager_mode() @@ -41876,8 +43460,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(is_finite, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] 
+ end end function is_finite(x_; name=nothing) if tf.in_eager_mode() @@ -41956,8 +43542,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) if tf.in_eager_mode() @@ -42014,8 +43602,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) if tf.in_eager_mode() @@ -42071,8 +43661,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -42107,8 +43699,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_diag_part(input_; name=nothing) if tf.in_eager_mode() @@ -42157,8 +43751,10 @@ begin tf.add_input(desc, buffer_size_) res = tf.execute(desc) node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) if tf.in_eager_mode() @@ -42203,8 +43799,10 @@ begin desc["T"] = tf.data_type(elem_) res = tf.execute(desc) node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) if tf.in_eager_mode() @@ -42245,8 +43843,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) if tf.in_eager_mode() @@ -42287,8 +43887,10 @@ begin tf.add_input(desc, max_buffer_size_) res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, 
max_buffer_size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) if tf.in_eager_mode() @@ -42329,8 +43931,10 @@ begin tf.add_input(desc, max_staleness_) res = tf.execute(desc) node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) if tf.in_eager_mode() @@ -42375,8 +43979,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -42434,8 +44040,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -42476,8 +44084,10 @@ begin desc["Tperm"] = tf.data_type(perm_) res = tf.execute(desc) node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function transpose(x_, perm_; name=nothing) if tf.in_eager_mode() @@ -42512,8 +44122,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(ifft, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ifft(input_; name=nothing) if tf.in_eager_mode() @@ -42565,8 +44177,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -42599,8 +44213,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_is_closed_v2(handle_; name=nothing) if tf.in_eager_mode() @@ -42674,8 +44290,10 @@ begin desc["dtype"] = tf.data_type(maxvals_) res = tf.execute(desc) node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parameterized_truncated_normal(shape_, means_, stdevs_, 
minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.in_eager_mode() @@ -42710,8 +44328,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(diag_part, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function diag_part(input_; name=nothing) if tf.in_eager_mode() @@ -42756,8 +44376,10 @@ begin tf.add_input(desc, num_retries_per_sample_) res = tf.execute(desc) node = tf.TapeNode(kmeans_plus_plus_initialization, [points_, num_to_sample_, seed_, num_retries_per_sample_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) if tf.in_eager_mode() @@ -42804,8 +44426,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) if tf.in_eager_mode() @@ -42868,8 +44492,10 @@ begin desc["T"] = tf.data_type(b_) res = tf.execute(desc) node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) if tf.in_eager_mode() @@ -42936,8 +44562,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) if tf.in_eager_mode() @@ -43011,8 +44639,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -43070,8 +44700,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -43118,8 +44750,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = 
tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) if tf.in_eager_mode() @@ -43164,8 +44798,10 @@ begin desc["Tout"] = tf.data_type(default_value_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) if tf.in_eager_mode() @@ -43202,8 +44838,10 @@ begin tf.add_input(desc, tree_config_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) if tf.in_eager_mode() @@ -43261,8 +44899,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -43319,8 +44959,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) if tf.in_eager_mode() @@ -43370,8 +45012,10 @@ begin desc["T"] = tf.data_type(delta_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -43428,8 +45072,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -43474,8 +45120,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) if tf.in_eager_mode() @@ -43537,8 +45185,10 @@ begin end res = tf.execute(desc) node = 
tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.in_eager_mode() @@ -43590,8 +45240,10 @@ begin desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) if tf.in_eager_mode() @@ -43640,8 +45292,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -43687,8 +45341,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) if tf.in_eager_mode() @@ -43734,8 +45390,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -43770,8 +45428,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(tan, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tan(x_; name=nothing) if tf.in_eager_mode() @@ -43824,8 +45484,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) if tf.in_eager_mode() @@ -43882,8 +45544,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -43916,8 +45580,10 @@ begin tf.add_input(desc, topology_) res = tf.execute(desc) node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _set_global_tpu_array(topology_; name=nothing) if tf.in_eager_mode() @@ -43952,8 +45618,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(square, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function square(x_; name=nothing) if tf.in_eager_mode() @@ -43988,8 +45656,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function debug_gradient_ref_identity(input_; name=nothing) if tf.in_eager_mode() @@ -44060,8 +45730,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -44154,8 +45826,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -44204,8 +45878,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) if tf.in_eager_mode() @@ -44245,8 +45921,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(squared_difference, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function squared_difference(x_, y_; name=nothing) if tf.in_eager_mode() @@ -44307,8 +45985,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_take_while_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, 
output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -44361,8 +46041,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -44407,8 +46089,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dynamic_stitch(indices_, data_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -44443,8 +46127,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(ones_like, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ones_like(x_; name=nothing) if tf.in_eager_mode() @@ -44503,8 +46189,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) if tf.in_eager_mode() @@ -44559,8 +46247,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if tf.in_eager_mode() @@ -44608,8 +46298,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function gather(params_, indices_; name=nothing, validate_indices=nothing) if tf.in_eager_mode() @@ -44683,8 +46375,10 @@ begin desc["T2"] = tf.data_type(b_) res = tf.execute(desc) node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) if tf.in_eager_mode() @@ -44746,8 +46440,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) 
if tf.in_eager_mode() @@ -44816,8 +46512,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) if tf.in_eager_mode() @@ -44866,8 +46564,10 @@ begin desc["dtype"] = tf.data_type(gradient_) res = tf.execute(desc) node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -44918,8 +46618,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) if tf.in_eager_mode() @@ -45005,8 +46707,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -45071,8 +46775,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -45107,8 +46813,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(log1p, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function log1p(x_; name=nothing) if tf.in_eager_mode() @@ -45167,8 +46875,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -45220,8 +46930,10 @@ begin desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) node = 
tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -45287,8 +46999,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -45353,8 +47067,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -45425,8 +47141,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) if tf.in_eager_mode() @@ -45477,8 +47195,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -45544,8 +47264,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -45632,8 +47354,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -45670,8 
+47394,10 @@ begin tf.add_input(desc, size_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_resize, [input_handle_, size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_resize(input_handle_, size_; name=nothing) if tf.in_eager_mode() @@ -45736,8 +47462,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) if tf.in_eager_mode() @@ -45786,8 +47514,10 @@ begin tf.add_input(desc, l2_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) if tf.in_eager_mode() @@ -45820,8 +47550,10 @@ begin tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_size_v2(table_handle_; name=nothing) if tf.in_eager_mode() @@ -45858,8 +47590,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function irfft(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -45903,8 +47637,10 @@ begin desc["T"] = tf.data_type(v_) res = tf.execute(desc) node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function inplace_add(x_, i_, v_; name=nothing) if tf.in_eager_mode() @@ -45950,8 +47686,10 @@ begin desc["T"] = tf.data_type(bias_) res = tf.execute(desc) node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bias_add(value_, bias_; name=nothing, data_format=nothing) if tf.in_eager_mode() @@ -46020,8 +47758,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -46051,8 +47791,10 @@ begin desc = 
tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") res = tf.execute(desc) node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _disconnect_host_from_distributed_tpu_system(; name=nothing) if tf.in_eager_mode() @@ -46102,8 +47844,10 @@ begin desc["T"] = tf.data_type(deltas_) res = tf.execute(desc) node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ragged_range(starts_, limits_, deltas_; name=nothing) if tf.in_eager_mode() @@ -46164,8 +47908,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -46200,8 +47946,10 @@ begin desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) node = tf.TapeNode(diag, [diagonal_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function diag(diagonal_; name=nothing) if tf.in_eager_mode() @@ -46242,8 +47990,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) if tf.in_eager_mode() @@ -46292,8 +48042,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -46348,8 +48100,10 @@ begin desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -46412,8 +48166,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) if tf.in_eager_mode() @@ -46448,8 +48204,10 @@ begin desc["T"] = tf.data_type(images_) res = 
tf.execute(desc) node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function rgb_to_hsv(images_; name=nothing) if tf.in_eager_mode() @@ -46482,8 +48240,10 @@ begin tf.add_input(desc, multi_device_iterator_) res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) if tf.in_eager_mode() @@ -46540,8 +48300,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) if tf.in_eager_mode() @@ -46599,8 +48361,10 @@ begin desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -46648,8 +48412,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -46712,8 +48478,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -46757,8 +48525,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(switch, [data_, pred_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function switch(data_, pred_; name=nothing) if tf.in_eager_mode() @@ -46807,8 +48577,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) if tf.in_eager_mode() @@ -46850,8 +48622,10 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function segment_prod(data_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ 
-46897,8 +48671,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function approximate_equal(x_, y_; name=nothing, tolerance=nothing) if tf.in_eager_mode() @@ -46974,8 +48750,10 @@ begin desc["T"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -47014,8 +48792,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cross_replica_sum(input_, group_assignment_; name=nothing) if tf.in_eager_mode() @@ -47080,8 +48860,10 @@ begin desc["Tb"] = tf.data_type(b_) res = tf.execute(desc) node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) if tf.in_eager_mode() @@ -47150,8 +48932,10 @@ begin desc["T"] = tf.data_type(split_) res = tf.execute(desc) node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) if tf.in_eager_mode() @@ -47191,8 +48975,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(igammac, [a_, x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function igammac(a_, x_; name=nothing) if tf.in_eager_mode() @@ -47244,8 +49030,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) if tf.in_eager_mode() @@ -47308,8 +49096,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, 
mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) if tf.in_eager_mode() @@ -47348,8 +49138,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) if tf.in_eager_mode() @@ -47398,8 +49190,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.in_eager_mode() @@ -47436,8 +49230,10 @@ begin tf.add_input(desc, state_) res = tf.execute(desc) node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reader_restore_state(reader_handle_, state_; name=nothing) if tf.in_eager_mode() @@ -47530,8 +49326,10 @@ begin desc["T"] = tf.data_type(args_) res = tf.execute(desc) node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) if tf.in_eager_mode() @@ -47576,8 +49374,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) if tf.in_eager_mode() @@ -47642,8 +49442,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) if tf.in_eager_mode() @@ -47676,8 +49478,10 @@ begin tf.add_input(desc, filename_) res = tf.execute(desc) node = tf.TapeNode(read_file, [filename_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function read_file(filename_; name=nothing) if tf.in_eager_mode() @@ -47746,8 +49550,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) 
- return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -47800,8 +49606,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) if tf.in_eager_mode() @@ -47866,8 +49674,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -47922,8 +49732,10 @@ begin desc["shape_dtype"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(stateful_standard_normal_v2, [resource_, algorithm_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) if tf.in_eager_mode() @@ -47966,8 +49778,10 @@ begin desc["T"] = tf.data_type(weights_) res = tf.execute(desc) node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bincount(arr_, size_, weights_; name=nothing) if tf.in_eager_mode() @@ -48002,8 +49816,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(inv, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function inv(x_; name=nothing) if tf.in_eager_mode() @@ -48069,8 +49885,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -48118,8 +49936,10 @@ begin desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function gather_v2(params_, indices_, axis_; name=nothing) if tf.in_eager_mode() @@ -48156,8 +49976,10 @@ begin tf.add_input(desc, contents_) res = 
tf.execute(desc) node = tf.TapeNode(write_file, [filename_, contents_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_file(filename_, contents_; name=nothing) if tf.in_eager_mode() @@ -48195,8 +50017,10 @@ begin tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) if tf.in_eager_mode() @@ -48248,8 +50072,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) if tf.in_eager_mode() @@ -48309,8 +50135,10 @@ begin desc["T"] = tf.data_type(delta_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -48350,8 +50178,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function truncate_mod(x_, y_; name=nothing) if tf.in_eager_mode() @@ -48391,8 +50221,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function log_matrix_determinant(input_; name=nothing) if tf.in_eager_mode() @@ -48429,8 +50261,10 @@ begin tf.add_input(desc, fft_length_) res = tf.execute(desc) node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function irfft2d(input_, fft_length_; name=nothing) if tf.in_eager_mode() @@ -48492,8 +50326,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.in_eager_mode() @@ -48539,8 +50375,10 @@ begin tf.add_input(desc, k_) res = tf.execute(desc) node = tf.TapeNode(nearest_neighbors, [points_, centers_, k_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function 
nearest_neighbors(points_, centers_, k_; name=nothing) if tf.in_eager_mode() @@ -48575,8 +50413,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(floor, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function floor(x_; name=nothing) if tf.in_eager_mode() @@ -48641,8 +50481,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -48699,8 +50541,10 @@ begin desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) if tf.in_eager_mode() @@ -48739,8 +50583,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tile_grad(input_, multiples_; name=nothing) if tf.in_eager_mode() @@ -48788,8 +50634,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) if tf.in_eager_mode() @@ -48838,8 +50686,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) if tf.in_eager_mode() @@ -48917,8 +50767,10 @@ begin desc["T"] = tf.data_type(variance_) res = tf.execute(desc) node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) if tf.in_eager_mode() @@ -48955,8 +50807,10 @@ begin tf.add_input(desc, y_) res = tf.execute(desc) node = tf.TapeNode(logical_and, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function logical_and(x_, y_; name=nothing) if tf.in_eager_mode() @@ -49003,8 +50857,10 @@ begin desc["T"] 
= tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) if tf.in_eager_mode() @@ -49051,8 +50907,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -49097,8 +50955,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -49145,8 +51005,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -49191,8 +51053,10 @@ begin desc["T"] = tf.data_type(grads_) res = tf.execute(desc) node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -49263,8 +51127,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -49313,8 +51179,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_rebatch_dataset, [input_dataset_, num_workers_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -49361,8 +51229,10 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mirror_pad(input_, paddings_; name=nothing, mode=nothing) if tf.in_eager_mode() @@ -49395,8 +51265,10 @@ begin tf.add_input(desc, x_) res = tf.execute(desc) node = tf.TapeNode(logical_not, [x_], name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function logical_not(x_; name=nothing) if tf.in_eager_mode() @@ -49429,8 +51301,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_ifft(input_; name=nothing) if tf.in_eager_mode() @@ -49484,8 +51358,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.in_eager_mode() @@ -49533,8 +51409,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -49583,8 +51461,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) if tf.in_eager_mode() @@ -49684,8 +51564,10 @@ begin desc["Tbias"] = tf.data_type(bias_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_with_bias_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -49752,8 +51634,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.in_eager_mode() @@ -49799,8 +51683,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) if tf.in_eager_mode() 
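[Note on the pattern above: every hunk in this range makes the same mechanical change to a generated eager wrapper. For readability, here is the full shape of one such wrapper after the patch, reconstructed from the `floor` hunk above. This is a sketch, not the literal generated code: the `floor_eager` name and the elided input-coercion boilerplate are assumptions, since only the tail of each wrapper is visible in these hunks.

    function floor_eager(x_; name=nothing)
        desc = tf.EagerOp("Floor")
        tf.add_input(desc, x_)            # input wiring, as elsewhere in this file
        desc["T"] = tf.data_type(x_)      # dtype attribute, from the visible hunk
        res = tf.execute(desc)
        node = tf.TapeNode(floor, [x_], name=nothing, res)
        # The change this patch applies everywhere: only record on the tape and
        # index into res when the op actually produced outputs. A zero-output op
        # falls through the guard and the wrapper returns nothing, where the old
        # unconditional res[1] would have raised a BoundsError.
        if length(res) >= 1
            tf.add_node(res[1], node)
            return res[1]
        end
    end
]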
@@ -49841,8 +51727,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _device_retval(input_; name=nothing, index=nothing) if tf.in_eager_mode() @@ -49883,8 +51771,10 @@ begin desc["Tpaddings"] = tf.data_type(paddings_) res = tf.execute(desc) node = tf.TapeNode(pad, [input_, paddings_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function pad(input_, paddings_; name=nothing) if tf.in_eager_mode() @@ -49939,8 +51829,10 @@ begin desc["T"] = tf.data_type(sparse_values_) res = tf.execute(desc) node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -49988,8 +51880,10 @@ begin desc["T"] = tf.data_type(input_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) if tf.in_eager_mode() @@ -50029,8 +51923,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bitwise_xor(x_, y_; name=nothing) if tf.in_eager_mode() @@ -50070,8 +51966,10 @@ begin desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_matrix_set_diag(input_, diagonal_; name=nothing) if tf.in_eager_mode() @@ -50116,8 +52014,10 @@ begin desc["Tout"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) if tf.in_eager_mode() @@ -50170,8 +52070,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -50251,8 +52153,10 @@ begin desc["Tindices"] = tf.data_type(indices_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, 
use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -50303,8 +52207,10 @@ begin desc["T"] = tf.data_type(image_) res = tf.execute(desc) node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -50349,8 +52255,10 @@ begin desc["Tout"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) if tf.in_eager_mode() @@ -50402,8 +52310,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -50442,8 +52352,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function static_regex_full_match(input_; name=nothing, pattern=nothing) if tf.in_eager_mode() @@ -50476,8 +52388,10 @@ begin tf.add_input(desc, json_) res = tf.execute(desc) node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function gcs_configure_credentials(json_; name=nothing) if tf.in_eager_mode() @@ -50514,8 +52428,10 @@ begin tf.add_input(desc, flow_in_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_size_v3(handle_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -50567,8 +52483,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -50677,8 +52595,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, 
Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -50758,8 +52678,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -50828,8 +52750,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -50862,8 +52786,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _initialize_host_for_distributed_tpu(input_; name=nothing) if tf.in_eager_mode() @@ -50926,8 +52852,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -50973,8 +52901,10 @@ begin desc["T"] = tf.data_type(constant_values_) res = tf.execute(desc) node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function pad_v2(input_, paddings_, constant_values_; name=nothing) if tf.in_eager_mode() @@ -51015,8 +52945,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _parallel_concat_start(; name=nothing, shape=nothing, 
dtype=nothing) if tf.in_eager_mode() @@ -51055,8 +52987,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function print_v2(input_; name=nothing, output_stream=nothing) if tf.in_eager_mode() @@ -51101,8 +53035,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -51167,8 +53103,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -51224,8 +53162,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) if tf.in_eager_mode() @@ -51277,8 +53217,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) if tf.in_eager_mode() @@ -51324,8 +53266,10 @@ begin desc["T"] = tf.data_type(rhs_) res = tf.execute(desc) node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) if tf.in_eager_mode() @@ -51364,8 +53308,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) if tf.in_eager_mode() @@ -51404,8 +53350,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function adjust_contrastv2(images_, contrast_factor_; name=nothing) if tf.in_eager_mode() @@ -51458,8 +53406,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.in_eager_mode() @@ -51542,8 +53492,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) if tf.in_eager_mode() @@ -51586,8 +53538,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) if tf.in_eager_mode() @@ -51620,8 +53574,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_ifft3d(input_; name=nothing) if tf.in_eager_mode() @@ -51656,8 +53612,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(sigmoid, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sigmoid(x_; name=nothing) if tf.in_eager_mode() @@ -51699,8 +53657,10 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function segment_mean(data_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -51733,8 +53693,10 @@ begin tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) if tf.in_eager_mode() @@ -51771,8 +53733,10 @@ begin tf.add_input(desc, flow_in_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_size_v2(handle_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -51825,8 +53789,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.in_eager_mode() @@ -51881,8 +53847,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, 
learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) if tf.in_eager_mode() @@ -51941,8 +53909,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -51990,8 +53960,10 @@ begin desc["Tidx"] = tf.data_type(reduction_indices_) res = tf.execute(desc) node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) if tf.in_eager_mode() @@ -52024,8 +53996,10 @@ begin tf.add_input(desc, size_) res = tf.execute(desc) node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_identity_indexed_dataset(size_; name=nothing) if tf.in_eager_mode() @@ -52070,8 +54044,10 @@ begin desc["element_dtype"] = tf.data_type(tensor_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -52180,8 +54156,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) if tf.in_eager_mode() @@ -52234,8 +54212,10 @@ begin desc["T"] = tf.data_type(default_value_) res = tf.execute(desc) node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) if tf.in_eager_mode() @@ -52281,8 +54261,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) - tf.add_node(res[1], 
node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) if tf.in_eager_mode() @@ -52340,8 +54322,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -52420,8 +54404,10 @@ begin desc["T"] = tf.data_type(l2_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -52468,8 +54454,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) if tf.in_eager_mode() @@ -52538,8 +54526,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -52586,8 +54576,10 @@ begin desc["Taxis"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function roll(input_, shift_, axis_; name=nothing) if tf.in_eager_mode() @@ -52627,8 +54619,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(xdivy, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function xdivy(x_, y_; name=nothing) if tf.in_eager_mode() @@ -52697,8 +54691,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) if tf.in_eager_mode() @@ -52757,8 +54753,10 @@ begin desc["T"] = tf.data_type(image_) res = tf.execute(desc) node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], 
name=nothing, method=nothing, extrapolation_value=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) if tf.in_eager_mode() @@ -52826,8 +54824,10 @@ begin desc["T2"] = tf.data_type(bias_) res = tf.execute(desc) node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -52864,8 +54864,10 @@ begin tf.add_input(desc, seed_) res = tf.execute(desc) node = tf.TapeNode(kmc2chain_initialization, [distances_, seed_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function kmc2chain_initialization(distances_, seed_; name=nothing) if tf.in_eager_mode() @@ -52933,8 +54935,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -52987,8 +54991,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -53033,8 +55039,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -53101,8 +55109,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -53165,8 +55175,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) if 
tf.in_eager_mode() @@ -53210,8 +55222,10 @@ begin desc["T"] = tf.data_type(v_) res = tf.execute(desc) node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function inplace_sub(x_, i_, v_; name=nothing) if tf.in_eager_mode() @@ -53251,8 +55265,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(pow, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function pow(x_, y_; name=nothing) if tf.in_eager_mode() @@ -53303,8 +55319,10 @@ begin desc["shape_dtype"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(stateful_standard_normal, [resource_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) if tf.in_eager_mode() @@ -53339,8 +55357,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(ref_next_iteration, [data_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function ref_next_iteration(data_; name=nothing) if tf.in_eager_mode() @@ -53379,8 +55399,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scalar_summary(tags_, values_; name=nothing) if tf.in_eager_mode() @@ -53428,8 +55450,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) if tf.in_eager_mode() @@ -53464,8 +55488,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(bessel_i0e, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bessel_i0e(x_; name=nothing) if tf.in_eager_mode() @@ -53511,8 +55537,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function unique(x_; name=nothing, out_idx=nothing) if tf.in_eager_mode() @@ -53577,8 +55605,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -53619,8 +55649,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -53671,8 +55703,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.in_eager_mode() @@ -53707,8 +55741,10 @@ begin desc["T"] = tf.data_type(data_) res = tf.execute(desc) node = tf.TapeNode(next_iteration, [data_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function next_iteration(data_; name=nothing) if tf.in_eager_mode() @@ -53769,8 +55805,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(case, [branch_index_, input_], name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -53817,8 +55855,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) if tf.in_eager_mode() @@ -53871,8 +55911,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -53907,8 +55949,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(sqrt, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sqrt(x_; name=nothing) if tf.in_eager_mode() @@ -53951,8 +55995,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -54005,8 +56051,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.in_eager_mode() @@ -54041,8 +56089,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(reciprocal, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reciprocal(x_; 
name=nothing) if tf.in_eager_mode() @@ -54081,8 +56131,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) if tf.in_eager_mode() @@ -54115,8 +56167,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(string_strip, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_strip(input_; name=nothing) if tf.in_eager_mode() @@ -54169,8 +56223,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) if tf.in_eager_mode() @@ -54203,8 +56259,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function barrier_ready_size(handle_; name=nothing) if tf.in_eager_mode() @@ -54243,8 +56301,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) if tf.in_eager_mode() @@ -54298,8 +56358,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) if tf.in_eager_mode() @@ -54340,8 +56402,10 @@ begin tf.add_input(desc, num_shards_) res = tf.execute(desc) node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sharded_filename(basename_, shard_, num_shards_; name=nothing) if tf.in_eager_mode() @@ -54392,8 +56456,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) if tf.in_eager_mode() @@ -54441,8 +56507,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -54483,8 
+56551,10 @@ begin desc["T"] = tf.data_type(ref_) res = tf.execute(desc) node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function count_up_to(ref_; name=nothing, limit=nothing) if tf.in_eager_mode() @@ -54543,8 +56613,10 @@ begin desc["T"] = tf.data_type(alpha_) res = tf.execute(desc) node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) if tf.in_eager_mode() @@ -54587,8 +56659,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) if tf.in_eager_mode() @@ -54646,8 +56720,10 @@ begin desc["T"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) if tf.in_eager_mode() @@ -54708,8 +56784,10 @@ begin desc["T"] = tf.data_type(batched_tensor_) res = tf.execute(desc) node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -54744,8 +56822,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(get_session_handle, [value_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function get_session_handle(value_; name=nothing) if tf.in_eager_mode() @@ -54803,8 +56883,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -54869,8 +56951,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) if tf.in_eager_mode() @@ 
-54953,8 +57037,10 @@ begin desc["T"] = tf.data_type(lr_power_) res = tf.execute(desc) node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -55007,8 +57093,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -55069,8 +57157,10 @@ begin desc["T"] = tf.data_type(b_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) if tf.in_eager_mode() @@ -55112,8 +57202,10 @@ begin desc["Tidx"] = tf.data_type(axis_) res = tf.execute(desc) node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function reverse_v2(tensor_, axis_; name=nothing) if tf.in_eager_mode() @@ -55233,8 +57325,10 @@ begin desc["Index"] = tf.data_type(strides_) res = tf.execute(desc) node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.in_eager_mode() @@ -55267,8 +57361,10 @@ begin tf.add_input(desc, pattern_) res = tf.execute(desc) node = tf.TapeNode(matching_files, [pattern_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matching_files(pattern_; name=nothing) if tf.in_eager_mode() @@ -55307,8 +57403,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function encode_base64(input_; name=nothing, pad=nothing) if tf.in_eager_mode() @@ -55353,8 +57451,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ 
-55413,8 +57513,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -55447,8 +57549,10 @@ begin tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_to_string_handle(resource_handle_; name=nothing) if tf.in_eager_mode() @@ -55512,8 +57616,10 @@ begin desc["Targmax"] = tf.data_type(argmax_) res = tf.execute(desc) node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) if tf.in_eager_mode() @@ -55560,8 +57666,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_, element_shape_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -55618,8 +57726,10 @@ begin desc["T"] = tf.data_type(logits_) res = tf.execute(desc) node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) if tf.in_eager_mode() @@ -55666,8 +57776,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -55716,8 +57828,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -55772,8 +57886,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tpu_partitioned_call, [args_, device_ordinal_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tpu_partitioned_call(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) if 
tf.in_eager_mode() @@ -55867,8 +57983,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_and_relu_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -55913,8 +58031,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -55954,8 +58074,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function bitwise_or(x_, y_; name=nothing) if tf.in_eager_mode() @@ -56003,8 +58125,10 @@ begin desc["Tnumsegments"] = tf.data_type(num_segments_) res = tf.execute(desc) node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) if tf.in_eager_mode() @@ -56057,8 +58181,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) if tf.in_eager_mode() @@ -56121,8 +58247,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -56191,8 +58319,10 @@ begin desc["Tcond"] = tf.data_type(cond_) res = tf.execute(desc) node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -56253,8 +58383,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, 
output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -56311,8 +58443,10 @@ begin desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) if tf.in_eager_mode() @@ -56352,8 +58486,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function softsign_grad(gradients_, features_; name=nothing) if tf.in_eager_mode() @@ -56400,8 +58536,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) if tf.in_eager_mode() @@ -56448,8 +58586,10 @@ begin desc["Tidx"] = tf.data_type(num_) res = tf.execute(desc) node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function lin_space(start_, stop_, num_; name=nothing) if tf.in_eager_mode() @@ -56495,8 +58635,10 @@ begin desc["T"] = tf.data_type(update_) res = tf.execute(desc) node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) if tf.in_eager_mode() @@ -56537,8 +58679,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack(; name=nothing, elem_type=nothing, stack_name=nothing) if tf.in_eager_mode() @@ -56583,8 +58727,10 @@ begin desc["T"] = tf.data_type(elem_) res = tf.execute(desc) node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) if tf.in_eager_mode() @@ -56629,8 +58775,10 @@ begin desc["dtype"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ 
-56689,8 +58837,10 @@ begin desc["T"] = tf.data_type(values_) res = tf.execute(desc) node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) if tf.in_eager_mode() @@ -56733,8 +58883,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) if tf.in_eager_mode() @@ -56783,8 +58935,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_stack, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, num_elements=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) if tf.in_eager_mode() @@ -56817,8 +58971,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function barrier_incomplete_size(handle_; name=nothing) if tf.in_eager_mode() @@ -56867,8 +59023,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) if tf.in_eager_mode() @@ -56942,8 +59100,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) if tf.in_eager_mode() @@ -56992,8 +59152,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -57038,8 +59200,10 @@ begin desc["T"] = tf.data_type(targets_) res = tf.execute(desc) node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function in_top_k(predictions_, targets_; name=nothing, k=nothing) if tf.in_eager_mode() @@ -57092,8 +59256,10 @@ begin desc["T"] = 
tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -57128,8 +59294,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(acosh, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function acosh(x_; name=nothing) if tf.in_eager_mode() @@ -57197,8 +59365,10 @@ begin desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() @@ -57251,8 +59421,10 @@ begin desc["SrcT"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) if tf.in_eager_mode() @@ -57310,8 +59482,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) if tf.in_eager_mode() @@ -57400,8 +59574,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -57434,8 +59610,10 @@ begin tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_forest_tree_serialize(tree_handle_; name=nothing) if tf.in_eager_mode() @@ -57475,8 +59653,10 @@ begin desc["T"] = tf.data_type(x2_) res = tf.execute(desc) node = tf.TapeNode(next_after, [x1_, x2_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + 
end end function next_after(x1_, x2_; name=nothing) if tf.in_eager_mode() @@ -57509,8 +59689,10 @@ begin tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_array_close_v2(handle_; name=nothing) if tf.in_eager_mode() @@ -57587,8 +59769,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) if tf.in_eager_mode() @@ -57630,8 +59814,10 @@ begin tf.add_input(desc, queue_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function reader_read_v2(reader_handle_, queue_handle_; name=nothing) if tf.in_eager_mode() @@ -57671,8 +59857,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(mod, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function mod(x_, y_; name=nothing) if tf.in_eager_mode() @@ -57712,8 +59900,10 @@ begin desc["T"] = tf.data_type(y_) res = tf.execute(desc) node = tf.TapeNode(add_v2, [x_, y_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function add_v2(x_, y_; name=nothing) if tf.in_eager_mode() @@ -57760,8 +59950,10 @@ begin desc["Tseed"] = tf.data_type(seed_) res = tf.execute(desc) node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -57886,8 +60078,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.in_eager_mode() @@ -57940,8 +60134,10 @@ begin desc["T"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ 
-58065,8 +60261,10 @@ begin desc["T"] = tf.data_type(value_) res = tf.execute(desc) node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) if tf.in_eager_mode() @@ -58106,8 +60304,10 @@ begin desc["T"] = tf.data_type(sample_) res = tf.execute(desc) node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_gamma_grad(alpha_, sample_; name=nothing) if tf.in_eager_mode() @@ -58179,8 +60379,10 @@ begin desc["T"] = tf.data_type(momentum_) res = tf.execute(desc) node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) if tf.in_eager_mode() @@ -58227,8 +60429,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) if tf.in_eager_mode() @@ -58282,8 +60486,10 @@ begin desc["Tinput"] = tf.data_type(features_) res = tf.execute(desc) node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -58344,8 +60550,10 @@ begin desc["T"] = tf.data_type(b_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) if tf.in_eager_mode() @@ -58412,8 +60620,10 @@ begin desc["T"] = tf.data_type(gamma_) res = tf.execute(desc) node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) if 
tf.in_eager_mode() @@ -58457,8 +60667,10 @@ begin desc["T"] = tf.data_type(k_) res = tf.execute(desc) node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function in_top_kv2(predictions_, targets_, k_; name=nothing) if tf.in_eager_mode() @@ -58493,8 +60705,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(cholesky, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function cholesky(input_; name=nothing) if tf.in_eager_mode() @@ -58571,8 +60785,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) if tf.in_eager_mode() @@ -58632,8 +60848,10 @@ begin desc["T"] = tf.data_type(grad_) res = tf.execute(desc) node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) if tf.in_eager_mode() @@ -58714,8 +60932,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -58760,8 +60980,10 @@ begin desc["T"] = tf.data_type(original_image_) res = tf.execute(desc) node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -58796,8 +61018,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_self_adjoint_eig(input_; name=nothing) if tf.in_eager_mode() @@ -58840,8 +61064,10 @@ begin desc["T"] = tf.data_type(sp_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end 
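The hunks in this regenerated file all apply one mechanical change to the eager-mode wrappers: the unconditional tf.add_node(res[1], node) is wrapped in a guard so that an op returning no outputs never indexes into an empty result vector. A minimal sketch of the pattern, assuming a hypothetical single-output op Foo and a convenience EagerOp constructor; tf.execute, tf.TapeNode, and tf.add_node are the package helpers visible in the hunks, everything else here is illustrative:

    import TensorFlow
    const tf = TensorFlow

    function foo_eager(x_; name=nothing)
        desc = tf.EagerOp("Foo")           # op/attr setup elided; constructor is assumed
        tf.add_input(desc, x_)
        res = tf.execute(desc)             # vector of result handles; may be empty
        node = tf.TapeNode(foo_eager, [x_], name=nothing, res)
        if length(res) >= 1                # new guard: only tape ops that produced outputs
            tf.add_node(res[1], node)      # tape entry is keyed on the first output
            return res[1]                  # multi-output wrappers return res instead
        end                                # empty res falls through, yielding nothing
    end

Wrappers for multi-output ops differ only in returning the whole res vector; in both variants a zero-output op now yields nothing instead of throwing a BoundsError.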
function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) if tf.in_eager_mode() @@ -58876,8 +61102,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(asinh, [x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function asinh(x_; name=nothing) if tf.in_eager_mode() @@ -58963,8 +61191,10 @@ begin desc["Tfilter"] = tf.data_type(filter_) res = tf.execute(desc) node = tf.TapeNode(quantized_conv2d_and_relu, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) if tf.in_eager_mode() @@ -59005,8 +61235,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function matrix_inverse(input_; name=nothing, adjoint=nothing) if tf.in_eager_mode() @@ -59049,8 +61281,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() @@ -59112,8 +61346,10 @@ begin desc["Tinput"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) if tf.in_eager_mode() @@ -59148,8 +61384,10 @@ begin desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(fft, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function fft(input_; name=nothing) if tf.in_eager_mode() @@ -59190,8 +61428,10 @@ begin desc["Tperm"] = tf.data_type(perm_) res = tf.execute(desc) node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function conjugate_transpose(x_, perm_; name=nothing) if tf.in_eager_mode() @@ -59250,8 +61490,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() @@ -59291,8 +61533,10 @@ begin desc["T"] = tf.data_type(features_) res = tf.execute(desc) 
node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function relu6grad(gradients_, features_; name=nothing) if tf.in_eager_mode() @@ -59346,8 +61590,10 @@ begin desc["T"] = tf.data_type(original_image_) res = tf.execute(desc) node = tf.TapeNode(scale_and_translate_grad, [grads_, original_image_, scale_, translation_], name=nothing, kernel_type=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) if tf.in_eager_mode() @@ -59394,8 +61640,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) if tf.in_eager_mode() @@ -59496,8 +61744,10 @@ begin desc["T"] = tf.data_type(params_) res = tf.execute(desc) node = tf.TapeNode(cudnn_rnnv3, [input_, input_h_, input_c_, params_, sequence_lengths_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) if tf.in_eager_mode() @@ -59539,8 +61789,10 @@ begin desc["Tdim"] = tf.data_type(dim_) res = tf.execute(desc) node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function expand_dims(input_, dim_; name=nothing) if tf.in_eager_mode() @@ -59580,8 +61832,10 @@ begin desc["T"] = tf.data_type(dy_) res = tf.execute(desc) node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function inv_grad(y_, dy_; name=nothing) if tf.in_eager_mode() @@ -59628,8 +61882,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) if tf.in_eager_mode() @@ -59664,8 +61920,10 @@ begin desc["T"] = tf.data_type(t_) res = tf.execute(desc) node = tf.TapeNode(l2loss, [t_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function l2loss(t_; name=nothing) if tf.in_eager_mode() @@ -59710,8 +61968,10 @@ begin desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function 
resize_area(images_, size_; name=nothing, align_corners=nothing) if tf.in_eager_mode() @@ -59809,8 +62069,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) if tf.in_eager_mode() @@ -59843,8 +62105,10 @@ begin tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function batch_fft3d(input_; name=nothing) if tf.in_eager_mode() @@ -59897,8 +62161,10 @@ begin desc["T"] = tf.data_type(shape_) res = tf.execute(desc) node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) if tf.in_eager_mode() @@ -59950,8 +62216,10 @@ begin desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.in_eager_mode() @@ -60079,8 +62347,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) if tf.in_eager_mode() @@ -60120,8 +62390,10 @@ begin desc["T"] = tf.data_type(q_) res = tf.execute(desc) node = tf.TapeNode(zeta, [x_, q_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function zeta(x_, q_; name=nothing) if tf.in_eager_mode() @@ -60207,8 +62479,10 @@ begin desc["T"] = tf.data_type(image_size_) res = tf.execute(desc) node = tf.TapeNode(sample_distorted_bounding_box, 
[image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) if tf.in_eager_mode() @@ -60248,8 +62522,10 @@ begin desc["T"] = tf.data_type(x_) res = tf.execute(desc) node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function igamma_grad_a(a_, x_; name=nothing) if tf.in_eager_mode() @@ -60291,8 +62567,10 @@ begin desc["Tindices"] = tf.data_type(segment_ids_) res = tf.execute(desc) node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function segment_max(data_, segment_ids_; name=nothing) if tf.in_eager_mode() @@ -60337,8 +62615,10 @@ begin desc["Tidx"] = tf.data_type(delta_) res = tf.execute(desc) node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function range(start_, limit_, delta_; name=nothing) if tf.in_eager_mode() @@ -60396,8 +62676,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) if tf.in_eager_mode() @@ -60430,8 +62712,10 @@ begin tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function flush_summary_writer(writer_; name=nothing) if tf.in_eager_mode() @@ -60480,8 +62764,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) if tf.in_eager_mode() @@ -60525,8 +62811,10 @@ begin desc["T"] = tf.data_type(grad_values_) res = tf.execute(desc) node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) if tf.in_eager_mode() @@ -60571,8 +62859,10 @@ begin end res = tf.execute(desc) node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - 
tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() @@ -60625,8 +62915,10 @@ begin desc["T"] = tf.data_type(b_) res = tf.execute(desc) node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) if tf.in_eager_mode() @@ -60667,8 +62959,10 @@ begin desc["T"] = tf.data_type(input_) res = tf.execute(desc) node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) - tf.add_node(res[1], node) - return res[1] + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end function prevent_gradient(input_; name=nothing, message=nothing) if tf.in_eager_mode() @@ -60706,8 +63000,10 @@ begin tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res) - tf.add_node(res[1], node) - return res + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end function lookup_table_export(table_handle_; name=nothing) if tf.in_eager_mode() diff --git a/src/tape.jl b/src/tape.jl index fa29f6a2..37f92c0c 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -3,8 +3,8 @@ import MacroTools: splitdef, combinedef mutable struct TapeNode op::Function - args::Vector{TensorHandle} - results::Vector{TensorHandle} + args::Vector + results::Vector kwargs::Dict end @@ -17,17 +17,22 @@ end Tape() = Tape(Dict{TensorHandle, TapeNode}()) -tape = nothing - function set_tape(new_tape=nothing) if new_tape === nothing new_tape = Tape() end - global tape = new_tape - return tape + context = Context() + context.attrs["tape"] = new_tape + push!(global_context, context) + return new_tape +end + +function get_tape() + return global_context["tape"] end function add_node(t, node) + tape = get_tape() tape === nothing && return tape.nodes[t] = node end @@ -63,10 +68,9 @@ end) end) function with_no_grad(f) - old_tape = tape - global tape = nothing - res = f() - global tape = old_tape + context = Context() + context.attrs["tape"] = nothing + res = with_context(context, f) return res end From 323b6a30a1dbdee4e66c64da09f6841a96a85c65 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Tue, 26 Feb 2019 21:32:41 -0500 Subject: [PATCH 28/49] Clear tape --- examples/keras.jl | 4 ++-- src/tape.jl | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/examples/keras.jl b/examples/keras.jl index 64019f67..59a5b1e7 100644 --- a/examples/keras.jl +++ b/examples/keras.jl @@ -8,5 +8,5 @@ tf.add(m, tf.Dense(10, 3)) x=constant(randn(5,3)) y=3x+5 -tf.compile(m, optimizer=tf.SGD(lr=1e-3), loss=tf.mse) -tf.fit(m, x, y, n_epochs=1000) +tf.compile(m, optimizer=tf.SGD(lr=1e-4), loss=tf.mse) +tf.fit(m, x, y, n_epochs=200) diff --git a/src/tape.jl b/src/tape.jl index 37f92c0c..f2ace4f7 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -8,14 +8,14 @@ mutable struct TapeNode kwargs::Dict end - TapeNode(op, args, results; kwargs...) = TapeNode(op, args, results, kwargs) mutable struct Tape nodes::Dict{TensorHandle, TapeNode} + attrs::Dict end -Tape() = Tape(Dict{TensorHandle, TapeNode}()) +Tape(;kwargs...) 
From 323b6a30c592fa02bfb130742ad2b1a26340bf81a Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Tue, 26 Feb 2019 21:32:41 -0500
Subject: [PATCH 28/49] Clear tape

---
 examples/keras.jl | 4 ++--
 src/tape.jl       | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/examples/keras.jl b/examples/keras.jl
index 64019f67..59a5b1e7 100644
--- a/examples/keras.jl
+++ b/examples/keras.jl
@@ -8,5 +8,5 @@ tf.add(m, tf.Dense(10, 3))
 x=constant(randn(5,3))
 y=3x+5
 
-tf.compile(m, optimizer=tf.SGD(lr=1e-3), loss=tf.mse)
-tf.fit(m, x, y, n_epochs=1000)
+tf.compile(m, optimizer=tf.SGD(lr=1e-4), loss=tf.mse)
+tf.fit(m, x, y, n_epochs=200)

diff --git a/src/tape.jl b/src/tape.jl
index 37f92c0c..f2ace4f7 100644
--- a/src/tape.jl
+++ b/src/tape.jl
@@ -8,14 +8,14 @@ mutable struct TapeNode
     kwargs::Dict
 end
 
-
 TapeNode(op, args, results; kwargs...) = TapeNode(op, args, results, kwargs)
 
 mutable struct Tape
     nodes::Dict{TensorHandle, TapeNode}
+    attrs::Dict
 end
 
-Tape() = Tape(Dict{TensorHandle, TapeNode}())
+Tape(;kwargs...) = Tape(Dict{TensorHandle, TapeNode}(), Dict(kwargs...))
 
 function set_tape(new_tape=nothing)
     if new_tape === nothing
@@ -162,6 +162,7 @@ end
 function grad(tape, tensor, in_tensors::AbstractArray, out_grad=constant(1.0))
     grads = Dict()
     _grad(tape, tensor, out_grad, grads)
+    get(tape.attrs, "preserve", false) || empty!(tape.nodes)
     return [get(grads, tensor, nothing) for tensor in in_tensors]
 end

From cc1dd39bb511206d4233cb22011ae61645b240a2 Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Mon, 4 Mar 2019 16:28:26 -0500
Subject: [PATCH 29/49] Add CRC32 dep

---
 Project.toml          | 1 +
 src/summary_writer.jl | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 5962271e..12abda20 100644
--- a/Project.toml
+++ b/Project.toml
@@ -5,6 +5,7 @@ version = "0.12.0"
 
 [deps]
 AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f"
+CRC32c = "8bf52ea8-c179-5cab-976a-9e18b702a9bc"
 Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
 Conda = "8f4d0f93-b110-5947-807f-2305c1781a2d"
 Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"

diff --git a/src/summary_writer.jl b/src/summary_writer.jl
index e325806b..8bd2a0ae 100644
--- a/src/summary_writer.jl
+++ b/src/summary_writer.jl
@@ -62,7 +62,6 @@ function Base.write(writer::FileWriter, event::tensorflow.Event)
     write(buffer, proto_length)
     seekstart(buffer)
     proto_length_bytes = read(buffer)
-    proto_length_bytes_rev = reverse(proto_length_bytes)
     write(file, proto_length_bytes)
     write(file, masked_crc(proto_length_bytes))
     write(file, proto)

From a555ff82926f5685dedb6c08516b7d909311b264 Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Mon, 4 Mar 2019 16:37:33 -0500
Subject: [PATCH 30/49] Disable eager by default

---
 src/eager.jl            |    9 +-
 src/generate_ops.jl     |    1 +
 src/ops/imported_ops.jl | 2299 ++++++++++++++++++++-------------------
 3 files changed, 1159 insertions(+), 1150 deletions(-)

diff --git a/src/eager.jl b/src/eager.jl
index 544301f1..17b4cf0f 100644
--- a/src/eager.jl
+++ b/src/eager.jl
@@ -347,10 +347,17 @@ end
 
 function default_context()
     context = Context()
-    context.attrs["eager"] = true
+    context.attrs["eager"] = false
     return context
 end
 
+function enable_eager_execution()
+    context = Context()
+    context.attrs["eager"] = true
+    push!(global_context, context)
+    return nothing
+end
+
 function Base.getindex(c::ContextStack, name)
     value = nothing
     for context in c.contexts

diff --git a/src/generate_ops.jl b/src/generate_ops.jl
index 25cf9cda..c2676ecb 100644
--- a/src/generate_ops.jl
+++ b/src/generate_ops.jl
@@ -359,6 +359,7 @@ function import_ops(op_names)
     module Ops
     import TensorFlow
     const tf = TensorFlow
+    import TensorFlow: Tensor
     """)
     for name in op_names
         op = ops[name]
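This is the substantive part of the patch: default_context() now seeds the context stack with attrs["eager"] = false, so the package starts in graph mode and eager execution becomes opt-in through the new enable_eager_execution(), which pushes an overriding context. Because Base.getindex(::ContextStack, name) scans the stack and keeps the last value it finds, the pushed context shadows the default. A hedged usage sketch (constant is the same constructor the keras.jl example above uses; calling enable_eager_execution before creating any ops is an assumption about intended usage, not something this diff enforces):

using TensorFlow
const tf = TensorFlow

tf.enable_eager_execution()    # pushes a context with attrs["eager"] = true
x = tf.constant([1.0, 2.0])    # with the override in place, tf.in_eager_mode()
                               # is true and ops execute immediately

The regenerated src/ops/imported_ops.jl below accounts for the bulk of the diffstat: the wrappers were re-emitted from a checkout at /Users/malmaud/.julia/dev/TensorFlow instead of /Users/malmaud/code/TensorFlow, so each hunk simply rewrites the source-path comment baked into the generated code, alongside the new import TensorFlow: Tensor at the top of the module.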
diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl
index 0cab1070..cd6538ca 100644
--- a/src/ops/imported_ops.jl
+++ b/src/ops/imported_ops.jl
@@ -1,15 +1,16 @@
-# Autogenerated on 2019-02-26T20:52:08.497
+# Autogenerated on 2019-03-04T16:35:07.066
 
 module Ops
 import TensorFlow
 const tf = TensorFlow
+import TensorFlow: Tensor
 
 """
     reduce_join(inputs, reduction_indices; keep_dims=false, separator=)
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing)
        local desc
        tf.with_op_name(name, "ReduceJoin") do
            desc = tf.NodeDescription("ReduceJoin")
@@ -61,7 +62,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing)
        local desc
        tf.with_op_name(name, "ReduceDataset") do
            desc = tf.NodeDescription("ReduceDataset")
@@ -141,7 +142,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing)
        local desc
        tf.with_op_name(name, "TensorListFromTensor") do
            desc = tf.NodeDescription("TensorListFromTensor")
@@ -197,7 +198,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing)
        local desc
        tf.with_op_name(name, "ExtractJpegShape") do
            desc = tf.NodeDescription("ExtractJpegShape")
@@ -239,7 +240,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing)
        local desc
        tf.with_op_name(name, "Svd") do
            desc = tf.NodeDescription("Svd")
@@ -294,7 +295,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing)
        local desc
        tf.with_op_name(name, "IteratorGetNextSync") do
            desc = tf.NodeDescription("IteratorGetNextSync")
@@ -342,7 +343,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
        local desc
        tf.with_op_name(name, "RefEnter") do
            desc = tf.NodeDescription("RefEnter")
@@ -398,7 +399,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "Erf") do
            desc = tf.NodeDescription("Erf")
@@ -436,7 +437,7 @@ end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function
lookup_table_export_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExportV2") do desc = tf.NodeDescription("LookupTableExportV2") @@ -477,7 +478,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) local desc tf.with_op_name(name, "Round") do desc = tf.NodeDescription("Round") @@ -515,7 +516,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeue") do desc = tf.NodeDescription("OutfeedDequeue") @@ -565,7 +566,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") @@ -601,7 +602,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Merge") do desc = tf.NodeDescription("Merge") @@ -650,7 +651,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "HistogramFixedWidth") do desc = tf.NodeDescription("HistogramFixedWidth") @@ -703,7 +704,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asin") do desc = tf.NodeDescription("Asin") @@ -741,7 +742,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Any") do desc = tf.NodeDescription("Any") @@ -790,7 +791,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "RsqrtGrad") do desc = 
tf.NodeDescription("RsqrtGrad") @@ -833,7 +834,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatter") do desc = tf.NodeDescription("TensorArrayScatter") @@ -883,7 +884,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) local desc tf.with_op_name(name, "DynamicPartition") do desc = tf.NodeDescription("DynamicPartition") @@ -936,7 +937,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") @@ -988,7 +989,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeState") do desc = tf.NodeDescription("ReaderSerializeState") @@ -1024,7 +1025,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RightShift") do desc = tf.NodeDescription("RightShift") @@ -1067,7 +1068,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3D") do desc = tf.NodeDescription("AvgPool3D") @@ -1129,7 +1130,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) local desc tf.with_op_name(name, "EncodePng") do desc = tf.NodeDescription("EncodePng") @@ -1173,7 +1174,7 @@ end Debug Identity Op. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugIdentity") do desc = tf.NodeDescription("DebugIdentity") @@ -1235,7 +1236,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) local desc tf.with_op_name(name, "Imag") do desc = tf.NodeDescription("Imag") @@ -1273,7 +1274,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") @@ -1361,7 +1362,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageClear") do desc = tf.NodeDescription("StageClear") @@ -1423,7 +1424,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sign") do desc = tf.NodeDescription("Sign") @@ -1461,7 +1462,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) local desc tf.with_op_name(name, "PopulationCount") do desc = tf.NodeDescription("PopulationCount") @@ -1499,7 +1500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) local desc tf.with_op_name(name, "Neg") do desc = tf.NodeDescription("Neg") @@ -1537,7 +1538,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "AnonymousIterator") do desc = 
tf.NodeDescription("AnonymousIterator") @@ -1581,7 +1582,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSum") do desc = tf.NodeDescription("SparseReduceSum") @@ -1637,7 +1638,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterDataset") do desc = tf.NodeDescription("FilterDataset") @@ -1701,7 +1702,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "StringLength") do desc = tf.NodeDescription("StringLength") @@ -1743,7 +1744,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3D") do desc = tf.NodeDescription("Conv3D") @@ -1810,7 +1811,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") @@ -1871,7 +1872,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) local desc tf.with_op_name(name, "OptionalHasValue") do desc = tf.NodeDescription("OptionalHasValue") @@ -1907,7 +1908,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, 
beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyAdam") do desc = tf.NodeDescription("ApplyAdam") @@ -2002,7 +2003,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsToCanonical") do desc = tf.NodeDescription("CudnnRNNParamsToCanonical") @@ -2099,7 +2100,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT3D") do desc = tf.NodeDescription("IRFFT3D") @@ -2139,7 +2140,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) local desc tf.with_op_name(name, "Angle") do desc = tf.NodeDescription("Angle") @@ -2177,7 +2178,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") @@ -2221,7 +2222,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LearnedUnigramCandidateSampler") do desc = tf.NodeDescription("LearnedUnigramCandidateSampler") @@ -2298,7 +2299,7 @@ end A graph node which represents an argument to a function. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Arg") do desc = tf.NodeDescription("_Arg") @@ -2336,7 +2337,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixSquareRoot") do desc = tf.NodeDescription("MatrixSquareRoot") @@ -2374,7 +2375,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseMul") do desc = tf.NodeDescription("SparseDenseCwiseMul") @@ -2425,7 +2426,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV3") do desc = tf.NodeDescription("TensorArrayConcatV3") @@ -2482,7 +2483,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_script_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_script_graph(input_; name=nothing) local desc tf.with_op_name(name, "UnicodeScript") do desc = tf.NodeDescription("UnicodeScript") @@ -2518,7 +2519,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "BatchCholeskyGrad") do desc = tf.NodeDescription("BatchCholeskyGrad") @@ -2561,7 +2562,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Mean") do desc = tf.NodeDescription("Mean") @@ -2612,7 +2613,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT") do desc = tf.NodeDescription("BatchFFT") @@ -2648,7 +2649,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; 
name=nothing) local desc tf.with_op_name(name, "Sin") do desc = tf.NodeDescription("Sin") @@ -2686,7 +2687,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") @@ -2730,7 +2731,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedMaxPool") do desc = tf.NodeDescription("QuantizedMaxPool") @@ -2799,7 +2800,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapStage") do desc = tf.NodeDescription("OrderedMapStage") @@ -2879,7 +2880,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "PartitionedCall") do desc = tf.NodeDescription("PartitionedCall") @@ -2951,7 +2952,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "SparseApplyAdagrad") do desc = tf.NodeDescription("SparseApplyAdagrad") @@ -3023,7 +3024,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) local desc 
tf.with_op_name(name, "DecodeProtoV2") do desc = tf.NodeDescription("DecodeProtoV2") @@ -3100,7 +3101,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) local desc tf.with_op_name(name, "Betainc") do desc = tf.NodeDescription("Betainc") @@ -3148,7 +3149,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) local desc tf.with_op_name(name, "GuaranteeConst") do desc = tf.NodeDescription("GuaranteeConst") @@ -3186,7 +3187,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) local desc tf.with_op_name(name, "DecodeBmp") do desc = tf.NodeDescription("DecodeBmp") @@ -3228,7 +3229,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesBucketize") do desc = tf.NodeDescription("BoostedTreesBucketize") @@ -3279,7 +3280,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "ShutdownDistributedTPU") do desc @@ -3312,7 +3313,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") @@ -3348,7 +3349,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) local desc tf.with_op_name(name, "Timestamp") do desc @@ -3381,7 +3382,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixExponential") do desc = tf.NodeDescription("MatrixExponential") @@ -3419,7 +3420,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function size_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Size") do desc = tf.NodeDescription("Size") @@ -3463,7 +3464,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "AddN") do desc = tf.NodeDescription("AddN") @@ -3507,7 +3508,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSum") do desc = tf.NodeDescription("SparseSegmentSum") @@ -3556,7 +3557,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDataset") do desc = tf.NodeDescription("BatchDataset") @@ -3608,7 +3609,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) local desc tf.with_op_name(name, "RecordInput") do desc = tf.NodeDescription("RecordInput") @@ -3682,7 +3683,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpToV2") do desc = tf.NodeDescription("QueueDequeueUpToV2") @@ -3734,7 +3735,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -3795,7 +3796,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -3867,7 +3868,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) local desc tf.with_op_name(name, "SerializeTensor") do desc = tf.NodeDescription("SerializeTensor") @@ -3905,7 +3906,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mul") do desc = tf.NodeDescription("Mul") @@ -3948,7 +3949,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") @@ -3996,7 +3997,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterDiv") do desc = tf.NodeDescription("ResourceScatterDiv") @@ -4051,7 +4052,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDatasetV2") do desc = tf.NodeDescription("FixedLengthRecordDatasetV2") @@ -4107,7 +4108,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "SkipDataset") do desc = tf.NodeDescription("SkipDataset") @@ -4159,7 +4160,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
cosh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cosh") do desc = tf.NodeDescription("Cosh") @@ -4197,7 +4198,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormV2") do desc = tf.NodeDescription("FusedBatchNormV2") @@ -4285,7 +4286,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplit") do desc = tf.NodeDescription("TensorArraySplit") @@ -4335,7 +4336,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) local desc tf.with_op_name(name, "CTCLoss") do desc = tf.NodeDescription("CTCLoss") @@ -4406,7 +4407,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "QuantizedReshape") do desc = tf.NodeDescription("QuantizedReshape") @@ -4463,7 +4464,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorDiv") do desc = tf.NodeDescription("FloorDiv") @@ -4506,7 +4507,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV2") do desc = tf.NodeDescription("TensorArrayV2") @@ -4572,7 +4573,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, 
cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "BarrierClose") do desc = tf.NodeDescription("BarrierClose") @@ -4614,7 +4615,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ReadVariableOp") do desc = tf.NodeDescription("ReadVariableOp") @@ -4656,7 +4657,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedMul") do desc = tf.NodeDescription("QuantizedMul") @@ -4721,7 +4722,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Selu") do desc = tf.NodeDescription("Selu") @@ -4759,7 +4760,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV3") do desc = tf.NodeDescription("CudnnRNNBackpropV3") @@ -4896,7 +4897,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsert") do desc = tf.NodeDescription("LookupTableInsert") @@ -4944,7 +4945,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "ComplexAbs") do desc = tf.NodeDescription("ComplexAbs") @@ -4982,7 +4983,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, 
rhs_; name=nothing) local desc tf.with_op_name(name, "TridiagonalSolve") do desc = tf.NodeDescription("TridiagonalSolve") @@ -5025,7 +5026,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImport") do desc = tf.NodeDescription("LookupTableImport") @@ -5073,7 +5074,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "Abs") do desc = tf.NodeDescription("Abs") @@ -5111,7 +5112,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyAdam") do desc = tf.NodeDescription("ResourceApplyAdam") @@ -5203,7 +5204,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) local desc tf.with_op_name(name, "WriteHistogramSummary") do desc = tf.NodeDescription("WriteHistogramSummary") @@ -5253,7 +5254,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") @@ -5293,7 +5294,7 @@ end Sends the named tensor from send_device to recv_device. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostSend") do desc = tf.NodeDescription("_HostSend") @@ -5361,7 +5362,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Greater") do desc = tf.NodeDescription("Greater") @@ -5404,7 +5405,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "NcclBroadcast") do desc = tf.NodeDescription("NcclBroadcast") @@ -5448,7 +5449,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBackBatch") do desc = tf.NodeDescription("TensorListPushBackBatch") @@ -5496,7 +5497,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMin") do desc = tf.NodeDescription("ResourceScatterMin") @@ -5551,7 +5552,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) local desc tf.with_op_name(name, "Slice") do desc = tf.NodeDescription("Slice") @@ -5607,7 +5608,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecode") do desc = tf.NodeDescription("UnicodeDecode") @@ -5672,7 +5673,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) 
+ #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TakeDataset") do desc = tf.NodeDescription("TakeDataset") @@ -5724,7 +5725,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") @@ -5790,7 +5791,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "AllCandidateSampler") do desc = tf.NodeDescription("AllCandidateSampler") @@ -5861,7 +5862,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropInput") do desc = tf.NodeDescription("Conv2DBackpropInput") @@ -5944,7 +5945,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "DatasetToSingleElement") do desc = tf.NodeDescription("DatasetToSingleElement") @@ -5992,7 +5993,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "CacheDataset") do desc = tf.NodeDescription("CacheDataset") @@ -6044,7 +6045,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") @@ -6109,7 +6110,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedResizeAndPadConv2D") do desc = tf.NodeDescription("FusedResizeAndPadConv2D") @@ -6184,7 +6185,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) local desc tf.with_op_name(name, "Batch") do desc = tf.NodeDescription("Batch") @@ -6285,7 +6286,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastRecv") do desc = tf.NodeDescription("CollectiveBcastRecv") @@ -6341,7 +6342,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) local desc tf.with_op_name(name, "BatchToSpaceND") do desc = tf.NodeDescription("BatchToSpaceND") @@ -6391,7 +6392,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) local desc tf.with_op_name(name, "LoopCond") do desc = tf.NodeDescription("LoopCond") @@ -6427,7 +6428,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc 
tf.with_op_name(name, "DepthToSpace") do desc = tf.NodeDescription("DepthToSpace") @@ -6477,7 +6478,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) local desc tf.with_op_name(name, "DestroyTemporaryVariable") do desc = tf.NodeDescription("DestroyTemporaryVariable") @@ -6521,7 +6522,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNN") do desc = tf.NodeDescription("CudnnRNN") @@ -6621,7 +6622,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "RefIdentity") do desc = tf.NodeDescription("RefIdentity") @@ -6659,7 +6660,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGrad") do desc = tf.NodeDescription("MaxPool3DGrad") @@ -6732,7 +6733,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") @@ -6800,7 +6801,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueueV2") do desc = tf.NodeDescription("PaddingFIFOQueueV2") @@ -6862,7 +6863,7 @@ end """ begin - 
#= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInput") do desc = tf.NodeDescription("Conv3DBackpropInput") @@ -6928,7 +6929,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefExit") do desc = tf.NodeDescription("RefExit") @@ -6966,7 +6967,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapClear") do desc = tf.NodeDescription("MapClear") @@ -7028,7 +7029,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) local desc tf.with_op_name(name, "EncodeWav") do desc = tf.NodeDescription("EncodeWav") @@ -7068,7 +7069,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) local desc tf.with_op_name(name, "TensorSummaryV2") do desc = tf.NodeDescription("TensorSummaryV2") @@ -7114,7 +7115,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpTo") do desc = tf.NodeDescription("QueueDequeueUpTo") @@ -7166,7 +7167,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "MatrixBandPart") do desc = tf.NodeDescription("MatrixBandPart") @@ -7215,7 +7216,7 @@ end Copy Op. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "Copy") do desc = tf.NodeDescription("Copy") @@ -7265,7 +7266,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) local desc tf.with_op_name(name, "ShapeN") do desc = tf.NodeDescription("ShapeN") @@ -7320,7 +7321,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ExperimentalParseExampleDataset") do desc = tf.NodeDescription("ExperimentalParseExampleDataset") @@ -7412,7 +7413,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Concat") do desc = tf.NodeDescription("Concat") @@ -7460,7 +7461,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatDimMap") do desc = tf.NodeDescription("DataFormatDimMap") @@ -7510,7 +7511,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReader") do desc = tf.NodeDescription("IdentityReader") @@ -7554,7 +7555,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softplus") do desc = tf.NodeDescription("Softplus") @@ -7592,7 +7593,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, 
accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") @@ -7666,7 +7667,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleSequenceExample") do desc = tf.NodeDescription("ParseSingleSequenceExample") @@ -7795,7 +7796,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixDiag") do desc = tf.NodeDescription("MatrixDiag") @@ -7833,7 +7834,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing) local desc tf.with_op_name(name, "Fact") do desc @@ -7866,7 +7867,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShardDataset") do desc = tf.NodeDescription("ShardDataset") @@ -7922,7 +7923,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, 
"MaxPoolGradGrad") do desc = tf.NodeDescription("MaxPoolGradGrad") @@ -7994,7 +7995,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinearGrad") do desc = tf.NodeDescription("ResizeBilinearGrad") @@ -8042,7 +8043,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "BatchToSpace") do desc = tf.NodeDescription("BatchToSpace") @@ -8093,7 +8094,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) local desc tf.with_op_name(name, "OptionalFromValue") do desc = tf.NodeDescription("OptionalFromValue") @@ -8135,7 +8136,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xlogy") do desc = tf.NodeDescription("Xlogy") @@ -8178,7 +8179,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing) local desc tf.with_op_name(name, "Cross") do desc = tf.NodeDescription("Cross") @@ -8221,7 +8222,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseAnd") do desc = tf.NodeDescription("BitwiseAnd") @@ -8264,7 +8265,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) local desc tf.with_op_name(name, "BroadcastTo") do desc = tf.NodeDescription("BroadcastTo") @@ -8308,7 +8309,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "EluGrad") do desc = tf.NodeDescription("EluGrad") @@ -8351,7 +8352,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, 
reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackprop") do desc = tf.NodeDescription("CudnnRNNBackprop") @@ -8480,7 +8481,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucketFast") do desc = tf.NodeDescription("StringToHashBucketFast") @@ -8522,7 +8523,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTable") do desc = tf.NodeDescription("MutableHashTable") @@ -8584,7 +8585,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu") do desc = tf.NodeDescription("Relu") @@ -8622,7 +8623,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) local desc tf.with_op_name(name, "NthElement") do desc = tf.NodeDescription("NthElement") @@ -8670,7 +8671,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softsign") do desc = tf.NodeDescription("Softsign") @@ -8708,7 +8709,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTable") do 
desc = tf.NodeDescription("MutableDenseHashTable") @@ -8794,7 +8795,7 @@ end An op that shuts down a running distributed TPU system. The Op returns """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "_ShutdownDistributedTPU") do desc @@ -8827,7 +8828,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Polygamma") do desc = tf.NodeDescription("Polygamma") @@ -8870,7 +8871,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) local desc tf.with_op_name(name, "NcclReduce") do desc = tf.NodeDescription("NcclReduce") @@ -8920,7 +8921,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMax") do desc = tf.NodeDescription("ArgMax") @@ -8971,7 +8972,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixSetDiag") do desc = tf.NodeDescription("MatrixSetDiag") @@ -9014,7 +9015,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) local desc tf.with_op_name(name, "SpaceToBatchND") do desc = tf.NodeDescription("SpaceToBatchND") @@ -9064,7 +9065,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReshape") do desc = tf.NodeDescription("SparseReshape") @@ -9113,7 +9114,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptimizeDataset") do desc = tf.NodeDescription("OptimizeDataset") @@ -9165,7 
+9166,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatV2") do desc = tf.NodeDescription("ConcatV2") @@ -9216,7 +9217,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdadelta") do desc = tf.NodeDescription("ResourceSparseApplyAdadelta") @@ -9294,7 +9295,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "Tile") do desc = tf.NodeDescription("Tile") @@ -9338,7 +9339,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MutexV2") do desc = tf.NodeDescription("MutexV2") @@ -9382,7 +9383,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeManySparse") do desc = tf.NodeDescription("SerializeManySparse") @@ -9434,7 +9435,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) local desc tf.with_op_name(name, "TPUEmbeddingActivations") do desc = tf.NodeDescription("TPUEmbeddingActivations") @@ -9486,7 +9487,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "BatchMatrixSolveLs") do desc = tf.NodeDescription("BatchMatrixSolveLs") @@ -9539,7 +9540,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function not_equal_graph(x_, y_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function not_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "NotEqual") do desc = tf.NodeDescription("NotEqual") @@ -9582,7 +9583,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lgamma_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lgamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Lgamma") do desc = tf.NodeDescription("Lgamma") @@ -9620,7 +9621,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicateMetadata") do desc = tf.NodeDescription("TPUReplicateMetadata") @@ -9706,7 +9707,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolHandle") do desc = tf.NodeDescription("ExperimentalThreadPoolHandle") @@ -9768,7 +9769,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "SelfAdjointEig") do desc = tf.NodeDescription("SelfAdjointEig") @@ -9806,7 +9807,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") @@ -9853,7 +9854,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseDiv") do desc = 
tf.NodeDescription("SparseDenseCwiseDiv") @@ -9904,7 +9905,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acos_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acos") do desc = tf.NodeDescription("Acos") @@ -9942,7 +9943,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "All") do desc = tf.NodeDescription("All") @@ -9991,7 +9992,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) local desc tf.with_op_name(name, "CompareAndBitpack") do desc = tf.NodeDescription("CompareAndBitpack") @@ -10034,7 +10035,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "VarHandleOp") do desc = tf.NodeDescription("VarHandleOp") @@ -10090,7 +10091,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUniqueDataset") do desc = tf.NodeDescription("ExperimentalUniqueDataset") @@ -10138,7 +10139,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndRelu") @@ -10235,7 +10236,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "ListDiff") do desc = tf.NodeDescription("ListDiff") @@ -10289,7 +10290,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryFileWriter") do desc = tf.NodeDescription("CreateSummaryFileWriter") @@ -10341,7 +10342,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) local desc tf.with_op_name(name, "GenerateVocabRemapping") do desc = tf.NodeDescription("GenerateVocabRemapping") @@ -10404,7 +10405,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixInverse") do desc = tf.NodeDescription("BatchMatrixInverse") @@ -10448,7 +10449,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function control_trigger_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function control_trigger_graph(; name=nothing) local desc tf.with_op_name(name, "ControlTrigger") do desc @@ -10481,7 +10482,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) local desc tf.with_op_name(name, "TPUOrdinalSelector") do desc @@ -10514,7 +10515,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stop_gradient_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stop_gradient_graph(input_; name=nothing) local desc tf.with_op_name(name, "StopGradient") do desc = tf.NodeDescription("StopGradient") @@ -10552,7 +10553,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "Split") do desc = tf.NodeDescription("Split") @@ -10606,7 +10607,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) local desc tf.with_op_name(name, "Unpack") do desc = tf.NodeDescription("Unpack") @@ -10667,7 +10668,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; 
name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMax") do desc = tf.NodeDescription("ResourceScatterMax") @@ -10722,7 +10723,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWrite") do desc = tf.NodeDescription("TensorArrayWrite") @@ -10772,7 +10773,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) local desc tf.with_op_name(name, "Fill") do desc = tf.NodeDescription("Fill") @@ -10822,7 +10823,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRequantize") @@ -10925,7 +10926,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_graph(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "Softmax") do desc = tf.NodeDescription("Softmax") @@ -10963,7 +10964,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubic") do desc = tf.NodeDescription("ResizeBicubic") @@ -11011,7 +11012,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "InfeedDequeueTuple") do desc = tf.NodeDescription("InfeedDequeueTuple") @@ -11055,7 +11056,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIterator") do desc = tf.NodeDescription("MultiDeviceIterator") @@ -11117,7 +11118,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) local desc tf.with_op_name(name, "DecodeCSV") do desc = tf.NodeDescription("DecodeCSV") @@ -11187,7 +11188,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFind") do desc = tf.NodeDescription("LookupTableFind") @@ -11235,7 +11236,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleAndRepeatDataset") do desc = tf.NodeDescription("ShuffleAndRepeatDataset") @@ -11299,7 +11300,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) local desc tf.with_op_name(name, "RequantizationRangePerChannel") do desc = tf.NodeDescription("RequantizationRangePerChannel") @@ -11356,7 +11357,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUnbatchDataset") do desc = tf.NodeDescription("ExperimentalUnbatchDataset") @@ -11404,7 +11405,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, 
data_format=nothing) local desc tf.with_op_name(name, "AvgPool3DGrad") do desc = tf.NodeDescription("AvgPool3DGrad") @@ -11470,7 +11471,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderWithDefault") do desc = tf.NodeDescription("PlaceholderWithDefault") @@ -11520,7 +11521,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTableV2") do desc = tf.NodeDescription("InitializeTableV2") @@ -11568,7 +11569,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SetSize") do desc = tf.NodeDescription("SetSize") @@ -11620,7 +11621,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) local desc tf.with_op_name(name, "Assert") do desc = tf.NodeDescription("Assert") @@ -11672,7 +11673,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV4") do desc = tf.NodeDescription("NonMaxSuppressionV4") @@ -11738,7 +11739,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") @@ -11825,7 +11826,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; 
name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFile") do desc = tf.NodeDescription("InitializeTableFromTextFile") @@ -11889,7 +11890,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSize") do desc = tf.NodeDescription("LookupTableSize") @@ -11925,7 +11926,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdagradDA") do desc = tf.NodeDescription("SparseApplyAdagradDA") @@ -12010,7 +12011,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastGradientArgs") do desc = tf.NodeDescription("BroadcastGradientArgs") @@ -12058,7 +12059,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) local desc tf.with_op_name(name, "SummaryWriter") do desc = tf.NodeDescription("SummaryWriter") @@ -12102,7 +12103,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) local desc tf.with_op_name(name, "RecvTPUEmbeddingActivations") do desc = tf.NodeDescription("RecvTPUEmbeddingActivations") @@ -12151,7 +12152,7 @@ end output = input; While (Cond(output)) { output = Body(output) } """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "_While") do desc = tf.NodeDescription("_While") @@ -12205,7 +12206,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, 
keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTable") do desc = tf.NodeDescription("InitializeTable") @@ -12253,7 +12254,7 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNumericSummary") do desc = tf.NodeDescription("DebugNumericSummary") @@ -12333,7 +12334,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") @@ -12394,7 +12395,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tanh") do desc = tf.NodeDescription("Tanh") @@ -12432,7 +12433,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "SymbolicGradient") do desc = tf.NodeDescription("SymbolicGradient") @@ -12486,7 +12487,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") @@ -12566,7 +12567,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyMomentum") do desc = tf.NodeDescription("ApplyMomentum") @@ -12636,7 +12637,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderRead") do desc = tf.NodeDescription("ReaderRead") @@ -12681,7 +12682,7 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) local desc tf.with_op_name(name, "_WaitForDistributedTPU") do desc = tf.NodeDescription("_WaitForDistributedTPU") @@ -12729,7 +12730,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) local desc tf.with_op_name(name, "MutexLock") do desc = tf.NodeDescription("MutexLock") @@ -12765,7 +12766,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) local desc tf.with_op_name(name, "AccumulatorSetGlobalStep") do desc = tf.NodeDescription("AccumulatorSetGlobalStep") @@ -12805,7 +12806,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedAdd") do desc = tf.NodeDescription("QuantizedAdd") @@ -12870,7 +12871,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) local desc tf.with_op_name(name, "Squeeze") do desc = tf.NodeDescription("Squeeze") @@ -12914,7 +12915,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) local desc tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") @@ -12950,7 +12951,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") @@ -12994,7 +12995,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") @@ -13054,7 +13055,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) local desc tf.with_op_name(name, "NoOp") do desc @@ -13087,7 +13088,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ZipDataset") do desc = tf.NodeDescription("ZipDataset") @@ -13141,7 +13142,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReaderV2") do desc = tf.NodeDescription("IdentityReaderV2") @@ -13185,7 +13186,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "LMDBReader") do desc = tf.NodeDescription("LMDBReader") @@ -13229,7 +13230,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "NcclAllReduce") do desc = tf.NodeDescription("NcclAllReduce") @@ -13285,7 +13286,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TextLineDataset") do desc = tf.NodeDescription("TextLineDataset") @@ -13329,7 +13330,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) local desc tf.with_op_name(name, "SdcaShrinkL1") do desc = tf.NodeDescription("SdcaShrinkL1") @@ -13383,7 +13384,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReaderV2") do desc = tf.NodeDescription("TFRecordReaderV2") @@ -13433,7 +13434,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") @@ -13481,7 +13482,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDatasetV2") do desc = tf.NodeDescription("PaddedBatchDatasetV2") @@ -13551,7 +13552,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") @@ -13615,7 +13616,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) local desc 
tf.with_op_name(name, "TensorArraySize") do desc = tf.NodeDescription("TensorArraySize") @@ -13655,7 +13656,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapSize") do desc = tf.NodeDescription("OrderedMapSize") @@ -13717,7 +13718,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniform") do desc = tf.NodeDescription("StatelessRandomUniform") @@ -13767,7 +13768,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToSparseSetOperation") do desc = tf.NodeDescription("SparseToSparseSetOperation") @@ -13843,7 +13844,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) local desc tf.with_op_name(name, "TensorSummary") do desc = tf.NodeDescription("TensorSummary") @@ -13899,7 +13900,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) local desc tf.with_op_name(name, "RemoteFusedGraphExecute") do desc = tf.NodeDescription("RemoteFusedGraphExecute") @@ -13953,7 +13954,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) local desc tf.with_op_name(name, "SparseSliceGrad") do desc = tf.NodeDescription("SparseSliceGrad") @@ -14003,7 +14004,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumsum") do desc = tf.NodeDescription("Cumsum") @@ -14060,7 +14061,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") @@ -14135,7 +14136,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPoolGrad") do desc = tf.NodeDescription("AvgPoolGrad") @@ -14201,7 +14202,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "RestoreV2") do desc = tf.NodeDescription("RestoreV2") @@ -14251,7 +14252,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu6") do desc = tf.NodeDescription("Relu6") @@ -14289,7 +14290,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyRMSProp") do desc = tf.NodeDescription("SparseApplyRMSProp") @@ -14375,7 +14376,7 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Recv") do desc = tf.NodeDescription("_Recv") @@ -14443,7 +14444,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool") do desc = tf.NodeDescription("MaxPool") @@ -14505,7 +14506,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) local desc tf.with_op_name(name, "Invert") do desc = tf.NodeDescription("Invert") @@ -14543,7 +14544,7 @@ end *NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) local desc tf.with_op_name(name, "_UnaryOpsComposition") do desc = tf.NodeDescription("_UnaryOpsComposition") @@ -14587,7 +14588,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapDataset") do desc = tf.NodeDescription("ExperimentalMapDataset") @@ -14663,7 +14664,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") @@ -14731,7 +14732,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, 
out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "ParseTensor") do desc = tf.NodeDescription("ParseTensor") @@ -14773,7 +14774,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") @@ -14829,7 +14830,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") @@ -14885,7 +14886,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomUniformInt") do desc = tf.NodeDescription("RandomUniformInt") @@ -14946,7 +14947,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") @@ -14995,7 +14996,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV2") do desc = tf.NodeDescription("TensorArrayReadV2") @@ -15045,7 +15046,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpTo") do desc = tf.NodeDescription("ReaderReadUpTo") @@ -15094,7 +15095,7 @@ end """ 
begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) local desc tf.with_op_name(name, "EncodeProto") do desc = tf.NodeDescription("EncodeProto") @@ -15158,7 +15159,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceGrad") do desc = tf.NodeDescription("StridedSliceGrad") @@ -15286,7 +15287,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceSend") do desc = tf.NodeDescription("_NcclReduceSend") @@ -15342,7 +15343,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDataset") do desc = tf.NodeDescription("PaddedBatchDataset") @@ -15408,7 +15409,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatVecPermute") do desc = tf.NodeDescription("DataFormatVecPermute") @@ -15458,7 +15459,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) local desc tf.with_op_name(name, "StringFormat") do desc = tf.NodeDescription("StringFormat") @@ -15518,7 +15519,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) local desc tf.with_op_name(name, "AsString") do desc = tf.NodeDescription("AsString") @@ -15586,7 +15587,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueMany") do desc = tf.NodeDescription("QueueEnqueueMany") @@ -15638,7 +15639,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "FakeParam") do desc = tf.NodeDescription("FakeParam") @@ -15682,7 +15683,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ApplyAdagrad") do desc = tf.NodeDescription("ApplyAdagrad") @@ -15747,7 +15748,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIteratorGetDevice") do desc = tf.NodeDescription("ExperimentalIteratorGetDevice") @@ -15783,7 +15784,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) local desc tf.with_op_name(name, "AdjustContrast") do desc = tf.NodeDescription("AdjustContrast") @@ -15833,7 +15834,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractImagePatches") do desc = tf.NodeDescription("ExtractImagePatches") @@ -15895,7 +15896,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslate") do desc = tf.NodeDescription("ScaleAndTranslate") @@ -15951,7 +15952,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing) local desc tf.with_op_name(name, "OptionalNone") do desc @@ -15984,7 +15985,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "VariableV2") do desc = tf.NodeDescription("VariableV2") @@ -16040,7 +16041,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Elu") do desc = tf.NodeDescription("Elu") @@ -16078,7 +16079,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterUpdate") do desc = tf.NodeDescription("ScatterUpdate") @@ -16134,7 +16135,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorMod") do desc = tf.NodeDescription("FloorMod") @@ -16177,7 +16178,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") @@ -16225,7 +16226,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") @@ -16285,7 +16286,7 @@ 
end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ComputeAccidentalHits") do desc = tf.NodeDescription("ComputeAccidentalHits") @@ -16348,7 +16349,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "StringToNumber") do desc = tf.NodeDescription("StringToNumber") @@ -16390,7 +16391,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing) local desc tf.with_op_name(name, "Snapshot") do desc = tf.NodeDescription("Snapshot") @@ -16428,7 +16429,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) local desc tf.with_op_name(name, "DeserializeIterator") do desc = tf.NodeDescription("DeserializeIterator") @@ -16468,7 +16469,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atan") do desc = tf.NodeDescription("Atan") @@ -16506,7 +16507,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "MatMul") do desc = tf.NodeDescription("MatMul") @@ -16561,7 +16562,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erfc") do desc = tf.NodeDescription("Erfc") @@ -16599,7 +16600,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SigmoidGrad") do desc = tf.NodeDescription("SigmoidGrad") @@ -16642,7 +16643,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, 
container=nothing, shared_name=nothing, encoding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReaderV2") do desc = tf.NodeDescription("FixedLengthRecordReaderV2") @@ -16716,7 +16717,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV3") do desc = tf.NodeDescription("NonMaxSuppressionV3") @@ -16771,7 +16772,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropInput") do desc = tf.NodeDescription("Dilation2DBackpropInput") @@ -16837,7 +16838,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalOr") do desc = tf.NodeDescription("LogicalOr") @@ -16877,7 +16878,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdadelta") do desc = tf.NodeDescription("ResourceApplyAdadelta") @@ -16948,7 +16949,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToSparseSetOperation") do desc = tf.NodeDescription("DenseToSparseSetOperation") @@ -17016,7 +17017,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProduced") do desc = 
tf.NodeDescription("ReaderNumRecordsProduced") @@ -17052,7 +17053,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) local desc tf.with_op_name(name, "AdjustHue") do desc = tf.NodeDescription("AdjustHue") @@ -17094,7 +17095,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") @@ -17140,7 +17141,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") @@ -17222,7 +17223,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RealDiv") do desc = tf.NodeDescription("RealDiv") @@ -17265,7 +17266,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "RestoreSlice") do desc = tf.NodeDescription("RestoreSlice") @@ -17321,7 +17322,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPopV2") do desc = tf.NodeDescription("StackPopV2") @@ -17363,7 +17364,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) local desc tf.with_op_name(name, "Reverse") do desc = 
tf.NodeDescription("Reverse") @@ -17405,7 +17406,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) local desc tf.with_op_name(name, "DecodePng") do desc = tf.NodeDescription("DecodePng") @@ -17453,7 +17454,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV2") do desc = tf.NodeDescription("NonMaxSuppressionV2") @@ -17504,7 +17505,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igamma") do desc = tf.NodeDescription("Igamma") @@ -17547,7 +17548,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Digamma") do desc = tf.NodeDescription("Digamma") @@ -17585,7 +17586,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdaMax") do desc = tf.NodeDescription("ResourceApplyAdaMax") @@ -17666,7 +17667,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "SpaceToDepth") do desc = tf.NodeDescription("SpaceToDepth") @@ -17716,7 +17717,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SqrtGrad") do desc = tf.NodeDescription("SqrtGrad") @@ -17759,7 +17760,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, 
shared_name=nothing) local desc tf.with_op_name(name, "MapUnstage") do desc = tf.NodeDescription("MapUnstage") @@ -17829,7 +17830,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Qr") do desc = tf.NodeDescription("Qr") @@ -17878,7 +17879,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") @@ -17951,7 +17952,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "UnbatchGrad") do desc = tf.NodeDescription("UnbatchGrad") @@ -18014,7 +18015,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "LogSoftmax") do desc = tf.NodeDescription("LogSoftmax") @@ -18052,7 +18053,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "ResourceCountUpTo") do desc = tf.NodeDescription("ResourceCountUpTo") @@ -18094,7 +18095,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "AccumulateNV2") do desc = tf.NodeDescription("AccumulateNV2") @@ -18144,7 +18145,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; 
name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ParallelMapDataset") do desc = tf.NodeDescription("ParallelMapDataset") @@ -18230,7 +18231,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomUniform") do desc = tf.NodeDescription("RandomUniform") @@ -18286,7 +18287,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeTranscode") do desc = tf.NodeDescription("UnicodeTranscode") @@ -18352,7 +18353,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReset") do desc = tf.NodeDescription("ReaderReset") @@ -18388,7 +18389,7 @@ end Replacement node for NcclBroadcast. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastSend") do desc = tf.NodeDescription("_NcclBroadcastSend") @@ -18438,7 +18439,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDeterminant") do desc = tf.NodeDescription("BatchMatrixDeterminant") @@ -18476,7 +18477,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LessEqual") do desc = tf.NodeDescription("LessEqual") @@ -18519,7 +18520,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyGradientDescent") do desc = tf.NodeDescription("ApplyGradientDescent") @@ -18573,7 +18574,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtN") do desc = tf.NodeDescription("SparseSegmentSqrtN") @@ -18622,7 +18623,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixLogarithm") do desc = tf.NodeDescription("MatrixLogarithm") @@ -18660,7 +18661,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMul") do desc = tf.NodeDescription("ScatterMul") @@ -18716,7 +18717,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, 
acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeJpeg") do desc = tf.NodeDescription("DecodeJpeg") @@ -18788,7 +18789,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueueV2") do desc = tf.NodeDescription("RandomShuffleQueueV2") @@ -18868,7 +18869,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueManyV2") do desc = tf.NodeDescription("QueueEnqueueManyV2") @@ -18920,7 +18921,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") @@ -19007,7 +19008,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "InterleaveDataset") do desc = tf.NodeDescription("InterleaveDataset") @@ -19079,7 +19080,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPop") do desc = tf.NodeDescription("StackPop") @@ -19121,7 +19122,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolV2") do desc = 
tf.NodeDescription("MaxPoolV2") @@ -19179,7 +19180,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") @@ -19223,7 +19224,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) local desc tf.with_op_name(name, "LoadAndRemapMatrix") do desc = tf.NodeDescription("LoadAndRemapMatrix") @@ -19293,7 +19294,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalGradientDescent") do desc = tf.NodeDescription("SparseApplyProximalGradientDescent") @@ -19364,7 +19365,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFuncStateless") do desc = tf.NodeDescription("PyFuncStateless") @@ -19418,7 +19419,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) local desc tf.with_op_name(name, "Where") do desc = tf.NodeDescription("Where") @@ -19456,7 +19457,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) local desc tf.with_op_name(name, "Mfcc") do desc = tf.NodeDescription("Mfcc") @@ -19520,7 +19521,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) local desc tf.with_op_name(name, "CheckNumerics") do desc = tf.NodeDescription("CheckNumerics") @@ -19564,7 +19565,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) local desc tf.with_op_name(name, "TPUCompilationResult") do desc @@ -19597,7 +19598,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") @@ -19653,7 +19654,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanGrad") do desc = tf.NodeDescription("SparseSegmentMeanGrad") @@ -19706,7 +19707,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "TryRpc") do desc = tf.NodeDescription("TryRpc") @@ -19773,7 +19774,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixTriangularSolve") do desc = tf.NodeDescription("BatchMatrixTriangularSolve") @@ -19828,7 +19829,7 @@ end A graph node which represents a return value of a function. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Retval") do desc = tf.NodeDescription("_Retval") @@ -19872,7 +19873,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCounts") do desc = tf.NodeDescription("UniqueWithCounts") @@ -19921,7 +19922,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Add") do desc = tf.NodeDescription("Add") @@ -19964,7 +19965,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalScanDataset") do desc = tf.NodeDescription("ExperimentalScanDataset") @@ -20044,7 +20045,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignAddVariableOp") do desc = tf.NodeDescription("AssignAddVariableOp") @@ -20092,7 +20093,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SplitV") do desc = tf.NodeDescription("SplitV") @@ -20152,7 +20153,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) local desc tf.with_op_name(name, "Assign") do desc = tf.NodeDescription("Assign") @@ -20207,7 +20208,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolWithArgmax") do desc = tf.NodeDescription("MaxPoolWithArgmax") @@ -20268,7 +20269,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedReluX") do desc = tf.NodeDescription("QuantizedReluX") @@ -20329,7 +20330,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueue") do desc = tf.NodeDescription("RandomShuffleQueue") @@ -20409,7 +20410,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT2D") do desc = tf.NodeDescription("FFT2D") @@ -20447,7 +20448,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalThreadPoolDataset") @@ -20499,7 +20500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") @@ -20557,7 +20558,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNGrad") do desc = tf.NodeDescription("SparseSegmentSqrtNGrad") @@ -20610,7 +20611,7 
@@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) local desc tf.with_op_name(name, "Real") do desc = tf.NodeDescription("Real") @@ -20648,7 +20649,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstage") do desc = tf.NodeDescription("OrderedMapUnstage") @@ -20718,7 +20719,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT2D") do desc = tf.NodeDescription("RFFT2D") @@ -20758,7 +20759,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) local desc tf.with_op_name(name, "VarIsInitializedOp") do desc = tf.NodeDescription("VarIsInitializedOp") @@ -20794,7 +20795,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") @@ -20838,7 +20839,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) local desc tf.with_op_name(name, "Atan2") do desc = tf.NodeDescription("Atan2") @@ -20881,7 +20882,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoisson") do desc = tf.NodeDescription("RandomPoisson") @@ -20949,7 +20950,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, 
batch_dim=nothing) local desc tf.with_op_name(name, "ReverseSequence") do desc = tf.NodeDescription("ReverseSequence") @@ -21005,7 +21006,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "OutfeedEnqueue") do desc = tf.NodeDescription("OutfeedEnqueue") @@ -21049,7 +21050,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Sub") do desc = tf.NodeDescription("Sub") @@ -21092,7 +21093,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) local desc tf.with_op_name(name, "StringSplit") do desc = tf.NodeDescription("StringSplit") @@ -21143,7 +21144,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumprod") do desc = tf.NodeDescription("Cumprod") @@ -21200,7 +21201,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "QuantizedResizeBilinear") do desc = tf.NodeDescription("QuantizedResizeBilinear") @@ -21261,7 +21262,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleExample") do desc = tf.NodeDescription("ParseSingleExample") @@ -21342,7 +21343,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "IsVariableInitialized") do desc = tf.NodeDescription("IsVariableInitialized") @@ -21386,7 +21387,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") @@ -21430,7 +21431,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListConcatV2") do desc = tf.NodeDescription("TensorListConcatV2") @@ -21493,7 +21494,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV2") do desc = tf.NodeDescription("CudnnRNNV2") @@ -21593,7 +21594,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterSub") do desc = tf.NodeDescription("ResourceScatterSub") @@ -21648,7 +21649,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignAdd") do desc = tf.NodeDescription("AssignAdd") @@ -21697,7 +21698,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorDataset") do desc = tf.NodeDescription("TensorDataset") @@ -21745,7 +21746,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) local desc tf.with_op_name(name, "Bucketize") do desc = tf.NodeDescription("Bucketize") @@ -21789,7 +21790,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMax") do desc = tf.NodeDescription("SparseReduceMax") @@ -21845,7 +21846,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") @@ -21906,7 +21907,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradWithShape") do desc = tf.NodeDescription("TensorArrayGradWithShape") @@ -21961,7 +21962,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV3") do desc = tf.NodeDescription("TensorArrayCloseV3") @@ -21997,7 +21998,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") @@ -22049,7 +22050,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) local desc tf.with_op_name(name, "Pack") do desc = tf.NodeDescription("Pack") @@ -22105,7 +22106,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV2") do desc = tf.NodeDescription("TensorArrayGradV2") @@ -22151,7 
+22152,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignSubVariableOp") do desc = tf.NodeDescription("AssignSubVariableOp") @@ -22199,7 +22200,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT2D") do desc = tf.NodeDescription("BatchFFT2D") @@ -22235,7 +22236,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "CloseSummaryWriter") do desc = tf.NodeDescription("CloseSummaryWriter") @@ -22271,7 +22272,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) local desc tf.with_op_name(name, "Rank") do desc = tf.NodeDescription("Rank") @@ -22309,7 +22310,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT3D") do desc = tf.NodeDescription("FFT3D") @@ -22347,7 +22348,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrl") do desc = tf.NodeDescription("ApplyFtrl") @@ -22426,7 +22427,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) local desc tf.with_op_name(name, "Abort") do desc = tf.NodeDescription("Abort") @@ -22470,7 +22471,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) local desc tf.with_op_name(name, "AudioSpectrogram") do desc = tf.NodeDescription("AudioSpectrogram") @@ -22524,7 +22525,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, 
out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "VariableShape") do desc = tf.NodeDescription("VariableShape") @@ -22566,7 +22567,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueueV2") do desc = tf.NodeDescription("FIFOQueueV2") @@ -22628,7 +22629,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Variable") do desc = tf.NodeDescription("Variable") @@ -22684,7 +22685,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestCreateTreeVariable") do desc = tf.NodeDescription("TensorForestCreateTreeVariable") @@ -22724,7 +22725,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradWithArgmax") @@ -22791,7 +22792,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "RefSwitch") do desc = tf.NodeDescription("RefSwitch") @@ -22838,7 +22839,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) local desc tf.with_op_name(name, "SdcaFprint") do desc = tf.NodeDescription("SdcaFprint") @@ -22874,7 +22875,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, 
N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalChooseFastestDataset") do desc = tf.NodeDescription("ExperimentalChooseFastestDataset") @@ -22934,7 +22935,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyRelu") do desc = tf.NodeDescription("LeakyRelu") @@ -22978,7 +22979,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) local desc tf.with_op_name(name, "IdentityN") do desc = tf.NodeDescription("IdentityN") @@ -23020,7 +23021,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV2") do desc = tf.NodeDescription("CudnnRNNBackpropV2") @@ -23153,7 +23154,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "RequantizationRange") do desc = tf.NodeDescription("RequantizationRange") @@ -23204,7 +23205,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Maximum") do desc = tf.NodeDescription("Maximum") @@ -23247,7 +23248,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) local desc tf.with_op_name(name, "Reshape") do desc = tf.NodeDescription("Reshape") @@ -23291,7 +23292,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "MatrixSolveLs") do desc = 
tf.NodeDescription("MatrixSolveLs") @@ -23344,7 +23345,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TFRecordDataset") do desc = tf.NodeDescription("TFRecordDataset") @@ -23388,7 +23389,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") @@ -23440,7 +23441,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) local desc tf.with_op_name(name, "HSVToRGB") do desc = tf.NodeDescription("HSVToRGB") @@ -23478,7 +23479,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") @@ -23530,7 +23531,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterDiv") do desc = tf.NodeDescription("ScatterDiv") @@ -23586,7 +23587,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) local desc tf.with_op_name(name, "DecodeWav") do desc = tf.NodeDescription("DecodeWav") @@ -23639,7 +23640,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log") do desc = tf.NodeDescription("Log") @@ -23677,7 +23678,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "SaveV2") do desc = tf.NodeDescription("SaveV2") @@ -23731,7 +23732,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) local desc tf.with_op_name(name, "DeepCopy") do desc = tf.NodeDescription("DeepCopy") @@ -23769,7 +23770,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ModelDataset") do desc = tf.NodeDescription("ModelDataset") @@ -23817,7 +23818,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSequenceExample") do desc = tf.NodeDescription("ParseSequenceExample") @@ -23956,7 +23957,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sinh") do desc = tf.NodeDescription("Sinh") @@ -23994,7 +23995,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorV2") do desc = tf.NodeDescription("IteratorV2") @@ -24050,7 +24051,7 @@ end """ 
begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV2") do desc = tf.NodeDescription("TensorArrayWriteV2") @@ -24100,7 +24101,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListElementShape") do desc = tf.NodeDescription("TensorListElementShape") @@ -24142,7 +24143,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSizeV2") do desc = tf.NodeDescription("QueueSizeV2") @@ -24178,7 +24179,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) local desc tf.with_op_name(name, "Expm1") do desc = tf.NodeDescription("Expm1") @@ -24216,7 +24217,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixBandPart") do desc = tf.NodeDescription("BatchMatrixBandPart") @@ -24262,7 +24263,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ConcatenateDataset") do desc = tf.NodeDescription("ConcatenateDataset") @@ -24314,7 +24315,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_gif_graph(contents_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_gif_graph(contents_; name=nothing) local desc tf.with_op_name(name, "DecodeGif") do desc = tf.NodeDescription("DecodeGif") @@ -24350,7 +24351,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, 
step_marker_location=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) local desc tf.with_op_name(name, "TPUReplicate") do desc = tf.NodeDescription("TPUReplicate") @@ -24482,7 +24483,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEigV2") do desc = tf.NodeDescription("BatchSelfAdjointEigV2") @@ -24531,7 +24532,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Shape") do desc = tf.NodeDescription("Shape") @@ -24575,7 +24576,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RepeatDataset") do desc = tf.NodeDescription("RepeatDataset") @@ -24627,7 +24628,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradBoxes") do desc = tf.NodeDescription("CropAndResizeGradBoxes") @@ -24683,7 +24684,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "ReciprocalGrad") do desc = tf.NodeDescription("ReciprocalGrad") @@ -24726,7 +24727,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixSolve") do desc = tf.NodeDescription("BatchMatrixSolve") @@ -24775,7 +24776,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, 
use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTableV2") do desc = tf.NodeDescription("MutableHashTableV2") @@ -24837,7 +24838,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "Exit") do desc = tf.NodeDescription("Exit") @@ -24875,7 +24876,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRN") do desc = tf.NodeDescription("LRN") @@ -24937,7 +24938,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "StatelessIf") do desc = tf.NodeDescription("StatelessIf") @@ -25003,7 +25004,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListSetItem") do desc = tf.NodeDescription("TensorListSetItem") @@ -25055,7 +25056,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rsqrt") do desc = tf.NodeDescription("Rsqrt") @@ -25093,7 +25094,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do desc = 
tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") @@ -25210,7 +25211,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) local desc tf.with_op_name(name, "DeleteSessionTensor") do desc = tf.NodeDescription("DeleteSessionTensor") @@ -25246,7 +25247,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) local desc tf.with_op_name(name, "OneHot") do desc = tf.NodeDescription("OneHot") @@ -25312,7 +25313,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrl") do desc = tf.NodeDescription("ResourceApplyFtrl") @@ -25388,7 +25389,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizerV2") do desc = tf.NodeDescription("SdcaOptimizerV2") @@ -25519,7 +25520,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueue") do desc = tf.NodeDescription("QueueEnqueue") @@ -25571,7 +25572,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; 
name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "ConditionalAccumulator") do desc = tf.NodeDescription("ConditionalAccumulator") @@ -25633,7 +25634,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCBeamSearchDecoder") do desc = tf.NodeDescription("CTCBeamSearchDecoder") @@ -25696,7 +25697,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReader") do desc = tf.NodeDescription("WholeFileReader") @@ -25740,7 +25741,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyRMSProp") do desc = tf.NodeDescription("ApplyRMSProp") @@ -25819,7 +25820,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) local desc tf.with_op_name(name, "AdjustSaturation") do desc = tf.NodeDescription("AdjustSaturation") @@ -25861,7 +25862,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) local desc tf.with_op_name(name, "LookupTableRemoveV2") do desc = tf.NodeDescription("LookupTableRemoveV2") @@ -25903,7 +25904,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueClose") do desc = tf.NodeDescription("QueueClose") @@ -25945,7 +25946,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, 
"PrefetchDataset") do desc = tf.NodeDescription("PrefetchDataset") @@ -25997,7 +25998,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "MapDataset") do desc = tf.NodeDescription("MapDataset") @@ -26073,7 +26074,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBias") do desc = tf.NodeDescription("QuantizedConv2DWithBias") @@ -26166,7 +26167,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV3") do desc = tf.NodeDescription("TensorArrayReadV3") @@ -26216,7 +26217,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "Identity") do desc = tf.NodeDescription("Identity") @@ -26254,7 +26255,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) local desc tf.with_op_name(name, "Print") do desc = tf.NodeDescription("Print") @@ -26320,7 +26321,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastSend") do desc = tf.NodeDescription("CollectiveBcastSend") @@ -26382,7 +26383,7 @@ end Converts a list of tensors to an array of tensors. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) local desc tf.with_op_name(name, "_ListToArray") do desc = tf.NodeDescription("_ListToArray") @@ -26435,7 +26436,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) local desc tf.with_op_name(name, "NegTrain") do desc = tf.NodeDescription("NegTrain") @@ -26499,7 +26500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) local desc tf.with_op_name(name, "WorkerHeartbeat") do desc = tf.NodeDescription("WorkerHeartbeat") @@ -26535,7 +26536,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) local desc tf.with_op_name(name, "MergeV2Checkpoints") do desc = tf.NodeDescription("MergeV2Checkpoints") @@ -26581,7 +26582,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) local desc tf.with_op_name(name, "CollectivePermute") do desc = tf.NodeDescription("CollectivePermute") @@ -26623,7 +26624,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV3") do desc = tf.NodeDescription("QuantizeAndDequantizeV3") @@ -26687,7 +26688,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTable") do desc = tf.NodeDescription("HashTable") @@ -26749,7 +26750,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftplusGrad") do desc = tf.NodeDescription("SoftplusGrad") @@ -26792,7 +26793,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReader") do desc = tf.NodeDescription("FixedLengthRecordReader") @@ -26860,7 +26861,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV2") do desc = tf.NodeDescription("TensorArrayScatterV2") @@ -26910,7 +26911,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) local desc tf.with_op_name(name, "DecodeJSONExample") do desc = tf.NodeDescription("DecodeJSONExample") @@ -26946,7 +26947,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGradV2") do desc = tf.NodeDescription("FusedBatchNormGradV2") @@ -27033,7 +27034,7 @@ end Cast x of type SrcT to y of DstT. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "_HostCast") do desc = tf.NodeDescription("_HostCast") @@ -27089,7 +27090,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReader") do desc = tf.NodeDescription("TFRecordReader") @@ -27139,7 +27140,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "While") do desc = tf.NodeDescription("While") @@ -27205,7 +27206,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "StatelessMultinomial") do desc = tf.NodeDescription("StatelessMultinomial") @@ -27259,7 +27260,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterAdd") do desc = tf.NodeDescription("ScatterAdd") @@ -27315,7 +27316,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) local desc tf.with_op_name(name, "Conj") do desc = tf.NodeDescription("Conj") @@ -27353,7 +27354,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ParallelDynamicStitch") do desc = tf.NodeDescription("ParallelDynamicStitch") @@ -27401,7 +27402,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) local desc 
tf.with_op_name(name, "MakeIterator") do desc = tf.NodeDescription("MakeIterator") @@ -27441,7 +27442,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT3D") do desc = tf.NodeDescription("RFFT3D") @@ -27481,7 +27482,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSumSparse") do desc = tf.NodeDescription("SparseReduceSumSparse") @@ -27542,7 +27543,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveGather") do desc = tf.NodeDescription("CollectiveGather") @@ -27604,7 +27605,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) local desc tf.with_op_name(name, "CombinedNonMaxSuppression") do desc = tf.NodeDescription("CombinedNonMaxSuppression") @@ -27671,7 +27672,7 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) local desc tf.with_op_name(name, "_ScopedAllocator") do desc = tf.NodeDescription("_ScopedAllocator") @@ -27733,7 +27734,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do desc = 
tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") @@ -27801,7 +27802,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) local desc tf.with_op_name(name, "SparseAdd") do desc = tf.NodeDescription("SparseAdd") @@ -27871,7 +27872,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCGreedyDecoder") do desc = tf.NodeDescription("CTCGreedyDecoder") @@ -27922,7 +27923,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) local desc tf.with_op_name(name, "ImmutableConst") do desc = tf.NodeDescription("ImmutableConst") @@ -27972,7 +27973,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) local desc tf.with_op_name(name, "ConsumeMutexLock") do desc = tf.NodeDescription("ConsumeMutexLock") @@ -28008,7 +28009,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "GreaterEqual") do desc = tf.NodeDescription("GreaterEqual") @@ -28051,7 +28052,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFileV2") do desc = tf.NodeDescription("InitializeTableFromTextFileV2") @@ -28115,7 +28116,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeue") do desc = tf.NodeDescription("QueueDequeue") @@ -28163,7 +28164,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Equal") do desc = tf.NodeDescription("Equal") @@ -28206,7 +28207,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandle") do desc = tf.NodeDescription("IteratorFromStringHandle") @@ -28254,7 +28255,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListSplit") do desc = tf.NodeDescription("TensorListSplit") @@ -28314,7 +28315,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalMaxPool") do desc = tf.NodeDescription("FractionalMaxPool") @@ -28393,7 +28394,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) local desc tf.with_op_name(name, "ScatterNd") do desc = tf.NodeDescription("ScatterNd") @@ -28443,7 +28444,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListScatterIntoExistingList") do desc = tf.NodeDescription("TensorListScatterIntoExistingList") @@ -28495,7 +28496,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) local desc tf.with_op_name(name, "Select") do desc = tf.NodeDescription("Select") @@ -28542,7 +28543,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Min") do desc = tf.NodeDescription("Min") @@ -28593,7 +28594,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRNGrad") do desc = tf.NodeDescription("LRNGrad") @@ -28665,7 +28666,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoissonV2") do desc = tf.NodeDescription("RandomPoissonV2") @@ -28739,7 +28740,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueue") do desc = tf.NodeDescription("FIFOQueue") @@ -28801,7 +28802,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") @@ -28871,7 +28872,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalNonSerializableDataset") do desc = tf.NodeDescription("ExperimentalNonSerializableDataset") @@ -28919,7 +28920,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc 
tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") @@ -28971,7 +28972,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropFilter") do desc = tf.NodeDescription("Dilation2DBackpropFilter") @@ -29037,7 +29038,7 @@ end output = cond ? then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "_If") do desc = tf.NodeDescription("_If") @@ -29103,7 +29104,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAddGrad") do desc = tf.NodeDescription("BiasAddGrad") @@ -29147,7 +29148,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeStateV2") do desc = tf.NodeDescription("ReaderSerializeStateV2") @@ -29183,7 +29184,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "WrapDatasetVariant") do desc = tf.NodeDescription("WrapDatasetVariant") @@ -29219,7 +29220,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ParallelInterleaveDatasetV2") do desc = tf.NodeDescription("ParallelInterleaveDatasetV2") @@ -29301,7 +29302,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, 
strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") @@ -29372,7 +29373,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyRMSProp") do desc = tf.NodeDescription("ResourceApplyRMSProp") @@ -29448,7 +29449,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "SparseAccumulatorTakeGradient") do desc = tf.NodeDescription("SparseAccumulatorTakeGradient") @@ -29499,7 +29500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLMDBDataset") do desc = tf.NodeDescription("ExperimentalLMDBDataset") @@ -29547,7 +29548,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackCloseV2") do desc = tf.NodeDescription("StackCloseV2") @@ -29583,7 +29584,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapSize") do desc = tf.NodeDescription("MapSize") @@ -29645,7 +29646,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, 
"ResourceApplyAdagradDA") do desc = tf.NodeDescription("ResourceApplyAdagradDA") @@ -29720,7 +29721,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSize") do desc = tf.NodeDescription("TensorForestTreeSize") @@ -29756,7 +29757,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDiagPart") do desc = tf.NodeDescription("MatrixDiagPart") @@ -29794,7 +29795,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") @@ -29830,7 +29831,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV3") do desc = tf.NodeDescription("TensorArraySplitV3") @@ -29880,7 +29881,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToDense") do desc = tf.NodeDescription("SparseToDense") @@ -29942,7 +29943,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "TPUReplicatedInput") do desc = tf.NodeDescription("TPUReplicatedInput") @@ -29986,7 +29987,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackClose") do desc = tf.NodeDescription("StackClose") @@ -30022,7 +30023,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) 
local desc tf.with_op_name(name, "DeserializeManySparse") do desc = tf.NodeDescription("DeserializeManySparse") @@ -30069,7 +30070,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceRecv") do desc = tf.NodeDescription("_NcclReduceRecv") @@ -30125,7 +30126,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPadGrad") do desc = tf.NodeDescription("MirrorPadGrad") @@ -30175,7 +30176,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastArgs") do desc = tf.NodeDescription("BroadcastArgs") @@ -30218,7 +30219,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessTruncatedNormal") do desc = tf.NodeDescription("StatelessTruncatedNormal") @@ -30268,7 +30269,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) local desc tf.with_op_name(name, "RegexFullMatch") do desc = tf.NodeDescription("RegexFullMatch") @@ -30308,7 +30309,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "UnwrapDatasetVariant") do desc = tf.NodeDescription("UnwrapDatasetVariant") @@ -30344,7 +30345,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) local desc tf.with_op_name(name, "Empty") do desc = tf.NodeDescription("Empty") @@ -30392,7 +30393,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, 
dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeueTuple") do desc = tf.NodeDescription("OutfeedDequeueTuple") @@ -30442,7 +30443,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Div") do desc = tf.NodeDescription("Div") @@ -30485,7 +30486,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Barrier") do desc = tf.NodeDescription("Barrier") @@ -30547,7 +30548,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateDiv") do desc = tf.NodeDescription("TruncateDiv") @@ -30590,7 +30591,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) local desc tf.with_op_name(name, "UnicodeEncode") do desc = tf.NodeDescription("UnicodeEncode") @@ -30648,7 +30649,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "MergeSummary") do desc = tf.NodeDescription("MergeSummary") @@ -30690,7 +30691,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_queue_graph(resource_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_queue_graph(resource_; name=nothing) local desc tf.with_op_name(name, "FakeQueue") do desc = tf.NodeDescription("FakeQueue") @@ -30726,7 +30727,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchCholesky") do desc = tf.NodeDescription("BatchCholesky") @@ -30764,7 +30765,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, 
output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Iterator") do desc = tf.NodeDescription("Iterator") @@ -30820,7 +30821,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI1e") do desc = tf.NodeDescription("BesselI1e") @@ -30858,7 +30859,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) local desc tf.with_op_name(name, "ImportEvent") do desc = tf.NodeDescription("ImportEvent") @@ -30898,7 +30899,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) local desc tf.with_op_name(name, "QuantizedInstanceNorm") do desc = tf.NodeDescription("QuantizedInstanceNorm") @@ -30979,7 +30980,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") @@ -31043,7 +31044,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV3") do desc = tf.NodeDescription("TensorArrayWriteV3") @@ -31093,7 +31094,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToDenseSetOperation") do desc = tf.NodeDescription("DenseToDenseSetOperation") @@ -31153,7 +31154,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, 
y_density=nothing, xmp_metadata=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) local desc tf.with_op_name(name, "EncodeJpeg") do desc = tf.NodeDescription("EncodeJpeg") @@ -31243,7 +31244,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceUpdate") do desc = tf.NodeDescription("InplaceUpdate") @@ -31290,7 +31291,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedPadConv2D") do desc = tf.NodeDescription("FusedPadConv2D") @@ -31355,7 +31356,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu") do desc = tf.NodeDescription("QuantizedRelu") @@ -31412,7 +31413,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) local desc tf.with_op_name(name, "GatherNd") do desc = tf.NodeDescription("GatherNd") @@ -31457,7 +31458,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "Placeholder") do desc = tf.NodeDescription("Placeholder") @@ -31501,7 +31502,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterByLastComponentDataset") do desc = tf.NodeDescription("FilterByLastComponentDataset") @@ -31549,7 +31550,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; 
name=nothing) local desc tf.with_op_name(name, "ClipByValue") do desc = tf.NodeDescription("ClipByValue") @@ -31597,7 +31598,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) local desc tf.with_op_name(name, "ImageSummary") do desc = tf.NodeDescription("ImageSummary") @@ -31651,7 +31652,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") @@ -31712,7 +31713,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) local desc tf.with_op_name(name, "StringJoin") do desc = tf.NodeDescription("StringJoin") @@ -31760,7 +31761,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdAdd") do desc = tf.NodeDescription("ResourceScatterNdAdd") @@ -31815,7 +31816,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") @@ -31861,7 +31862,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LeftShift") do desc = tf.NodeDescription("LeftShift") @@ -31904,7 +31905,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, 
input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "RequantizePerChannel") do desc = tf.NodeDescription("RequantizePerChannel") @@ -31969,7 +31970,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterAdd") do desc = tf.NodeDescription("TensorScatterAdd") @@ -32019,7 +32020,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "_VarHandlesOp") do desc = tf.NodeDescription("_VarHandlesOp") @@ -32086,7 +32087,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT3D") do desc = tf.NodeDescription("IFFT3D") @@ -32124,7 +32125,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "EuclideanNorm") do desc = tf.NodeDescription("EuclideanNorm") @@ -32175,7 +32176,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefSelect") do desc = tf.NodeDescription("RefSelect") @@ -32223,7 +32224,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) local desc tf.with_op_name(name, "SparseTensorSliceDataset") do desc = tf.NodeDescription("SparseTensorSliceDataset") @@ -32269,7 +32270,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do desc = 
tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") @@ -32330,7 +32331,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT2D") do desc = tf.NodeDescription("BatchIFFT2D") @@ -32366,7 +32367,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGather") do desc = tf.NodeDescription("TensorArrayGather") @@ -32422,7 +32423,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") @@ -32477,7 +32478,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "EnsureShape") do desc = tf.NodeDescription("EnsureShape") @@ -32521,7 +32522,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalGradientDescent") do desc = tf.NodeDescription("ApplyProximalGradientDescent") @@ -32585,7 +32586,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) local desc tf.with_op_name(name, "CollectiveReduce") do desc = tf.NodeDescription("CollectiveReduce") @@ -32665,7 +32666,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsNan") do desc = tf.NodeDescription("IsNan") @@ -32703,7 +32704,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdaMax") do desc = tf.NodeDescription("ApplyAdaMax") @@ -32787,7 +32788,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeAndCropJpeg") do desc = tf.NodeDescription("DecodeAndCropJpeg") @@ -32863,7 +32864,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyCenteredRMSProp") do desc = tf.NodeDescription("ApplyCenteredRMSProp") @@ -32947,7 +32948,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilterV2") do desc = tf.NodeDescription("Conv3DBackpropFilterV2") @@ -33018,7 +33019,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixTriangularSolve") do desc = tf.NodeDescription("MatrixTriangularSolve") @@ -33073,7 +33074,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") @@ -33109,7 +33110,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; 
name=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "WriteAudioSummary") do desc = tf.NodeDescription("WriteAudioSummary") @@ -33167,7 +33168,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilespec") do desc = tf.NodeDescription("ShardedFilespec") @@ -33207,7 +33208,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "DivNoNan") do desc = tf.NodeDescription("DivNoNan") @@ -33250,7 +33251,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) local desc tf.with_op_name(name, "SparseAccumulatorApplyGradient") do desc = tf.NodeDescription("SparseAccumulatorApplyGradient") @@ -33316,7 +33317,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedTensorToSparse") do desc = tf.NodeDescription("RaggedTensorToSparse") @@ -33369,7 +33370,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractVolumePatches") do desc = tf.NodeDescription("ExtractVolumePatches") @@ -33425,7 +33426,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) local desc tf.with_op_name(name, "BarrierInsertMany") do desc = tf.NodeDescription("BarrierInsertMany") @@ -33483,7 +33484,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "Const") do desc = tf.NodeDescription("Const") @@ -33527,7 +33528,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "SpaceToBatch") do desc = tf.NodeDescription("SpaceToBatch") @@ -33577,7 +33578,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageSize") do desc = tf.NodeDescription("StageSize") @@ -33639,7 +33640,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "EmptyTensorList") do desc = tf.NodeDescription("EmptyTensorList") @@ -33693,7 +33694,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndRequantize") @@ -33790,7 +33791,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) local desc tf.with_op_name(name, "Lu") do desc = tf.NodeDescription("Lu") @@ -33839,7 +33840,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "DecodeCompressed") do desc = tf.NodeDescription("DecodeCompressed") @@ -33881,7 +33882,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "GetSessionTensor") do desc = tf.NodeDescription("GetSessionTensor") @@ -33923,7 +33924,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV3") do desc = tf.NodeDescription("TensorArrayGatherV3") @@ -33979,7 +33980,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") @@ -34051,7 +34052,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) local desc tf.with_op_name(name, "DestroyResourceOp") do desc = tf.NodeDescription("DestroyResourceOp") @@ -34093,7 +34094,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReader") do desc = tf.NodeDescription("TextLineReader") @@ -34143,7 +34144,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryDbWriter") do desc = tf.NodeDescription("CreateSummaryDbWriter") @@ -34195,7 +34196,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "TanhGrad") do desc = tf.NodeDescription("TanhGrad") @@ -34238,7 +34239,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) local desc tf.with_op_name(name, "DecodeBase64") do desc = tf.NodeDescription("DecodeBase64") @@ -34274,7 +34275,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradV2") do desc = tf.NodeDescription("MaxPoolGradGradV2") @@ -34342,7 +34343,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummaryV2") do desc = tf.NodeDescription("AudioSummaryV2") @@ -34392,7 +34393,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "StatefulPartitionedCall") do desc = tf.NodeDescription("StatefulPartitionedCall") @@ -34464,7 +34465,7 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorConcat") do desc = tf.NodeDescription("_ScopedAllocatorConcat") @@ -34537,7 +34538,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") @@ -34601,7 +34602,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc 
tf.with_op_name(name, "BatchSvd") do desc = tf.NodeDescription("BatchSvd") @@ -34656,7 +34657,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapStage") do desc = tf.NodeDescription("MapStage") @@ -34736,7 +34737,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrl") do desc = tf.NodeDescription("ResourceSparseApplyFtrl") @@ -34819,7 +34820,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighbor") do desc = tf.NodeDescription("ResizeNearestNeighbor") @@ -34867,7 +34868,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalCSVDataset") do desc = tf.NodeDescription("ExperimentalCSVDataset") @@ -34947,7 +34948,7 @@ end Returns x * y element-wise. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMul") do desc = tf.NodeDescription("_MklMul") @@ -35003,7 +35004,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiag") do desc = tf.NodeDescription("BatchMatrixDiag") @@ -35041,7 +35042,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsInf") do desc = tf.NodeDescription("IsInf") @@ -35079,7 +35080,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FixedUnigramCandidateSampler") do desc = tf.NodeDescription("FixedUnigramCandidateSampler") @@ -35192,7 +35193,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrlV2") do desc = tf.NodeDescription("SparseApplyFtrlV2") @@ -35283,7 +35284,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) local desc tf.with_op_name(name, "UnravelIndex") do desc = tf.NodeDescription("UnravelIndex") @@ -35328,7 +35329,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Max") do desc = tf.NodeDescription("Max") @@ -35379,7 +35380,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT2D") do desc = tf.NodeDescription("IFFT2D") @@ -35417,7 +35418,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) local desc tf.with_op_name(name, "SparseConcat") do desc = tf.NodeDescription("SparseConcat") @@ -35486,7 +35487,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) local desc tf.with_op_name(name, "HistogramSummary") do desc = tf.NodeDescription("HistogramSummary") @@ -35528,7 +35529,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentSum") do desc = tf.NodeDescription("SegmentSum") @@ -35573,7 +35574,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) local desc tf.with_op_name(name, "Exp") do desc = tf.NodeDescription("Exp") @@ -35611,7 +35612,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) local desc tf.with_op_name(name, "ConfigureDistributedTPU") do desc = tf.NodeDescription("ConfigureDistributedTPU") @@ -35661,7 +35662,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdSub") do desc = tf.NodeDescription("ResourceScatterNdSub") @@ -35716,7 +35717,7 @@ end A placeholder op for multiple values that will be sent from TensorFlow to a """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaSendFromHost") do desc = tf.NodeDescription("_XlaSendFromHost") @@ -35774,7 
+35775,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandleV2") do desc = tf.NodeDescription("GetSessionHandleV2") @@ -35812,7 +35813,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "ReluGrad") do desc = tf.NodeDescription("ReluGrad") @@ -35855,7 +35856,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMin") do desc = tf.NodeDescription("UnsortedSegmentMin") @@ -35906,7 +35907,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseExample") do desc = tf.NodeDescription("ParseExample") @@ -35993,7 +35994,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueV2") do desc = tf.NodeDescription("QueueEnqueueV2") @@ -36045,7 +36046,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdAdd") do desc = tf.NodeDescription("ScatterNdAdd") @@ -36101,7 +36102,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProducedV2") do desc = tf.NodeDescription("ReaderNumRecordsProducedV2") @@ -36137,7 +36138,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; 
name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") @@ -36209,7 +36210,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignSub") do desc = tf.NodeDescription("AssignSub") @@ -36258,7 +36259,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentSum") do desc = tf.NodeDescription("UnsortedSegmentSum") @@ -36309,7 +36310,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGrad") do desc = tf.NodeDescription("FusedBatchNormGrad") @@ -36390,7 +36391,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradV2") do desc = tf.NodeDescription("MaxPoolGradV2") @@ -36458,7 +36459,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") @@ -36551,7 +36552,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateEnsemble") do desc = tf.NodeDescription("BoostedTreesCreateEnsemble") @@ -36595,7 +36596,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapIncompleteSize") do desc = tf.NodeDescription("OrderedMapIncompleteSize") @@ -36657,7 +36658,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) local desc tf.with_op_name(name, "Skipgram") do desc = tf.NodeDescription("Skipgram") @@ -36724,7 +36725,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMin") do desc = tf.NodeDescription("ArgMin") @@ -36775,7 +36776,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueMany") do desc = tf.NodeDescription("QueueDequeueMany") @@ -36827,7 +36828,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") @@ -36868,7 +36869,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Minimum") do desc = tf.NodeDescription("Minimum") @@ -36911,7 +36912,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, 
unit=nothing) local desc tf.with_op_name(name, "Substr") do desc = tf.NodeDescription("Substr") @@ -36964,7 +36965,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSize") do desc = tf.NodeDescription("QueueSize") @@ -37000,7 +37001,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrlV2") do desc = tf.NodeDescription("ApplyFtrlV2") @@ -37084,7 +37085,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") @@ -37148,7 +37149,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMean") do desc = tf.NodeDescription("SparseSegmentMean") @@ -37197,7 +37198,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceApplyProximalAdagrad") @@ -37264,7 +37265,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV2") do desc = tf.NodeDescription("TensorArrayGatherV2") @@ -37320,7 +37321,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Less") do desc = tf.NodeDescription("Less") @@ -37363,7 
+37364,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "HostConst") do desc = tf.NodeDescription("HostConst") @@ -37407,7 +37408,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "UpperBound") do desc = tf.NodeDescription("UpperBound") @@ -37456,7 +37457,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGetItem") do desc = tf.NodeDescription("TensorListGetItem") @@ -37506,7 +37507,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVars") do desc = tf.NodeDescription("FakeQuantWithMinMaxVars") @@ -37562,7 +37563,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") @@ -37598,7 +37599,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpToV2") do desc = tf.NodeDescription("ReaderReadUpToV2") @@ -37647,7 +37648,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) local desc tf.with_op_name(name, "Complex") do desc = tf.NodeDescription("Complex") @@ -37690,7 +37691,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) 
@@ -37690,7 +37691,7 @@
 end
 """
 begin
-    #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
+    #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing)
         local desc
         tf.with_op_name(name, "TensorListReserve") do
             desc = tf.NodeDescription("TensorListReserve")

[Diff continues: some 135 further hunks of this exact one-line shape, from @@ -37744,7 +37745,7 @@ through @@ -45313,7 +45314,7 @@. Each rewrites the generator marker from #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# to #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# on a tf.@op function <op>_graph(...) line, leaving the surrounding generated code (local desc / tf.with_op_name(name, "<Op>") do / desc = tf.NodeDescription("<Op>")) untouched. The wrappers covered, in order: bitcast, priority_queue, quantized_batch_norm_with_global_normalization, cos, quantize_down_and_shrink_range, experimental_random_dataset, rpc, quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, tensor_list_length, map_incomplete_size, stateless_while, sparse_conditional_accumulator, segment_min, write_graph_summary, cholesky_grad, log_uniform_candidate_sampler, serialize_sparse, scatter_nd_non_aliasing_add, ref_merge, tensor_list_concat, cudnn_rnn_canonical_to_params, sparse_apply_adadelta, tensor_array_close, selu_grad, crop_and_resize_grad_image, rfft, experimental_sql_dataset, resource_apply_power_sign, matrix_determinant, static_regex_replace, avg_pool, sparse_dense_cwise_add, bias_add_v1, invert_permutation, hash_table_v2, sparse_apply_momentum, infeed_enqueue, stateless_random_uniform_int, load_tpu_embedding_adadelta_parameters_grad_accum_debug, _send, map_peek, write_scalar_summary, ordered_map_unstage_no_key, sparse_apply_centered_rms_prop, tensor_list_scatter_v2, conv3d_backprop_input_v2, retrieve_tpu_embedding_proximal_adagrad_parameters, random_shuffle, uniform_candidate_sampler, tensor_array_split_v2, mutable_dense_hash_table_v2, draw_bounding_boxes, sparse_apply_proximal_adagrad, range_dataset, reader_restore_state_v2, top_kv2, atanh, debug_gradient_identity, sparse_add_grad, resource_scatter_add, ceil, save, retrieve_tpu_embedding_centered_rms_prop_parameters, quantized_concat, zeros_like, fractional_avg_pool, edit_distance, unique_v2, quantize_and_dequantize_v2, quantize_and_dequantize, tensor_list_pop_back, debug_nan_count, apply_adagrad_da, depthwise_conv2d_native, serialize_iterator, dataset_to_graph, top_k, resource_apply_ftrl_v2, _nccl_broadcast_recv, queue_is_closed, shuffle_dataset, deserialize_sparse, priority_queue_v2, _device_arg, truncated_normal, tensor_forest_tree_predict, stack_v2, accumulator_num_accumulated, reader_reset_v2, apply_add_sign, retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, rint, retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, extract_glimpse, string_to_hash_bucket_strong, one_shot_iterator, resource_sparse_apply_momentum, save_slices, experimental_dataset_cardinality, is_finite, experimental_numa_map_and_batch_dataset, all_to_all, take_many_sparse_from_tensors_map, batch_matrix_diag_part, fixed_length_record_dataset, stack_push, placeholder_v2, multi_device_iterator_init, gcs_configure_block_cache, queue_dequeue_v2, retrieve_tpu_embedding_rms_prop_parameters, transpose, ifft, sparse_segment_sum_with_num_segments, queue_is_closed_v2, parameterized_truncated_normal, diag_part, kmeans_plus_plus_initialization, regex_replace, sparse_tensor_dense_mat_mul, map_defun, thread_unsafe_unigram_candidate_sampler, retrieve_tpu_embedding_adam_parameters_grad_accum_debug, parallel_concat, lookup_table_find_v2, tensor_forest_tree_deserialize, retrieve_tpu_embedding_momentum_parameters, fake_quant_with_min_max_args, resource_apply_gradient_descent, experimental_sliding_window_dataset, decode_raw, fake_quant_with_min_max_vars_per_channel_gradient, unique_with_counts_v2, experimental_sleep_dataset, and tpu_replicated_output. Five of these hunks also carry a docstring line as context: "Sends the named tensor from send_device to recv_device." (_send), "Debug NaN Value Counter Op" (debug_nan_count), "Replacement node for NcclBroadcast." (_nccl_broadcast_recv), "A graph node which represents an argument to a function." (_device_arg), and "Re-configures the GCS block cache with the new configuration values." (gcs_configure_block_cache).]

@@ -45362,7 +45363,7
end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "LowerBound") do desc = tf.NodeDescription("LowerBound") @@ -45411,7 +45412,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tan") do desc = tf.NodeDescription("Tan") @@ -45449,7 +45450,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "Enter") do desc = tf.NodeDescription("Enter") @@ -45505,7 +45506,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueueTuple") do desc = tf.NodeDescription("InfeedEnqueueTuple") @@ -45565,7 +45566,7 @@ end An op that informs a host of the global ids of all the of TPUs in the """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) local desc tf.with_op_name(name, "_SetGlobalTPUArray") do desc = tf.NodeDescription("_SetGlobalTPUArray") @@ -45601,7 +45602,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) local desc tf.with_op_name(name, "Square") do desc = tf.NodeDescription("Square") @@ -45639,7 +45640,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientRefIdentity") do desc = tf.NodeDescription("DebugGradientRefIdentity") @@ -45677,7 +45678,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdadelta") do desc = 
tf.NodeDescription("ApplyAdadelta") @@ -45751,7 +45752,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") @@ -45847,7 +45848,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummary") do desc = tf.NodeDescription("AudioSummary") @@ -45899,7 +45900,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "SquaredDifference") do desc = tf.NodeDescription("SquaredDifference") @@ -45942,7 +45943,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalTakeWhileDataset") do desc = tf.NodeDescription("ExperimentalTakeWhileDataset") @@ -46006,7 +46007,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdUpdate") do desc = tf.NodeDescription("ScatterNdUpdate") @@ -46062,7 +46063,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "DynamicStitch") do 
desc = tf.NodeDescription("DynamicStitch") @@ -46110,7 +46111,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "OnesLike") do desc = tf.NodeDescription("OnesLike") @@ -46148,7 +46149,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalMaxPoolGrad") do desc = tf.NodeDescription("FractionalMaxPoolGrad") @@ -46210,7 +46211,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "RemoteCall") do desc = tf.NodeDescription("RemoteCall") @@ -46268,7 +46269,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "Gather") do desc = tf.NodeDescription("Gather") @@ -46319,7 +46320,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "QuantizedMatMul") do desc = tf.NodeDescription("QuantizedMatMul") @@ -46396,7 +46397,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecodeWithOffsets") do desc = tf.NodeDescription("UnicodeDecodeWithOffsets") @@ -46461,7 +46462,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") @@ -46533,7 +46534,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorApplyGradient") do desc = tf.NodeDescription("AccumulatorApplyGradient") @@ -46585,7 +46586,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) local desc tf.with_op_name(name, "WriteSummary") do desc = tf.NodeDescription("WriteSummary") @@ -46639,7 +46640,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2D") do desc = tf.NodeDescription("QuantizedConv2D") @@ -46728,7 +46729,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyMomentum") do desc = tf.NodeDescription("ResourceApplyMomentum") @@ -46796,7 +46797,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log1p") do desc = tf.NodeDescription("Log1p") @@ -46834,7 +46835,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapClear") do desc = tf.NodeDescription("OrderedMapClear") @@ -46896,7 +46897,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterUpdate") do desc = tf.NodeDescription("ResourceScatterUpdate") @@ -46951,7 +46952,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "BarrierTakeMany") do desc = tf.NodeDescription("BarrierTakeMany") @@ -47020,7 +47021,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyKerasMomentum") do desc = tf.NodeDescription("ResourceApplyKerasMomentum") @@ -47088,7 +47089,7 @@ end Generates serialized partition messages suitable for batch reads. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") @@ -47162,7 +47163,7 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaRecvAtHost") do desc = tf.NodeDescription("_XlaRecvAtHost") @@ -47216,7 +47217,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedAvgPool") do desc = 
tf.NodeDescription("QuantizedAvgPool") @@ -47285,7 +47286,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") @@ -47375,7 +47376,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) local desc tf.with_op_name(name, "TensorListResize") do desc = tf.NodeDescription("TensorListResize") @@ -47415,7 +47416,7 @@ end Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostRecv") do desc = tf.NodeDescription("_HostRecv") @@ -47483,7 +47484,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCenterBias") do desc = tf.NodeDescription("BoostedTreesCenterBias") @@ -47535,7 +47536,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSizeV2") do desc = tf.NodeDescription("LookupTableSizeV2") @@ -47571,7 +47572,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT") do desc = tf.NodeDescription("IRFFT") @@ -47611,7 +47612,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceAdd") do desc = tf.NodeDescription("InplaceAdd") @@ -47658,7 +47659,7 
@@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAdd") do desc = tf.NodeDescription("BiasAdd") @@ -47707,7 +47708,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") @@ -47779,7 +47780,7 @@ end An op that disconnects the TPUs on a host from a running distributed """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) local desc tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do desc @@ -47812,7 +47813,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) local desc tf.with_op_name(name, "RaggedRange") do desc = tf.NodeDescription("RaggedRange") @@ -47865,7 +47866,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "WindowDataset") do desc = tf.NodeDescription("WindowDataset") @@ -47929,7 +47930,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "Diag") do desc = tf.NodeDescription("Diag") @@ -47967,7 +47968,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "InfeedDequeue") do desc = tf.NodeDescription("InfeedDequeue") @@ -48011,7 +48012,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") @@ -48063,7 +48064,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddSparseToTensorsMap") do desc = tf.NodeDescription("AddSparseToTensorsMap") @@ -48121,7 +48122,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedGather") do desc = tf.NodeDescription("RaggedGather") @@ -48187,7 +48188,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) local desc tf.with_op_name(name, "RGBToHSV") do desc = tf.NodeDescription("RGBToHSV") @@ -48225,7 +48226,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") @@ -48261,7 +48262,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) local desc tf.with_op_name(name, "For") do desc = tf.NodeDescription("For") @@ -48321,7 +48322,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMaxSparse") do desc = tf.NodeDescription("SparseReduceMaxSparse") @@ -48382,7 +48383,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatOffset") do desc = tf.NodeDescription("ConcatOffset") @@ -48433,7 +48434,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Stage") do desc = tf.NodeDescription("Stage") @@ -48499,7 +48500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "Switch") do desc = tf.NodeDescription("Switch") @@ -48546,7 +48547,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueManyV2") do desc = tf.NodeDescription("QueueDequeueManyV2") @@ -48598,7 +48599,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentProd") do desc = tf.NodeDescription("SegmentProd") @@ -48643,7 +48644,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) local desc tf.with_op_name(name, "ApproximateEqual") do desc = tf.NodeDescription("ApproximateEqual") @@ -48692,7 +48693,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2D") do desc = tf.NodeDescription("Conv2D") @@ -48771,7 +48772,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
cross_replica_sum_graph(input_, group_assignment_; name=nothing) local desc tf.with_op_name(name, "CrossReplicaSum") do desc = tf.NodeDescription("CrossReplicaSum") @@ -48813,7 +48814,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) local desc tf.with_op_name(name, "SparseMatMul") do desc = tf.NodeDescription("SparseMatMul") @@ -48881,7 +48882,7 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorSplit") do desc = tf.NodeDescription("_ScopedAllocatorSplit") @@ -48953,7 +48954,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igammac") do desc = tf.NodeDescription("Igammac") @@ -48996,7 +48997,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) local desc tf.with_op_name(name, "BatchMatMul") do desc = tf.NodeDescription("BatchMatMul") @@ -49051,7 +49052,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") @@ -49117,7 +49118,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueCloseV2") do desc = tf.NodeDescription("QueueCloseV2") @@ -49159,7 +49160,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayPack") do desc = tf.NodeDescription("TensorArrayPack") @@ -49211,7 +49212,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreState") do desc = tf.NodeDescription("ReaderRestoreState") @@ -49251,7 +49252,7 @@ end *NOTE*: Do not invoke this operator directly in Python. Grappler is """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) local desc tf.with_op_name(name, "_FusedConv2D") do desc = tf.NodeDescription("_FusedConv2D") @@ -49347,7 +49348,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) local desc tf.with_op_name(name, "_ReadVariablesOp") do desc = tf.NodeDescription("_ReadVariablesOp") @@ -49395,7 +49396,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensors") do desc = tf.NodeDescription("MutableHashTableOfTensors") @@ -49463,7 +49464,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) local desc tf.with_op_name(name, "ReadFile") do desc = tf.NodeDescription("ReadFile") @@ -49499,7 +49500,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, 
table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") @@ -49571,7 +49572,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalAvgPoolGrad") do desc = tf.NodeDescription("FractionalAvgPoolGrad") @@ -49627,7 +49628,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") @@ -49695,7 +49696,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormalV2") do desc = tf.NodeDescription("StatefulStandardNormalV2") @@ -49753,7 +49754,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) local desc tf.with_op_name(name, "Bincount") do desc = tf.NodeDescription("Bincount") @@ -49799,7 +49800,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing) local desc tf.with_op_name(name, "Inv") do desc = tf.NodeDescription("Inv") @@ -49837,7 +49838,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalAdagrad") do desc = tf.NodeDescription("ApplyProximalAdagrad") @@ -49906,7 +49907,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; 
name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) local desc tf.with_op_name(name, "GatherV2") do desc = tf.NodeDescription("GatherV2") @@ -49957,7 +49958,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) local desc tf.with_op_name(name, "WriteFile") do desc = tf.NodeDescription("WriteFile") @@ -49997,7 +49998,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") @@ -50038,7 +50039,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceGather") do desc = tf.NodeDescription("ResourceGather") @@ -50093,7 +50094,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") @@ -50156,7 +50157,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateMod") do desc = tf.NodeDescription("TruncateMod") @@ -50199,7 +50200,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "LogMatrixDeterminant") do desc = tf.NodeDescription("LogMatrixDeterminant") @@ -50242,7 +50243,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT2D") do desc = tf.NodeDescription("IRFFT2D") @@ -50282,7 +50283,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, 
cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesTrainingPredict") do desc = tf.NodeDescription("BoostedTreesTrainingPredict") @@ -50347,7 +50348,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) local desc tf.with_op_name(name, "NearestNeighbors") do desc = tf.NodeDescription("NearestNeighbors") @@ -50396,7 +50397,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) local desc tf.with_op_name(name, "Floor") do desc = tf.NodeDescription("Floor") @@ -50434,7 +50435,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -50502,7 +50503,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) local desc tf.with_op_name(name, "WriteImageSummary") do desc = tf.NodeDescription("WriteImageSummary") @@ -50562,7 +50563,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "TileGrad") do desc = tf.NodeDescription("TileGrad") @@ -50604,7 +50605,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV3") do desc = tf.NodeDescription("TensorArrayGradV3") @@ -50655,7 +50656,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") @@ -50707,7 +50708,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNorm") do desc = tf.NodeDescription("FusedBatchNorm") @@ -50788,7 +50789,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalAnd") do desc = tf.NodeDescription("LogicalAnd") @@ -50828,7 +50829,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterUpdate") do desc = tf.NodeDescription("TensorScatterUpdate") @@ -50878,7 +50879,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReaderV2") do desc = tf.NodeDescription("TextLineReaderV2") @@ -50928,7 +50929,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorSliceDataset") do desc = tf.NodeDescription("TensorSliceDataset") @@ -50976,7 +50977,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV3") do desc = tf.NodeDescription("TensorArrayScatterV3") @@ -51026,7 +51027,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighborGrad") do desc = tf.NodeDescription("ResizeNearestNeighborGrad") @@ -51074,7 +51075,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyPowerSign") do desc = tf.NodeDescription("ApplyPowerSign") @@ -51148,7 +51149,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRebatchDataset") do desc = tf.NodeDescription("ExperimentalRebatchDataset") @@ -51200,7 +51201,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPad") do desc = tf.NodeDescription("MirrorPad") @@ -51250,7 +51251,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing) local desc tf.with_op_name(name, "LogicalNot") do desc = tf.NodeDescription("LogicalNot") @@ -51286,7 +51287,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT") do desc = tf.NodeDescription("BatchIFFT") @@ -51322,7 +51323,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV2") do desc = tf.NodeDescription("TensorArrayConcatV2") @@ -51379,7 +51380,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Sum") do desc = 
tf.NodeDescription("Sum") @@ -51430,7 +51431,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesPredict") do desc = tf.NodeDescription("BoostedTreesPredict") @@ -51482,7 +51483,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize") @@ -51585,7 +51586,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyAdagrad") @@ -51655,7 +51656,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyReluGrad") do desc = tf.NodeDescription("LeakyReluGrad") @@ -51704,7 +51705,7 @@ end A graph node which represents a return value of a function. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceRetval") do desc = tf.NodeDescription("_DeviceRetval") @@ -51748,7 +51749,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) local desc tf.with_op_name(name, "Pad") do desc = tf.NodeDescription("Pad") @@ -51792,7 +51793,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddManySparseToTensorsMap") do desc = tf.NodeDescription("AddManySparseToTensorsMap") @@ -51850,7 +51851,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReorder") do desc = tf.NodeDescription("SparseReorder") @@ -51901,7 +51902,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseXor") do desc = tf.NodeDescription("BitwiseXor") @@ -51944,7 +51945,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixSetDiag") do desc = tf.NodeDescription("BatchMatrixSetDiag") @@ -51987,7 +51988,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsertV2") do desc = tf.NodeDescription("LookupTableInsertV2") @@ -52035,7 +52036,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, 
output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") @@ -52091,7 +52092,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyRMSProp") @@ -52174,7 +52175,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomCrop") do desc = tf.NodeDescription("RandomCrop") @@ -52228,7 +52229,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImportV2") do desc = tf.NodeDescription("LookupTableImportV2") @@ -52276,7 +52277,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdUpdate") do desc = tf.NodeDescription("ResourceScatterNdUpdate") @@ -52331,7 +52332,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) local desc tf.with_op_name(name, "StaticRegexFullMatch") do desc = tf.NodeDescription("StaticRegexFullMatch") @@ -52373,7 +52374,7 @@ end Configures the credentials used by the GCS client of the local TF runtime. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureCredentials") do desc = tf.NodeDescription("GcsConfigureCredentials") @@ -52409,7 +52410,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV3") do desc = tf.NodeDescription("TensorArraySizeV3") @@ -52449,7 +52450,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") @@ -52504,7 +52505,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") @@ -52616,7 +52617,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropFilter") do desc = tf.NodeDescription("Conv2DBackpropFilter") @@ -52699,7 +52700,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, 
ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGrad") do desc = tf.NodeDescription("MaxPoolGrad") @@ -52771,7 +52772,7 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) local desc tf.with_op_name(name, "_InitializeHostForDistributedTPU") do desc = tf.NodeDescription("_InitializeHostForDistributedTPU") @@ -52807,7 +52808,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StagePeek") do desc = tf.NodeDescription("StagePeek") @@ -52873,7 +52874,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) local desc tf.with_op_name(name, "PadV2") do desc = tf.NodeDescription("PadV2") @@ -52922,7 +52923,7 @@ end Creates an empty Tensor with shape `shape` and type `dtype`. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) local desc tf.with_op_name(name, "_ParallelConcatStart") do desc = tf.NodeDescription("_ParallelConcatStart") @@ -52966,7 +52967,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) local desc tf.with_op_name(name, "PrintV2") do desc = tf.NodeDescription("PrintV2") @@ -53008,7 +53009,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptionalGetValue") do desc = tf.NodeDescription("OptionalGetValue") @@ -53056,7 +53057,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") @@ -53124,7 +53125,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) local desc tf.with_op_name(name, "SparseSlice") do desc = tf.NodeDescription("SparseSlice") @@ -53183,7 +53184,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") @@ -53238,7 +53239,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixSolve") do desc = tf.NodeDescription("MatrixSolve") @@ -53287,7 +53288,7 @@ end An op that sets up the centralized structures for a distributed TPU 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "_ConfigureDistributedTPU") do desc = tf.NodeDescription("_ConfigureDistributedTPU") @@ -53329,7 +53330,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) local desc tf.with_op_name(name, "AdjustContrastv2") do desc = tf.NodeDescription("AdjustContrastv2") @@ -53371,7 +53372,7 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMaximum") do desc = tf.NodeDescription("_MklMaximum") @@ -53427,7 +53428,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsSize") do desc = tf.NodeDescription("CudnnRNNParamsSize") @@ -53513,7 +53514,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") @@ -53559,7 +53560,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT3D") do desc = tf.NodeDescription("BatchIFFT3D") @@ -53595,7 +53596,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sigmoid") do desc = tf.NodeDescription("Sigmoid") @@ -53633,7 +53634,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; 
name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMean") do desc = tf.NodeDescription("SegmentMean") @@ -53678,7 +53679,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") @@ -53714,7 +53715,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV2") do desc = tf.NodeDescription("TensorArraySizeV2") @@ -53754,7 +53755,7 @@ end Returns x - y element-wise. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSub") do desc = tf.NodeDescription("_MklSub") @@ -53810,7 +53811,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) local desc tf.with_op_name(name, "SendTPUEmbeddingGradients") do desc = tf.NodeDescription("SendTPUEmbeddingGradients") @@ -53868,7 +53869,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3D") do desc = tf.NodeDescription("MaxPool3D") @@ -53930,7 +53931,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Prod") do desc = tf.NodeDescription("Prod") @@ -53981,7 +53982,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") @@ -54017,7 
+54018,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBack") do desc = tf.NodeDescription("TensorListPushBack") @@ -54065,7 +54066,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) local desc tf.with_op_name(name, "BatchFunction") do desc = tf.NodeDescription("BatchFunction") @@ -54177,7 +54178,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRows") do desc = tf.NodeDescription("SparseFillEmptyRows") @@ -54233,7 +54234,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "SelfAdjointEigV2") do desc = tf.NodeDescription("SelfAdjointEigV2") @@ -54282,7 +54283,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") @@ -54343,7 +54344,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc 
tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") @@ -54425,7 +54426,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) local desc tf.with_op_name(name, "TemporaryVariable") do desc = tf.NodeDescription("TemporaryVariable") @@ -54475,7 +54476,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAddSign") do desc = tf.NodeDescription("ResourceApplyAddSign") @@ -54547,7 +54548,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) local desc tf.with_op_name(name, "Roll") do desc = tf.NodeDescription("Roll") @@ -54597,7 +54598,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xdivy") do desc = tf.NodeDescription("Xdivy") @@ -54640,7 +54641,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGradGrad") do desc = tf.NodeDescription("MaxPool3DGradGrad") @@ -54712,7 +54713,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) local desc tf.with_op_name(name, "CropAndResize") do desc = tf.NodeDescription("CropAndResize") @@ -54774,7 +54775,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedBiasAdd") do desc = tf.NodeDescription("QuantizedBiasAdd") @@ -54845,7 
+54846,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) local desc tf.with_op_name(name, "KMC2ChainInitialization") do desc = tf.NodeDescription("KMC2ChainInitialization") @@ -54885,7 +54886,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstageNoKey") do desc = tf.NodeDescription("MapUnstageNoKey") @@ -54956,7 +54957,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdSub") do desc = tf.NodeDescription("ScatterNdSub") @@ -55012,7 +55013,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinear") do desc = tf.NodeDescription("ResizeBilinear") @@ -55060,7 +55061,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapPeek") do desc = tf.NodeDescription("OrderedMapPeek") @@ -55130,7 +55131,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArray") do desc = tf.NodeDescription("TensorArray") @@ -55196,7 +55197,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceSub") do desc = tf.NodeDescription("InplaceSub") @@ -55243,7 +55244,7 @@ end """ begin - 
#= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Pow") do desc = tf.NodeDescription("Pow") @@ -55286,7 +55287,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) local desc tf.with_op_name(name, "StatefulStandardNormal") do desc = tf.NodeDescription("StatefulStandardNormal") @@ -55340,7 +55341,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefNextIteration") do desc = tf.NodeDescription("RefNextIteration") @@ -55378,7 +55379,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) local desc tf.with_op_name(name, "ScalarSummary") do desc = tf.NodeDescription("ScalarSummary") @@ -55420,7 +55421,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) local desc tf.with_op_name(name, "StringSplitV2") do desc = tf.NodeDescription("StringSplitV2") @@ -55471,7 +55472,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI0e") do desc = tf.NodeDescription("BesselI0e") @@ -55509,7 +55510,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "Unique") do desc = tf.NodeDescription("Unique") @@ -55558,7 +55559,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") @@ -55626,7 +55627,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReaderV2") do desc = tf.NodeDescription("WholeFileReaderV2") @@ -55670,7 +55671,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "EagerPyFunc") do desc = tf.NodeDescription("EagerPyFunc") @@ -55724,7 +55725,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "NextIteration") do desc = tf.NodeDescription("NextIteration") @@ -55762,7 +55763,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Case") do desc = tf.NodeDescription("Case") @@ -55826,7 +55827,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterSub") do desc = tf.NodeDescription("TensorScatterSub") @@ -55876,7 +55877,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMax") do desc = tf.NodeDescription("ScatterMax") @@ -55932,7 +55933,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sqrt") do desc = tf.NodeDescription("Sqrt") @@ -55970,7 +55971,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorTakeGradient") do desc = tf.NodeDescription("AccumulatorTakeGradient") @@ -56016,7 +56017,7 @@ end Returns x + y element-wise. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklAdd") do desc = tf.NodeDescription("_MklAdd") @@ -56072,7 +56073,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) local desc tf.with_op_name(name, "Reciprocal") do desc = tf.NodeDescription("Reciprocal") @@ -56110,7 +56111,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "OutfeedEnqueueTuple") do desc = tf.NodeDescription("OutfeedEnqueueTuple") @@ -56152,7 +56153,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) local desc tf.with_op_name(name, "StringStrip") do desc = tf.NodeDescription("StringStrip") @@ -56188,7 +56189,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") @@ -56244,7 +56245,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierReadySize") do desc = tf.NodeDescription("BarrierReadySize") @@ -56280,7 +56281,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucket") do desc = tf.NodeDescription("StringToHashBucket") @@ -56322,7 +56323,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcat") do desc = tf.NodeDescription("TensorArrayConcat") @@ -56379,7 +56380,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilename") do desc = tf.NodeDescription("ShardedFilename") @@ -56423,7 +56424,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFunc") do desc = tf.NodeDescription("PyFunc") @@ -56477,7 +56478,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentProd") do desc = tf.NodeDescription("UnsortedSegmentProd") @@ -56528,7 +56529,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "CountUpTo") do desc = tf.NodeDescription("CountUpTo") @@ -56572,7 +56573,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) local desc tf.with_op_name(name, "RandomGamma") do desc = tf.NodeDescription("RandomGamma") @@ -56634,7 +56635,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGrad") do desc = tf.NodeDescription("TensorArrayGrad") @@ -56680,7 +56681,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2D") do desc = tf.NodeDescription("Dilation2D") @@ -56741,7 +56742,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, 
shared_name=nothing) local desc tf.with_op_name(name, "Unbatch") do desc = tf.NodeDescription("Unbatch") @@ -56805,7 +56806,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandle") do desc = tf.NodeDescription("GetSessionHandle") @@ -56843,7 +56844,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") @@ -56904,7 +56905,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensorsV2") do desc = tf.NodeDescription("MutableHashTableOfTensorsV2") @@ -56972,7 +56973,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrl") do desc = tf.NodeDescription("SparseApplyFtrl") @@ -57058,7 +57059,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDatasetV2") do desc = tf.NodeDescription("BatchDatasetV2") @@ -57114,7 +57115,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMinimum") do desc = tf.NodeDescription("SparseSparseMinimum") @@ -57178,7 +57179,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) local desc tf.with_op_name(name, "ReverseV2") do desc = tf.NodeDescription("ReverseV2") @@ -57223,7 +57224,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSlice") do desc = tf.NodeDescription("StridedSlice") @@ -57346,7 +57347,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) local desc tf.with_op_name(name, "MatchingFiles") do desc = tf.NodeDescription("MatchingFiles") @@ -57382,7 +57383,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) local desc tf.with_op_name(name, "EncodeBase64") do desc = tf.NodeDescription("EncodeBase64") @@ -57424,7 +57425,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextAsOptional") do desc = tf.NodeDescription("IteratorGetNextAsOptional") @@ -57472,7 +57473,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueue") do desc = tf.NodeDescription("PaddingFIFOQueue") @@ -57534,7 +57535,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "IteratorToStringHandle") do desc = tf.NodeDescription("IteratorToStringHandle") @@ -57570,7 +57571,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, 
padding=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") @@ -57637,7 +57638,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGather") do desc = tf.NodeDescription("TensorListGather") @@ -57687,7 +57688,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "Multinomial") do desc = tf.NodeDescription("Multinomial") @@ -57747,7 +57748,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayRead") do desc = tf.NodeDescription("TensorArrayRead") @@ -57797,7 +57798,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") @@ -57849,7 +57850,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "TPUPartitionedCall") do desc = tf.NodeDescription("TPUPartitionedCall") @@ -57907,7 +57908,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, 
strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") @@ -58004,7 +58005,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandleV2") do desc = tf.NodeDescription("IteratorFromStringHandleV2") @@ -58052,7 +58053,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseOr") do desc = tf.NodeDescription("BitwiseOr") @@ -58095,7 +58096,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMax") do desc = tf.NodeDescription("UnsortedSegmentMax") @@ -58146,7 +58147,7 @@ end Returns (x - y)(x - y) element-wise. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSquaredDifference") do desc = tf.NodeDescription("_MklSquaredDifference") @@ -58202,7 +58203,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilter") do desc = tf.NodeDescription("Conv3DBackpropFilter") @@ -58268,7 +58269,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "If") do desc = tf.NodeDescription("If") @@ -58340,7 +58341,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, 
other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FlatMapDataset") do desc = tf.NodeDescription("FlatMapDataset") @@ -58404,7 +58405,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatter") do desc = tf.NodeDescription("TensorListScatter") @@ -58464,7 +58465,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftsignGrad") do desc = tf.NodeDescription("SoftsignGrad") @@ -58507,7 +58508,7 @@ end Copy Host Op. """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "CopyHost") do desc = tf.NodeDescription("CopyHost") @@ -58557,7 +58558,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) local desc tf.with_op_name(name, "LinSpace") do desc = tf.NodeDescription("LinSpace") @@ -58607,7 +58608,7 @@ end Updates input `value` at `loc` with `update`. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) local desc tf.with_op_name(name, "_ParallelConcatUpdate") do desc = tf.NodeDescription("_ParallelConcatUpdate") @@ -58656,7 +58657,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "Stack") do desc = tf.NodeDescription("Stack") @@ -58700,7 +58701,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPushV2") do desc = tf.NodeDescription("StackPushV2") @@ -58748,7 +58749,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignVariableOp") do desc = tf.NodeDescription("AssignVariableOp") @@ -58796,7 +58797,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SparseSplit") do desc = tf.NodeDescription("SparseSplit") @@ -58858,7 +58859,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayUnpack") do desc = tf.NodeDescription("TensorArrayUnpack") @@ -58904,7 +58905,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) local desc tf.with_op_name(name, "TensorListStack") do desc = tf.NodeDescription("TensorListStack") @@ -58956,7 +58957,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierIncompleteSize") do 
desc = tf.NodeDescription("BarrierIncompleteSize") @@ -58992,7 +58993,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "Restore") do desc = tf.NodeDescription("Restore") @@ -59044,7 +59045,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV3") do desc = tf.NodeDescription("TensorArrayV3") @@ -59121,7 +59122,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalAssertNextDataset") do desc = tf.NodeDescription("ExperimentalAssertNextDataset") @@ -59173,7 +59174,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) local desc tf.with_op_name(name, "InTopK") do desc = tf.NodeDescription("InTopK") @@ -59221,7 +59222,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterSub") do desc = tf.NodeDescription("ScatterSub") @@ -59277,7 +59278,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acosh") do desc = tf.NodeDescription("Acosh") @@ -59315,7 +59316,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local 
desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") @@ -59386,7 +59387,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "Cast") do desc = tf.NodeDescription("Cast") @@ -59442,7 +59443,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeV2") do desc = tf.NodeDescription("QuantizeV2") @@ -59503,7 +59504,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "GeneratorDataset") do desc = tf.NodeDescription("GeneratorDataset") @@ -59595,7 +59596,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSerialize") do desc = tf.NodeDescription("TensorForestTreeSerialize") @@ -59631,7 +59632,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) local desc tf.with_op_name(name, "NextAfter") do desc = tf.NodeDescription("NextAfter") @@ -59674,7 +59675,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV2") do desc = tf.NodeDescription("TensorArrayCloseV2") @@ -59710,7 +59711,7 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "BigQueryReader") do desc = tf.NodeDescription("BigQueryReader") @@ -59790,7 +59791,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReadV2") do desc = tf.NodeDescription("ReaderReadV2") @@ -59835,7 +59836,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mod") do desc = tf.NodeDescription("Mod") @@ -59878,7 +59879,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "AddV2") do desc = tf.NodeDescription("AddV2") @@ -59921,7 +59922,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomNormal") do desc = tf.NodeDescription("StatelessRandomNormal") @@ -59971,7 +59972,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceAssign") do desc = tf.NodeDescription("StridedSliceAssign") @@ -60099,7 +60100,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMin") do desc = tf.NodeDescription("ScatterMin") @@ -60155,7 +60156,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "ResourceStridedSliceAssign") do desc = tf.NodeDescription("ResourceStridedSliceAssign") @@ -60282,7 +60283,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) local desc tf.with_op_name(name, "RandomGammaGrad") do desc = tf.NodeDescription("RandomGammaGrad") @@ -60325,7 +60326,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") @@ -60400,7 +60401,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") @@ -60450,7 +60451,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu6") do desc = tf.NodeDescription("QuantizedRelu6") @@ -60507,7 +60508,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMaximum") do desc = tf.NodeDescription("SparseSparseMaximum") @@ -60571,7 +60572,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, 
variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalization") do desc = tf.NodeDescription("BatchNormWithGlobalNormalization") @@ -60641,7 +60642,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) local desc tf.with_op_name(name, "InTopKV2") do desc = tf.NodeDescription("InTopKV2") @@ -60688,7 +60689,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "Cholesky") do desc = tf.NodeDescription("Cholesky") @@ -60726,7 +60727,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") @@ -60806,7 +60807,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagrad") do desc = tf.NodeDescription("ResourceApplyAdagrad") @@ -60869,7 +60870,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") @@ -60953,7 +60954,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, 
align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubicGrad") do desc = tf.NodeDescription("ResizeBicubicGrad") @@ -61001,7 +61002,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEig") do desc = tf.NodeDescription("BatchSelfAdjointEig") @@ -61039,7 +61040,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmax") do desc = tf.NodeDescription("SparseSoftmax") @@ -61085,7 +61086,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asinh") do desc = tf.NodeDescription("Asinh") @@ -61123,7 +61124,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2DAndRelu") do desc = tf.NodeDescription("QuantizedConv2DAndRelu") @@ -61212,7 +61213,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixInverse") do desc = tf.NodeDescription("MatrixInverse") @@ -61256,7 +61257,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcatLists") do desc = tf.NodeDescription("TensorListConcatLists") @@ -61302,7 +61303,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Requantize") do desc = tf.NodeDescription("Requantize") @@ -61367,7 +61368,7 @@ end """ begin - #= 
/Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT") do desc = tf.NodeDescription("FFT") @@ -61405,7 +61406,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "ConjugateTranspose") do desc = tf.NodeDescription("ConjugateTranspose") @@ -61449,7 +61450,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unstage") do desc = tf.NodeDescription("Unstage") @@ -61511,7 +61512,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "Relu6Grad") do desc = tf.NodeDescription("Relu6Grad") @@ -61554,7 +61555,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) local desc tf.with_op_name(name, "ScaleAndTranslateGrad") do desc = tf.NodeDescription("ScaleAndTranslateGrad") @@ -61611,7 +61612,7 @@ end Converts an array of tensors to a list of tensors. 
""" begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) local desc tf.with_op_name(name, "_ArrayToList") do desc = tf.NodeDescription("_ArrayToList") @@ -61661,7 +61662,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV3") do desc = tf.NodeDescription("CudnnRNNV3") @@ -61765,7 +61766,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) local desc tf.with_op_name(name, "ExpandDims") do desc = tf.NodeDescription("ExpandDims") @@ -61810,7 +61811,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "InvGrad") do desc = tf.NodeDescription("InvGrad") @@ -61853,7 +61854,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) local desc tf.with_op_name(name, "NonMaxSuppression") do desc = tf.NodeDescription("NonMaxSuppression") @@ -61903,7 +61904,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) local desc tf.with_op_name(name, "L2Loss") do desc = tf.NodeDescription("L2Loss") @@ -61941,7 +61942,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeArea") do desc = tf.NodeDescription("ResizeArea") @@ -61989,7 +61990,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) local desc tf.with_op_name(name, "SparseCross") do desc = tf.NodeDescription("SparseCross") @@ -62090,7 +62091,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT3D") do desc = tf.NodeDescription("BatchFFT3D") @@ -62126,7 +62127,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomStandardNormal") do desc = tf.NodeDescription("RandomStandardNormal") @@ -62182,7 +62183,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMul") do desc = tf.NodeDescription("ResourceScatterMul") @@ -62237,7 +62238,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizer") do desc = tf.NodeDescription("SdcaOptimizer") @@ -62368,7 +62369,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) local desc tf.with_op_name(name, "Zeta") do desc = tf.NodeDescription("Zeta") @@ -62411,7 +62412,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, 
min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBox") do desc = tf.NodeDescription("SampleDistortedBoundingBox") @@ -62500,7 +62501,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "IgammaGradA") do desc = tf.NodeDescription("IgammaGradA") @@ -62543,7 +62544,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMax") do desc = tf.NodeDescription("SegmentMax") @@ -62588,7 +62589,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) local desc tf.with_op_name(name, "Range") do desc = tf.NodeDescription("Range") @@ -62636,7 +62637,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") @@ -62697,7 +62698,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "FlushSummaryWriter") do desc = tf.NodeDescription("FlushSummaryWriter") @@ -62733,7 +62734,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "Dequantize") do desc = tf.NodeDescription("Dequantize") @@ -62785,7 +62786,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRowsGrad") do desc = tf.NodeDescription("SparseFillEmptyRowsGrad") @@ -62832,7 +62833,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNext") do desc = tf.NodeDescription("IteratorGetNext") @@ -62880,7 +62881,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) local desc tf.with_op_name(name, "SparseTensorDenseAdd") do desc = tf.NodeDescription("SparseTensorDenseAdd") @@ -62936,7 +62937,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) local desc tf.with_op_name(name, "PreventGradient") do desc = tf.NodeDescription("PreventGradient") @@ -62980,7 +62981,7 @@ end """ begin - #= /Users/malmaud/code/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) + #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExport") do desc = tf.NodeDescription("LookupTableExport") From 1c12f64b48300d305c397036e6a3b8ad16ddd75b Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Thu, 14 Mar 2019 15:13:18 -0400 Subject: [PATCH 31/49] PyCall adjustments etc. 
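This patch tracks PyCall's move to `getproperty`-based attribute access:
Python attributes are now written `obj.attr` instead of the older
`obj[:attr]` indexing, so py.jl, io/tfrecord.jl, and version.jl are updated
accordingly. It also marks `global_context` as `const` so the global's type
is known, hoists `DeviceList`'s constructors out of the struct body, makes
`load_proto` warn before defaulting to Float32 on an unrecognized proto
type, and flips `with_context` to take the block as its first argument.

The `with_context` flip matters because `f(args...) do ... end` lowers to
`f(block, args...)`, i.e. Julia passes the do-block as the first argument.
A minimal standalone sketch of the pattern (the bare `STACK` vector below is
an illustrative stand-in for the package's real context stack, not its
actual implementation):

    const STACK = Any[]

    function with_context(block, ctx)
        push!(STACK, ctx)   # make ctx the innermost context
        res = block()       # run the caller's code with ctx active
        pop!(STACK)         # restore the previous state
        return res
    end

    # With the block first, call sites can use do-block syntax:
    with_context(:eager) do
        println("innermost context: ", last(STACK))
    end

The PyCall change follows one mechanical rule everywhere. For example,
assuming a working PyCall installation:

    using PyCall
    math = pyimport("math")
    math.sqrt(2.0)          # property access, replacing math[:sqrt](2.0)
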
--- src/TensorFlow.jl | 2 +- src/core.jl | 33 ++++++++++++++++----------------- src/eager.jl | 4 +--- src/io/tfrecord.jl | 12 ++++++------ src/py.jl | 34 ++++++++++++++++------------------ src/summary_writer.jl | 2 +- src/tape.jl | 18 ++++++++---------- src/version.jl | 2 +- 8 files changed, 50 insertions(+), 57 deletions(-) diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl index b3bcd406..338838d4 100644 --- a/src/TensorFlow.jl +++ b/src/TensorFlow.jl @@ -153,7 +153,7 @@ end ContextStack() = ContextStack(Context[]) -global_context = ContextStack() +const global_context = ContextStack() function __init__() c_deallocator[] = @cfunction(deallocator, Cvoid, (Ptr{Cvoid}, Csize_t, Ptr{Cvoid})) diff --git a/src/core.jl b/src/core.jl index 2c917d04..ed327c71 100644 --- a/src/core.jl +++ b/src/core.jl @@ -507,24 +507,20 @@ end mutable struct DeviceList ptr::Ptr{Cvoid} count::Int +end - function DeviceList(s::Session) - status = Status() - ptr = @tfcall(:TF_SessionListDevices, Ptr{Cvoid}, - (Ptr{Cvoid}, Ptr{Cvoid}), s, status) - check_status(status) - count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},), - ptr) - this = new(ptr, count) - finalizer(this) do self - close(self) - end - this - end - - function DeviceList(ptr, count) - new(ptr, count) +function DeviceList(s::Session) + status = Status() + ptr = @tfcall(:TF_SessionListDevices, Ptr{Cvoid}, + (Ptr{Cvoid}, Ptr{Cvoid}), s, status) + check_status(status) + count = @tfcall(:TF_DeviceListCount, Cint, (Ptr{Cvoid},), + ptr) + this = DeviceList(ptr, count) + finalizer(this) do self + close(self) end + this end struct DeviceInfo @@ -1174,7 +1170,10 @@ function load_proto(value::tensorflow.AttrValue) load_proto(value.list) elseif has_field(value, :_type) type_ = value._type - get(proto_type_map, type_, Float32) # wrong + get(proto_type_map, type_) do + @warn "Unrecognized type. Defaulting to Float32." type_ + Float32 + end end end diff --git a/src/eager.jl b/src/eager.jl index 17b4cf0f..0dd4a0fd 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -214,13 +214,11 @@ function setindex!(op::EagerOp, value::Vector, attr_name) end function set_attr_list(op::EagerOp, attr_name, list::Vector{<:Integer}) - # list = Int64[Int64(x) for x in list] list = Int64.(list) @tfcall(:TFE_OpSetAttrIntList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Int64}, Cint), op, attr_name, list, length(list)) end function set_attr_list(op::EagerOp, attr_name, list::Vector{<:AbstractFloat}) - # list = Float32[Float32(x) for x in list] list = Float32.(list) @tfcall(:TFE_OpSetAttrFloatList, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Float32}, Cint), op, attr_name, list, length(list)) end @@ -376,7 +374,7 @@ function in_eager_mode() return context_value("eager")::Bool end -function with_context(ctx, block) +function with_context(block, ctx) push!(global_context, ctx) res = block() pop!(global_context) diff --git a/src/io/tfrecord.jl b/src/io/tfrecord.jl index 82b425f8..e0b5b7ce 100644 --- a/src/io/tfrecord.jl +++ b/src/io/tfrecord.jl @@ -22,7 +22,7 @@ Opens a TensorFlow record writer. Records will be written to the file at the given path. """ function RecordWriter(path::AbstractString) - pyo = @tf.py_proc py_tf[][:python_io][:TFRecordWriter]($path) + pyo = @tf.py_proc py_tf[].python_io.TFRecordWriter($path) RecordWriter(pyo) end @@ -33,7 +33,7 @@ Writes a record `msg` to the TensorFlow writer `writer`. Tries to convert the msg to `Vector{UInt8}` before writing. 
""" function Base.write(writer::RecordWriter, msg::Vector{UInt8}) - fetch(@tf.py_proc $(writer.pyo)[:write](py_bytes($msg))) + fetch(@tf.py_proc $(writer.pyo).write(py_bytes($msg))) end Base.write(writer::RecordWriter, s::AbstractString) = write(writer, Vector{UInt8}(s)) @@ -45,7 +45,7 @@ function RecordWriter(f::Function, path) end function Base.close(writer::RecordWriter) - fetch(@tf.py_proc $(writer.pyo)[:close]()) + fetch(@tf.py_proc $(writer.pyo).close()) end struct RecordIterator @@ -59,17 +59,17 @@ Returns a Julia iterator that returns the records in the TF Record file at `path` as `Vector{UInt8}` objects. """ function RecordIterator(path::AbstractString) - pyo = @tf.py_proc py_tf[][:python_io][:tf_record_iterator]($path) + pyo = @tf.py_proc py_tf[].python_io.tf_record_iterator($path) RecordIterator(pyo) end function _next(iter::RecordIterator) try ans=@static if PyCall.pyversion >= v"3.0.0" - fetch(@tf.py_proc $(iter.pyo)[:__next__]()) + fetch(@tf.py_proc $(iter.pyo).__next__()) else #Python 2 - fetch(@tf.py_proc $(iter.pyo)[:next]()) + fetch(@tf.py_proc $(iter.pyo).next()) end Vector{UInt8}(ans) catch err diff --git a/src/py.jl b/src/py.jl index 707430bb..20152c54 100644 --- a/src/py.jl +++ b/src/py.jl @@ -11,14 +11,14 @@ function init() py_tf_core[] = pyimport("tensorflow.core") pywrap_tensorflow[] = pyimport("tensorflow.python.pywrap_tensorflow") catch err - error("The Python TensorFlow package could not be imported. You must install Python TensorFlow before using this package.") + error("The Python TensorFlow package could not be imported. You must install Python TensorFlow before using this package. The error was $err") end end function py_with(f, ctx_mngr) - ctx_mngr[:__enter__]() + ctx_mngr.__enter__() f() - ctx_mngr[:__exit__](nothing, nothing, nothing) + ctx_mngr.__exit__(nothing, nothing, nothing) end function py_bytes(b::DenseVector{UInt8}) @@ -41,31 +41,29 @@ macro py_catch(ex) try $(esc(ex)) catch err - s = string("Python error: ", repr(err.val)) + s = string("Python error: ", repr(err)) error(s) end end end function make_py_graph(graph_proto) - py_graph = py_tf[][:Graph]() - py_with(py_graph[:as_default]()) do - # graph_def = py_tf[][:GraphDef]() - graph_def = py_tf_core[][:protobuf][:meta_graph_pb2][:MetaGraphDef]() - graph_def[:ParseFromString](graph_proto|>py_bytes) - # @py_catch py_tf[][:import_graph_def](graph_def, name="") - @py_catch py_tf[][:train][:import_meta_graph](graph_def) + py_graph = py_tf[].Graph() + py_with(py_graph.as_default()) do + graph_def = py_tf_core[].protobuf.meta_graph_pb2.MetaGraphDef() + graph_def.ParseFromString(graph_proto|>py_bytes) + @py_catch py_tf[].train.import_meta_graph(graph_def) end py_graph end function to_protos(py_graph) - nodes = PyVector(py_graph[:node]) + nodes = PyVector(py_graph.node) n_nodes = length(nodes) protos = [] for node_idx in 1:n_nodes node_py = nodes[node_idx] - proto = codeunits(node_py[:SerializeToString]()) + proto = codeunits(node_py.SerializeToString()) push!(protos, proto) end return protos @@ -74,15 +72,15 @@ end function py_gradients(jl_graph_proto, x_names, y_names, grad_y_names) py_graph = make_py_graph(jl_graph_proto) - to_py_node(node_name) = py_graph[:get_tensor_by_name](string(node_name[1], ":", node_name[2]-1)) + to_py_node(node_name) = py_graph.get_tensor_by_name(string(node_name[1], ":", node_name[2]-1)) to_py_node(node_names::AbstractVector) = tuple(to_py_node.(node_names)...) 
#Need tuple as Vector will not be accepted
    to_py_node(::Cvoid) = nothing
    py_x = to_py_node(x_names)
    py_y = to_py_node(y_names)
    py_grad_y = to_py_node(grad_y_names)
-    @py_catch grad_node = py_tf[][:gradients](py_y, py_x, py_grad_y)
-    py_graph_def = py_graph[:as_graph_def]()
+    @py_catch grad_node = py_tf[].gradients(py_y, py_x, py_grad_y)
+    py_graph_def = py_graph.as_graph_def()
     grad_names = []
     for node in grad_node
         if node === nothing
@@ -90,9 +88,9 @@ function py_gradients(jl_graph_proto, x_names, y_names, grad_y_names)
             continue
         end
         try
-            push!(grad_names, (node[:values][:name], node[:indices][:name]))
+            push!(grad_names, (node.values.name, node.indices.name))
         catch
-            push!(grad_names, node[:name])
+            push!(grad_names, node.name)
         end
     end
     return to_protos(py_graph_def), grad_names
diff --git a/src/summary_writer.jl b/src/summary_writer.jl
index 8bd2a0ae..dd9d9acf 100644
--- a/src/summary_writer.jl
+++ b/src/summary_writer.jl
@@ -108,7 +108,7 @@ end
 function with_default(writer::FileWriter, block)
     context = tf.Context()
     context.attrs["default_file_writer"] = writer
-    tf.with_context(context, block)
+    tf.with_context(block, context)
 end
 
 function get_default_file_writer()
diff --git a/src/tape.jl b/src/tape.jl
index f2ace4f7..77426279 100644
--- a/src/tape.jl
+++ b/src/tape.jl
@@ -27,9 +27,7 @@ function set_tape(new_tape=nothing)
     return new_tape
 end
 
-function get_tape()
-    return global_context["tape"]
-end
+get_tape() = global_context["tape"]
 
 function add_node(t, node)
     tape = get_tape()
@@ -67,13 +65,6 @@ end)
     [y.* (x.^(y.-1)), nothing]
 end)
 
-function with_no_grad(f)
-    context = Context()
-    context.attrs["tape"] = nothing
-    res = with_context(context, f)
-    return res
-end
-
 @back_for(Ops.exp, function f(grad, x; kwargs...)
     Ops.exp(x) .* grad
 end)
@@ -136,6 +127,13 @@ end)
     [grad, Ops.bias_add_grad(grad)]
 end)
 
+function with_no_grad(f)
+    context = Context()
+    context.attrs["tape"] = nothing
+    res = with_context(f, context)
+    return res
+end
+
 ensure_vector(x::AbstractArray) = x
 ensure_vector(x) = [x]
 
diff --git a/src/version.jl b/src/version.jl
index 71d98370..30d4e41e 100644
--- a/src/version.jl
+++ b/src/version.jl
@@ -17,7 +17,7 @@ function tf_version(; kind=:backend)
     if kind == :backend
         res = @tfcall(:TF_Version, Cstring, ()) |> unsafe_string
     elseif kind == :python
-        res = fetch(@py_proc py_tf[][:VERSION])
+        res = fetch(@py_proc py_tf[].VERSION)
     elseif kind == :julia
         return Pkg.installed()["TensorFlow"]
     else

From 305190799d02933ae46f79434b033ef583d4f4c7 Mon Sep 17 00:00:00 2001
From: Jon Malmaud
Date: Thu, 14 Mar 2019 19:17:39 -0400
Subject: [PATCH 32/49] Overhaul context system

---
 src/TensorFlow.jl     | 18 ++++----------
 src/context.jl        | 44 ++++++++++++++++++++++++++++++++++
 src/eager.jl          | 56 +++++++++++--------------------------------
 src/summary_writer.jl | 13 ++++------
 src/tape.jl           | 21 ++++++++++------
 5 files changed, 80 insertions(+), 72 deletions(-)
 create mode 100644 src/context.jl

diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl
index 338838d4..1a935b0c 100644
--- a/src/TensorFlow.jl
+++ b/src/TensorFlow.jl
@@ -141,23 +141,13 @@ function deallocator(data, len, arg)
 
 end
 
-struct Context
-    attrs::Dict
-end
-
-Context() = Context(Dict())
-
-struct ContextStack
-    contexts::Vector{Context}
-end
-
-ContextStack() = ContextStack(Context[])
-
-const global_context = ContextStack()
+include("context.jl")
 
 function __init__()
     c_deallocator[] = @cfunction(deallocator, Cvoid, (Ptr{Cvoid}, Csize_t, Ptr{Cvoid}))
-    push!(global_context, default_context())
+    for context in default_context()
+        push!(global_context, context)
+    end
 end
 
 function load_python_process(;force_reload=false)
diff --git a/src/context.jl b/src/context.jl
new file mode 100644
index 00000000..f70d71ab
--- /dev/null
+++ b/src/context.jl
@@ -0,0 +1,44 @@
+abstract type Context
+end
+
+struct ContextStack
+    contexts::Vector{Context}
+end
+
+ContextStack() = ContextStack(Context[])
+
+function Base.push!(stack::ContextStack, context::Context)
+    push!(stack.contexts, context)
+end
+
+function Base.pop!(stack::ContextStack)
+    pop!(stack.contexts)
+end
+
+function default_context()
+    return [ExecutionMode(eager=false)]
+end
+
+function context_value(context_type)
+    return global_context[context_type]
+end
+
+function Base.getindex(c::ContextStack, context_type)
+    value = nothing
+    for context in c.contexts
+        if isa(context, context_type)
+            value = context
+        end
+    end
+    return value
+end
+
+function with_context(block, ctx)
+    push!(global_context, ctx)
+    res = block()
+    pop!(global_context)
+    return res
+end
+
+
+const global_context = ContextStack()
diff --git a/src/eager.jl b/src/eager.jl
index 0dd4a0fd..cb1fe454 100644
--- a/src/eager.jl
+++ b/src/eager.jl
@@ -1,4 +1,4 @@
-mutable struct EagerContext
+mutable struct EagerContext <: Context
     ptr::Ptr{Cvoid}
 end
 
@@ -121,9 +121,10 @@ end
 
 function EagerOp(op_name)
     if get_eager_context() === nothing
-        ctx = Context()
-        ctx.attrs["eager_context"] = EagerContext()
-        push!(global_context, ctx)
+        # ctx = Context()
+        # ctx.attrs["eager_context"] = EagerContext()
+        # push!(global_context, ctx)
+        push!(global_context, EagerContext())
     end
     ctx = get_eager_context()
     status = Status()
@@ -245,7 +246,6 @@ function clear_caches(ctx::EagerContext)
     @tfcall(:TFE_ContextClearCaches, Cvoid, (Ptr{Cvoid},), ctx)
 end
 
-
 function num_dims(h::TensorHandle)
     status = Status()
     res = @tfcall(:TFE_TensorHandleNumDims, Cint, (Ptr{Cvoid}, Ptr{Cvoid}), h, status)
@@ -335,52 +335,24 @@ function inplace_sub(x, y)
     Ops.inplace_sub(x, i, y)
 end
 
-function Base.push!(stack::ContextStack, context::Context)
-    push!(stack.contexts, context)
-end
-
-function Base.pop!(stack::ContextStack)
-    pop!(stack.contexts)
+struct ExecutionMode <: Context
+    eager::Bool
 end
 
-function default_context()
-    context = Context()
-    context.attrs["eager"] = false
-    return context
-end
+ExecutionMode(;eager=true) = ExecutionMode(eager)
 
 function enable_eager_execution()
-    context = Context()
-    context.attrs["eager"] = true
-    push!(global_context, context)
+    # context = Context()
+    # context.attrs["eager"] = true
+    # push!(global_context, context)
+    push!(global_context, ExecutionMode(eager=true))
     return nothing
 end
 
-function Base.getindex(c::ContextStack, name)
-    value = nothing
-    for context in c.contexts
-        if name in keys(context.attrs)
-            value = context.attrs[name]
-        end
-    end
-    return value
-end
-
-function context_value(name)
-    return global_context[name]
-end
-
 function in_eager_mode()
-    return context_value("eager")::Bool
-end
-
-function with_context(block, ctx)
-    push!(global_context, ctx)
-    res = block()
-    pop!(global_context)
-    return res
+    return context_value(ExecutionMode).eager
 end
 
 function get_eager_context()
-    return context_value("eager_context")
+    return context_value(EagerContext)
 end
diff --git a/src/summary_writer.jl b/src/summary_writer.jl
index dd9d9acf..2b0e2568 100644
--- a/src/summary_writer.jl
+++ b/src/summary_writer.jl
@@ -4,7 +4,7 @@ import ..TensorFlow
 const tf = TensorFlow
 import ..TensorFlow: tensorflow, Graph, get_def_graph, @py_proc
 
-struct FileWriter
+struct FileWriter <: tf.Context
     file_handle
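    # the IO stream for the open events file (FileWriter(log_dir) below passes open(path, "w") here)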
logdir::String end @@ -100,19 +100,15 @@ function Base.write(writer::FileWriter, graph::Graph) end function set_default(writer::FileWriter) - context = tf.Context() - context.attrs["default_file_writer"] = writer - push!(tf.global_context, context) + push!(tf.global_context, writer) end function with_default(writer::FileWriter, block) - context = tf.Context() - context.attrs["default_file_writer"] = writer - tf.with_context(block, context) + tf.with_context(block, writer) end function get_default_file_writer() - return tf.global_context["default_file_writer"] + return tf.context_value(FileWriter) end function record_summary(summary_pb; step=0) @@ -121,7 +117,6 @@ function record_summary(summary_pb; step=0) write(writer, summary_pb, step) end - function Base.close(writer::FileWriter) close(writer.file_handle) nothing diff --git a/src/tape.jl b/src/tape.jl index 77426279..a3ad5904 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -17,17 +17,26 @@ end Tape(;kwargs...) = Tape(Dict{TensorHandle, TapeNode}(), Dict(kwargs...)) +struct TapeContext <: Context + tape::Union{Tape, Nothing} +end + function set_tape(new_tape=nothing) if new_tape === nothing new_tape = Tape() end - context = Context() - context.attrs["tape"] = new_tape - push!(global_context, context) + push!(global_context, TapeContext(new_tape)) return new_tape end -get_tape() = global_context["tape"] +function get_tape() + tape_context = context_value(TapeContext) + if tape_context === nothing + return nothing + else + return tape_context.tape + end +end function add_node(t, node) tape = get_tape() @@ -128,9 +137,7 @@ end) end) function with_no_grad(f) - context = Context() - context.attrs["tape"] = nothing - res = with_context(f, context) + res = with_context(f, TapeContext(nothing)) return res end From f7cbd381134a19996861743f4c6d1c2895384d97 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Thu, 14 Mar 2019 19:33:41 -0400 Subject: [PATCH 33/49] Switch to dispatch for gradients --- examples/keras.jl | 1 + src/tape.jl | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/keras.jl b/examples/keras.jl index 59a5b1e7..a6f472c3 100644 --- a/examples/keras.jl +++ b/examples/keras.jl @@ -1,5 +1,6 @@ using TensorFlow tf=TensorFlow +tf.enable_eager_execution() m = tf.Sequential() tf.add(m, tf.Dense(3,10)) diff --git a/src/tape.jl b/src/tape.jl index a3ad5904..d85162b3 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -44,16 +44,18 @@ function add_node(t, node) tape.nodes[t] = node end -grad_fns = Dict() +function backwards +end macro back_for(target, fn) def = splitdef(fn) if def[:name] == :f def[:name] = Symbol(string(target, "_", "backwards")) end + backwards_expr = :(backwards(::typeof($target)) = $(def[:name])) quote $(esc(combinedef(def))) - grad_fns[$target] = $(def[:name]) + $(esc(backwards_expr)) end end @@ -150,7 +152,7 @@ function _grad(tape::Tape, tensor, out_grad, grads) end node = tape.nodes[tensor] - back_op = grad_fns[node.op] + back_op = backwards(node.op) arg_grads = with_no_grad() do back_op(out_grad, node.args...; output=node.results, node.kwargs...) 
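        # runs with the tape disabled (with_no_grad pushes TapeContext(nothing)),
        # so the backward ops are not themselves recorded while computing gradients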
end From e05997b8440fd3321d9536342a0b4d26cb4da7e7 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 15:30:17 -0400 Subject: [PATCH 34/49] Misc improvements --- src/core.jl | 9 +++---- src/eager.jl | 63 ++++++++++++++++++++++--------------------- src/generate_ops.jl | 8 +++--- src/keras.jl | 12 +++------ src/ops/math.jl | 2 +- src/show.jl | 3 +-- src/tape.jl | 2 +- test/summary_eager.jl | 5 ++++ 8 files changed, 49 insertions(+), 55 deletions(-) diff --git a/src/core.jl b/src/core.jl index ed327c71..b66b4c71 100644 --- a/src/core.jl +++ b/src/core.jl @@ -1223,17 +1223,11 @@ Represents the output of an operation in the computation graph value_index::Int end -get_graph(t::AbstractTensor) = Tensor(t).op.graph - -node_name(t::AbstractTensor) = (node_name(Tensor(t).op), Tensor(t).value_index) - function Tensor(op::Operation, value_index::Int) base_tensor = Tensor{Any}(op, value_index) Tensor{get_output_type(base_tensor)}(op, value_index) end -# Tensor constructors - Tensor(op::Operation) = Tensor(op, 1) Tensor(value) = convert(Tensor, value) @@ -1249,6 +1243,9 @@ Base.convert(::Type{Tensor{Any}}, value::Tensor{R}) where {R} = value Base.convert(::Type{Tensor{T}}, value) where {T} = convert(Tensor{T}, constant(value)) +get_graph(t::AbstractTensor) = Tensor(t).op.graph + +node_name(t::AbstractTensor) = (node_name(Tensor(t).op), Tensor(t).value_index) function operation_output_type(port::Port) @tfcall(:TF_OperationOutputType, TF_DataType, (Port,), port) diff --git a/src/eager.jl b/src/eager.jl index cb1fe454..5b10dd78 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -1,7 +1,3 @@ -mutable struct EagerContext <: Context - ptr::Ptr{Cvoid} -end - @enum PlacementPolicy begin PLACEMENT_EXPLICIT=0 PLACEMENT_WARN=1 @@ -9,6 +5,10 @@ end PLACEMENT_SILENT_FOR_INT32=3 end +mutable struct EagerContext <: Context + ptr::Ptr{Cvoid} +end + function EagerContext(;async=false, placement_policy=nothing) # For some reason, this has to be called before :TFE_Execute or else tf # crashes. 
Maybe something about TF_GetAllOpList is causing the tf
@@ -44,21 +44,17 @@ end
 
 mutable struct TensorHandle <: AbstractTensor{Any}
     ptr::Ptr{Cvoid}
+end
 
-    function TensorHandle(tensor::RawTensor)
-        status = Status()
-        ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status)
-        check_status(status)
-        this = new(ptr)
-        finalizer(this) do self
-            @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr)
-        end
-        return this
-    end
-
-    function TensorHandle()
-        return new()
+function TensorHandle(tensor::RawTensor)
+    status = Status()
+    ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status)
+    check_status(status)
+    this = TensorHandle(ptr)
+    finalizer(this) do self
+        @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr)
     end
+    return this
 end
 
 EagerTensor(value) = TensorHandle(RawTensor(value))
@@ -99,9 +95,7 @@ function resolve(h::TensorHandle)
     return tensor
 end
 
-function Base.Array(h::TensorHandle)
-    convert(Array, resolve(h))
-end
+Base.convert(::Type{Array}, h::TensorHandle) = convert(Array, resolve(h))
 
 mutable struct EagerOp
     ptr::Ptr{Cvoid}
@@ -120,12 +114,6 @@ function EagerOp(ctx::EagerContext, op_name)
 end
 
 function EagerOp(op_name)
-    if get_eager_context() === nothing
-        # ctx = Context()
-        # ctx.attrs["eager_context"] = EagerContext()
-        # push!(global_context, ctx)
-        push!(global_context, EagerContext())
-    end
     ctx = get_eager_context()
     status = Status()
     ptr = @tfcall(:TFE_NewOp, Ptr{Cvoid}, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}), ctx, op_name, status)
@@ -155,7 +143,7 @@ end
 function execute(op::EagerOp)
     op_desc = get_op_def(op.op_name)
     n_outputs = length(op_desc.output_arg)
-    handles = [TensorHandle() for _ in 1:n_outputs]
+    handles = [TensorHandle(C_NULL) for _ in 1:n_outputs]
     ptrs = [Ptr{Cvoid}(0) for _ in 1:n_outputs]
     num_ret = Ref{Cint}(n_outputs)
     status = Status()
@@ -298,18 +286,26 @@ function get_device(op::EagerOp)
     return String(str)
 end
 
+
+"""
+    set_xla_compilation(op::EagerOp, enable::Bool)
+
+When `enable` is `true`, and if TensorFlow is built with XLA support, a subsequent
+`execute` call on `op` will run the op via XLA.
+
+If the library is not built with XLA support, this call is a no-op.
+"""
 function set_xla_compilation(op::EagerOp, enable)
     @tfcall(:TFE_OpSetXLACompilation, Ptr{Cvoid}, (Ptr{Cvoid}, Cuchar), op, enable)
-    return enable
 end
 
 Base.convert(::Type{TensorHandle}, h::TensorHandle) = h
 Base.convert(::Type{TensorHandle}, h) = constant(h)
 
 function item(t::TensorHandle)
-    x = Array(t)
+    x = convert(Array, t)
     if length(x) != 1
-        throw(ErrorException("item can only be called on scalar tensors"))
+        throw(DimensionMismatch("item can only be called on scalar tensors"))
     end
     return x[1]
 end
@@ -354,5 +350,10 @@ function in_eager_mode()
 end
 
 function get_eager_context()
-    return context_value(EagerContext)
+    ctx = context_value(EagerContext)
+    if ctx === nothing
+        ctx = EagerContext()
+        push!(global_context, ctx)
+    end
+    return ctx
 end
diff --git a/src/generate_ops.jl b/src/generate_ops.jl
index c2676ecb..792443e3 100644
--- a/src/generate_ops.jl
+++ b/src/generate_ops.jl
@@ -92,7 +92,7 @@ function to_function(op::tensorflow.OpDef)
     end
     for (input_idx, input) in enumerate(op.input_arg)
         sym = inputs[input_idx]
-        convert_target = (tf.Tensor{Any})
+        convert_target = tf.Tensor{Any}
         # Heuristic for when 1-based conversion is necessary
        # Generally, you can tell by the name of the type attribute.
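        # (for instance, an input whose type attribute is named something like "Tidx"
        # usually holds indices that need the shift; the example name is illustrative)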
@@ -117,7 +117,7 @@ function to_function(op::tensorflow.OpDef) end end if input._type > 0 && haskey(proto_type_map, input._type) - convert_target = (tf.Tensor{(proto_type_map[input._type])}) + convert_target = tf.Tensor{(proto_type_map[input._type])} end convert_expr = if isempty(input.number_attr) && isempty(input.type_list_attr) # Scalar input :($sym = convert($(convert_target), $sym)) @@ -140,7 +140,6 @@ function to_function(op::tensorflow.OpDef) tf.add_input(desc, $(inputs[input_idx])) end) end - eager_input_block = input_block kwargs = Expr(:parameters) push!(kwargs.args, Expr(:kw, :name, nothing)) @@ -251,7 +250,7 @@ function to_function(op::tensorflow.OpDef) desc = tf.EagerOp($(op.name)) # $convert_block $(eager_convert_block...) - $eager_input_block + $input_block $attr_block $(t_block...) res = tf.execute(desc) @@ -329,7 +328,6 @@ function stringify_func(opfunc::OpFunc) s = string(expr) docstring = replace(opfunc.docstring, "\$" => "") doc_line = "\"\"\"\n$(docstring)\n\"\"\"" - # "$doc_line\n$s\n$doc_line\n$(string(opfunc.eager_expr))\n" "$doc_line\n$s\n" end diff --git a/src/keras.jl b/src/keras.jl index 17780362..8bde2d87 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -74,9 +74,7 @@ function add(m::Sequential, layer) push!(m.attrs["layers"], layer) end -function forward(d::Dense, x) - Ops.bias_add(x*d.weights, d.bias) -end +forward(d::Dense, x) = Ops.bias_add(x*d.weights, d.bias) function forward(m::Sequential, x) for layer in m.attrs["layers"] @@ -85,9 +83,7 @@ function forward(m::Sequential, x) return x end -function mse(y, y_target) - return mean((y .- y_target) .^ 2) -end +mse(y, y_target) = mean((y .- y_target) .^ 2) function set_trainable(m::Sequential, tensor) push!(m.attrs["trainable"], tensor) @@ -98,9 +94,7 @@ function compile(m::Sequential; optimizer=nothing, loss=nothing) m.attrs["loss"] = loss end -function optimizier_step(g::SGD, value, grads) - inplace_sub(value, g.lr .* grads) -end +optimizier_step(g::SGD, value, grads) = inplace_sub(value, g.lr .* grads) function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) optimizer = m.attrs["optimizer"] diff --git a/src/ops/math.jl b/src/ops/math.jl index 8aadfcc4..fe271aa5 100644 --- a/src/ops/math.jl +++ b/src/ops/math.jl @@ -204,7 +204,7 @@ for reduction in [:sum, :prod, :min, :max, :all, :any, :mean] end if in_eager_mode() if axis === nothing - n_value = Array(n) # TODO use shape functions instead + n_value = convert(Array, n) # TODO use shape functions instead num_axis = length(size(n_value)) axis = Ops.range(constant(0), constant(num_axis), constant(1)) fn = Ops.$reduction diff --git a/src/show.jl b/src/show.jl index f54ef127..dcd83b38 100644 --- a/src/show.jl +++ b/src/show.jl @@ -41,8 +41,7 @@ function Base.show(io::IO, t::RawTensor) end function Base.show(io::IO, t::TensorHandle) - raw_tensor = resolve(t) - jl_array = convert(Array, raw_tensor) + jl_array = convert(Array, t) ptr = pointer_from_objref(t) print(io, "Tensor<$ptr>($(jl_array))") end diff --git a/src/tape.jl b/src/tape.jl index d85162b3..b9ea6b01 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -1,7 +1,7 @@ using MacroTools import MacroTools: splitdef, combinedef -mutable struct TapeNode +struct TapeNode op::Function args::Vector results::Vector diff --git a/test/summary_eager.jl b/test/summary_eager.jl index 315b98c9..3eac3433 100644 --- a/test/summary_eager.jl +++ b/test/summary_eager.jl @@ -1,5 +1,6 @@ using TensorFlow tf = TensorFlow +tf.enable_eager_execution() summary = tf.summary mktempdir() do tmpdir writer = summary.FileWriter(tmpdir) 
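    # (no graph is attached to the writer here: FileWriter only grabs the default graph outside eager mode)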
@@ -8,4 +9,8 @@ mktempdir() do tmpdir summary.scalar(tag, 3.2, step=0) summary.scalar(tag, 5.0, step=1) summary.scalar(tag, -2.5, step=2) + + # Test convenience macros + loss=2.0 + @tf.summary.scalar(loss, step=1) end From f6ef4c7c485f4b64166200d1c5bd1a61d02c5f3e Mon Sep 17 00:00:00 2001 From: Lyndon White Date: Fri, 15 Mar 2019 15:34:50 -0400 Subject: [PATCH 35/49] Apply suggestions from code review Co-Authored-By: malmaud --- src/eager.jl | 7 +++++-- src/generate_ops.jl | 2 +- src/keras.jl | 2 +- src/meta.jl | 2 -- src/show.jl | 2 +- src/summary_writer.jl | 3 +-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index 5b10dd78..a674f6df 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -10,7 +10,8 @@ mutable struct EagerContext <: Context end function EagerContext(;async=false, placement_policy=nothing) - # For some reason, this has to be called before :TFE_Execute or else tf + # For some reason, `get_all_op_list` + # has to be called before :TFE_Execute or else tf # crashes. Maybe something about TF_GetAllOpList is causing the tf # library to enter a bad state. get_all_op_list() @@ -20,6 +21,7 @@ function EagerContext(;async=false, placement_policy=nothing) if placement_policy !== nothing @tfcall(:TFE_ContextOptionsSetDevicePlacementPolicy, Cvoid, (Ptr{Cvoid}, Int), options, placement_policy) end + status = Status() context = @tfcall(:TFE_NewContext, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), options, status) check_status(status) @@ -312,7 +314,8 @@ end Base.length(t::TensorHandle) = item(Ops.size(t)) -Base.eltype(::Type{TensorHandle}) = Float64 # temp hack +Base.IteratorEltype(::Type{TensorHandle}) = Base.EltypeUnknown() # temp hack +Base.eltype(::Type{TensorHandle}) = Any Base.collect(t::TensorHandle) = Array(t) Base.iterate(t::TensorHandle, args...) = iterate(Array(t), args...) Base.zero(t::AbstractTensor) = Ops.zeros_like(t) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 792443e3..3d909482 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -260,8 +260,8 @@ function to_function(op::tensorflow.OpDef) $eager_output_block end end - end + call_kw_params = Expr(:parameters) for arg in inputs[1].args push!(call_kw_params.args, Expr(:kw, arg.args[1], arg.args[1])) diff --git a/src/keras.jl b/src/keras.jl index 8bde2d87..7ada4881 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -105,7 +105,7 @@ function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) y_predicted = forward(layer, y_predicted) end loss = m.attrs["loss"](y, y_predicted) - println("Epoch $epoch: Loss if $(item(loss))") + @info "" epoch loss=item(loss) values = collect(m.attrs["trainable"]) grads = grad(tape, loss, values) for (value, g) in zip(values, grads) diff --git a/src/meta.jl b/src/meta.jl index cfa7078b..59bc5561 100644 --- a/src/meta.jl +++ b/src/meta.jl @@ -173,6 +173,4 @@ end macro scalar_summary(f) @capture(f, funcname(args__; kwargs__)) - end - diff --git a/src/show.jl b/src/show.jl index dcd83b38..e01122cc 100644 --- a/src/show.jl +++ b/src/show.jl @@ -43,7 +43,7 @@ end function Base.show(io::IO, t::TensorHandle) jl_array = convert(Array, t) ptr = pointer_from_objref(t) - print(io, "Tensor<$ptr>($(jl_array))") + print(io, "EagerTensor<$ptr>($(jl_array))") end function Base.show(io::IO, n::Operation) diff --git a/src/summary_writer.jl b/src/summary_writer.jl index 2b0e2568..7ca2bae4 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -29,7 +29,7 @@ Arguments: * graph: A `Graph` object. 
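A short usage sketch (the log directory is hypothetical):

    writer = FileWriter("/tmp/logs")
    write(writer, get_def_graph())
    close(writer)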
""" function FileWriter(log_dir::AbstractString; graph=nothing) - if !tf.in_eager_mode() && graph===nothing + if !tf.in_eager_mode() && graph === nothing graph = get_def_graph() end mkpath(log_dir) @@ -38,7 +38,6 @@ function FileWriter(log_dir::AbstractString; graph=nothing) path = joinpath(log_dir, "events.out.tfevents.$i") isfile(path) || break end - rm(path, force=true) writer = FileWriter(open(path, "w"), String(log_dir)) if graph !== nothing write(writer, graph) From fb36d47265541c85aa485c8c971118e58904d817 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 16:03:21 -0400 Subject: [PATCH 36/49] Rename TensorHandle->EagerTensor --- src/eager.jl | 67 +- src/generate_ops.jl | 2 +- src/keras.jl | 8 +- src/ops/imported_ops.jl | 5342 +++++++++++++++++++-------------------- src/show.jl | 2 +- src/tape.jl | 4 +- 6 files changed, 2713 insertions(+), 2712 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index a674f6df..8df643ae 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -44,23 +44,24 @@ function DeviceList(ctx::EagerContext) return this end -mutable struct TensorHandle <: AbstractTensor{Any} +mutable struct EagerTensor <: AbstractTensor{Any} ptr::Ptr{Cvoid} + EagerTensor(ptr::Ptr) = new(ptr) end -function TensorHandle(tensor::RawTensor) +function EagerTensor(tensor::RawTensor) status = Status() ptr = @tfcall(:TFE_NewTensorHandle, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), tensor.ptr, status) check_status(status) - this = TensorHandle(ptr) + this = EagerTensor(ptr) finalizer(this) do self @tfcall(:TFE_DeleteTensorHandle, Cvoid, (Ptr{Cvoid},), self.ptr) end return this end -EagerTensor(value) = TensorHandle(RawTensor(value)) -Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::TensorHandle) = h.ptr +EagerTensor(value) = EagerTensor(RawTensor(value)) +Base.unsafe_convert(::Type{Ptr{Cvoid}}, h::EagerTensor) = h.ptr function async_wait(ctx::EagerContext) status = Status() @@ -68,14 +69,14 @@ function async_wait(ctx::EagerContext) check_status(status) end -function device_name(h::TensorHandle) +function device_name(h::EagerTensor) status = Status() c_name = @tfcall(:TFE_TensorHandleDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) return unsafe_string(c_name) end -function backing_device_name(h::TensorHandle) +function backing_device_name(h::EagerTensor) status = Status() c_name = @tfcall(:TFE_TensorHandleBackingDeviceName, Cstring, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) @@ -83,13 +84,13 @@ function backing_device_name(h::TensorHandle) end -function data_type(h::TensorHandle) +function data_type(h::EagerTensor) return @tfcall(:TFE_TensorHandleDataType, TF_DataType, (Ptr{Cvoid},), h) |> tf_to_jl_type end -Base.eltype(h::TensorHandle) = data_type(h) +Base.eltype(h::EagerTensor) = data_type(h) -function resolve(h::TensorHandle) +function resolve(h::EagerTensor) status = Status() ptr = @tfcall(:TFE_TensorHandleResolve, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) @@ -97,7 +98,7 @@ function resolve(h::TensorHandle) return tensor end -Base.convert(::Type{Array}, h::TensorHandle) = convert(Array, resolve(h)) +Base.convert(::Type{Array}, h::EagerTensor) = convert(Array, resolve(h)) mutable struct EagerOp ptr::Ptr{Cvoid} @@ -129,14 +130,14 @@ end Base.unsafe_convert(::Type{Ptr{Cvoid}}, op::EagerOp) = op.ptr -function add_input(op::EagerOp, h::TensorHandle) +function add_input(op::EagerOp, h::EagerTensor) status = Status() @tfcall(:TFE_OpAddInput, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}), op, h, status) check_status(status) 
return end -function add_input(op::EagerOp, hs::Vector{TensorHandle}) +function add_input(op::EagerOp, hs::Vector{EagerTensor}) for h in hs add_input(op, h) end @@ -145,7 +146,7 @@ end function execute(op::EagerOp) op_desc = get_op_def(op.op_name) n_outputs = length(op_desc.output_arg) - handles = [TensorHandle(C_NULL) for _ in 1:n_outputs] + handles = [EagerTensor(C_NULL) for _ in 1:n_outputs] ptrs = [Ptr{Cvoid}(0) for _ in 1:n_outputs] num_ret = Ref{Cint}(n_outputs) status = Status() @@ -159,8 +160,8 @@ end function test_eager() ctx = EagerContext() - h1 = TensorHandle(RawTensor([1,2])) - h2 = TensorHandle(RawTensor([3,4])) + h1 = EagerTensor(RawTensor([1,2])) + h2 = EagerTensor(RawTensor([3,4])) op = EagerOp(ctx, "Add") add_input(op, h1) add_input(op, h2) @@ -236,14 +237,14 @@ function clear_caches(ctx::EagerContext) @tfcall(:TFE_ContextClearCaches, Cvoid, (Ptr{Cvoid},), ctx) end -function num_dims(h::TensorHandle) +function num_dims(h::EagerTensor) status = Status() res = @tfcall(:TFE_TensorHandleNumDims, Cint, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) Int(res) end -function num_elements(h::TensorHandle) +function num_elements(h::EagerTensor) status = Status() res = @tfcall(:TFE_TensorHandleNumElements, Int64, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) @@ -251,24 +252,24 @@ function num_elements(h::TensorHandle) end -function dim(h::TensorHandle, dim_index) +function dim(h::EagerTensor, dim_index) status = Status() res = @tfcall(:TFE_TensorHandleDim, Int64, (Ptr{Cvoid}, Cint, Ptr{Cvoid}), h, dim_index-1, status) check_status(status) Int(res) end -function copy_sharing_tensor(h::TensorHandle) +function copy_sharing_tensor(h::EagerTensor) status = Status() - res = TensorHandle() + res = EagerTensor() res.ptr = @tfcall(:TFE_TensorHandleCopySharingTensor, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}), h, status) check_status(status) return res end -function copy_to_device(ctx::EagerContext, h::TensorHandle, device_name) +function copy_to_device(ctx::EagerContext, h::EagerTensor, device_name) status = Status() - res = TensorHandle() + res = EagerTensor() res.ptr = @tfcall(:TFE_TensorHandleCopyToDevice, Ptr{Cvoid}, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), h, ctx, device_name, status) check_status(status) return res @@ -301,10 +302,10 @@ function set_xla_compilation(op::EagerOp, enable) @tfcall(:TFE_OpSetXLACompilation, Ptr{Cvoid}, (Ptr{Cvoid}, Cuchar), op, enable) end -Base.convert(::Type{TensorHandle}, h::TensorHandle) = h -Base.convert(::Type{TensorHandle}, h) = constant(h) +Base.convert(::Type{EagerTensor}, h::EagerTensor) = h +Base.convert(::Type{EagerTensor}, h) = constant(h) -function item(t::TensorHandle) +function item(t::EagerTensor) x = convert(Array, t) if length(x) != 1 throw(DimensionMismatch("item can only be called on scalar tensors")) @@ -312,20 +313,20 @@ function item(t::TensorHandle) return x[1] end -Base.length(t::TensorHandle) = item(Ops.size(t)) +Base.length(t::EagerTensor) = item(Ops.size(t)) -Base.IteratorEltype(::Type{TensorHandle}) = Base.EltypeUnknown() # temp hack -Base.eltype(::Type{TensorHandle}) = Any -Base.collect(t::TensorHandle) = Array(t) -Base.iterate(t::TensorHandle, args...) = iterate(Array(t), args...) +Base.IteratorEltype(::Type{EagerTensor}) = Base.EltypeUnknown() # temp hack +Base.eltype(::Type{EagerTensor}) = Any +Base.collect(t::EagerTensor) = Array(t) +Base.iterate(t::EagerTensor, args...) = iterate(Array(t), args...) 
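# (collect and iterate first materialize the tensor as a host Array)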
Base.zero(t::AbstractTensor) = Ops.zeros_like(t) Base.ones(t::AbstractTensor) = Ops.ones_like(t) -function Base.:*(t1::TensorHandle, t2::Number) +function Base.:*(t1::EagerTensor, t2::Number) return t1 .* t2 end -function Base.:*(t1::Number, t2::TensorHandle) +function Base.:*(t1::Number, t2::EagerTensor) return t1 .* t2 end diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 3d909482..9c1c5230 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -241,7 +241,7 @@ function to_function(op::tensorflow.OpDef) eager_convert_block = [] for input in inputs[2:end] - c = :($input = convert(tf.TensorHandle, $input)) + c = :($input = convert(tf.EagerTensor, $input)) push!(eager_convert_block, c) end diff --git a/src/keras.jl b/src/keras.jl index 7ada4881..030f8d49 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -34,8 +34,8 @@ end end @callable mutable struct Dense <: Layer - weights::TensorHandle - bias::TensorHandle + weights::EagerTensor + bias::EagerTensor end function Dense(in_size::Integer, out_size::Integer) @@ -51,10 +51,10 @@ function forward(r::Relu, x) end struct SGD - lr::TensorHandle + lr::EagerTensor end -SGD(;lr=1e-3)= SGD(convert(TensorHandle, lr)) +SGD(;lr=1e-3)= SGD(convert(EagerTensor, lr)) function Sequential() d = Dict() diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index cd6538ca..0d141eb2 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -29,8 +29,8 @@ begin end function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) desc = tf.EagerOp("ReduceJoin") - inputs_ = convert(tf.TensorHandle, inputs_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + inputs_ = convert(tf.EagerTensor, inputs_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, inputs_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -95,9 +95,9 @@ begin end function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) desc = tf.EagerOp("ReduceDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - initial_state_ = convert(tf.TensorHandle, initial_state_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + initial_state_ = convert(tf.EagerTensor, initial_state_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) tf.add_input(desc, other_arguments_) @@ -163,8 +163,8 @@ begin end function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListFromTensor") - tensor_ = convert(tf.TensorHandle, tensor_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + tensor_ = convert(tf.EagerTensor, tensor_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, tensor_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -212,7 +212,7 @@ begin end function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) desc = tf.EagerOp("ExtractJpegShape") - contents_ = convert(tf.TensorHandle, contents_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, contents_) if output_type !== nothing desc["output_type"] = Base.identity(output_type) @@ -263,7 +263,7 @@ begin end function 
svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("Svd") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if compute_uv !== nothing desc["compute_uv"] = Base.Bool(compute_uv) @@ -312,7 +312,7 @@ begin end function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextSync") - iterator_ = convert(tf.TensorHandle, iterator_) + iterator_ = convert(tf.EagerTensor, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -364,7 +364,7 @@ begin end function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("RefEnter") - data_ = convert(tf.TensorHandle, data_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, data_) if frame_name !== nothing desc["frame_name"] = Base.String(frame_name) @@ -411,7 +411,7 @@ begin end function erf_eager(x_; name=nothing) desc = tf.EagerOp("Erf") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -453,7 +453,7 @@ begin end function lookup_table_export_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExportV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) + table_handle_ = convert(tf.EagerTensor, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res) @@ -490,7 +490,7 @@ begin end function round_eager(x_; name=nothing) desc = tf.EagerOp("Round") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -577,7 +577,7 @@ begin end function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeIsInitializedOp") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res) @@ -622,7 +622,7 @@ begin end function merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("Merge") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -670,9 +670,9 @@ begin end function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) desc = tf.EagerOp("HistogramFixedWidth") - values_ = convert(tf.TensorHandle, values_) - value_range_ = convert(tf.TensorHandle, value_range_) - nbins_ = convert(tf.TensorHandle, nbins_) + values_ = convert(tf.EagerTensor, values_) + value_range_ = convert(tf.EagerTensor, value_range_) + nbins_ = convert(tf.EagerTensor, nbins_) tf.add_input(desc, values_) tf.add_input(desc, value_range_) tf.add_input(desc, nbins_) @@ -716,7 +716,7 @@ begin end function asin_eager(x_; name=nothing) desc = tf.EagerOp("Asin") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -760,8 +760,8 @@ begin end function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Any") - input_ = convert(tf.TensorHandle, 
input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -805,8 +805,8 @@ begin end function rsqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("RsqrtGrad") - y_ = convert(tf.TensorHandle, y_) - dy_ = convert(tf.TensorHandle, dy_) + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -852,10 +852,10 @@ begin end function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatter") - handle_ = convert(tf.TensorHandle, handle_) - indices_ = convert(tf.TensorHandle, indices_) - value_ = convert(tf.TensorHandle, value_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, value_) @@ -906,8 +906,8 @@ begin end function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) desc = tf.EagerOp("DynamicPartition") - data_ = convert(tf.TensorHandle, data_) - partitions_ = convert(tf.TensorHandle, partitions_) + data_ = convert(tf.EagerTensor, data_) + partitions_ = convert(tf.EagerTensor, partitions_) tf.add_input(desc, data_) tf.add_input(desc, partitions_) if num_partitions !== nothing @@ -956,8 +956,8 @@ begin end function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - num_threads_ = convert(tf.TensorHandle, num_threads_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_threads_ = convert(tf.EagerTensor, num_threads_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_threads_) if output_types !== nothing @@ -1000,7 +1000,7 @@ begin end function reader_serialize_state_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeState") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res) @@ -1039,8 +1039,8 @@ begin end function right_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("RightShift") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -1092,7 +1092,7 @@ begin end function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -1145,7 +1145,7 @@ begin end function encode_png_eager(image_; name=nothing, compression=nothing) desc = tf.EagerOp("EncodePng") - image_ = convert(tf.TensorHandle, image_) + image_ = convert(tf.EagerTensor, image_) tf.add_input(desc, image_) if 
compression !== nothing desc["compression"] = Base.Int(compression) @@ -1198,7 +1198,7 @@ begin end function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugIdentity") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -1248,7 +1248,7 @@ begin end function imag_eager(input_; name=nothing) desc = tf.EagerOp("Imag") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -1309,16 +1309,16 @@ begin end function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrlV2") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -1436,7 +1436,7 @@ begin end function sign_eager(x_; name=nothing) desc = tf.EagerOp("Sign") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1474,7 +1474,7 @@ begin end function population_count_eager(x_; name=nothing) desc = tf.EagerOp("PopulationCount") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1512,7 +1512,7 @@ begin end function neg_eager(x_; name=nothing) desc = tf.EagerOp("Neg") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -1603,10 +1603,10 @@ begin end function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSum") - input_indices_ = convert(tf.TensorHandle, input_indices_) - input_values_ = convert(tf.TensorHandle, input_values_) - input_shape_ = convert(tf.TensorHandle, input_shape_) - reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -1663,8 +1663,8 @@ begin end function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, 
predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if predicate !== nothing @@ -1716,7 +1716,7 @@ begin end function string_length_eager(input_; name=nothing, unit=nothing) desc = tf.EagerOp("StringLength") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if unit !== nothing desc["unit"] = Base.String(unit) @@ -1770,8 +1770,8 @@ begin end function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3D") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -1883,7 +1883,7 @@ begin end function optional_has_value_eager(optional_; name=nothing) desc = tf.EagerOp("OptionalHasValue") - optional_ = convert(tf.TensorHandle, optional_) + optional_ = convert(tf.EagerTensor, optional_) tf.add_input(desc, optional_) res = tf.execute(desc) node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res) @@ -1944,16 +1944,16 @@ begin end function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyAdam") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - v_ = convert(tf.TensorHandle, v_) - beta1_power_ = convert(tf.TensorHandle, beta1_power_) - beta2_power_ = convert(tf.TensorHandle, beta2_power_) - lr_ = convert(tf.TensorHandle, lr_) - beta1_ = convert(tf.TensorHandle, beta1_) - beta2_ = convert(tf.TensorHandle, beta2_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -2047,10 +2047,10 @@ begin end function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsToCanonical") - num_layers_ = convert(tf.TensorHandle, num_layers_) - num_units_ = convert(tf.TensorHandle, num_units_) - input_size_ = convert(tf.TensorHandle, input_size_) - params_ = convert(tf.TensorHandle, params_) + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) + params_ = convert(tf.EagerTensor, params_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -2113,8 
+2113,8 @@ begin end function irfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT3D") - input_ = convert(tf.TensorHandle, input_) - fft_length_ = convert(tf.TensorHandle, fft_length_) + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -2152,7 +2152,7 @@ begin end function angle_eager(input_; name=nothing) desc = tf.EagerOp("Angle") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -2256,7 +2256,7 @@ begin end function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LearnedUnigramCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -2349,7 +2349,7 @@ begin end function matrix_square_root_eager(input_; name=nothing) desc = tf.EagerOp("MatrixSquareRoot") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -2393,10 +2393,10 @@ begin end function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseMul") - sp_indices_ = convert(tf.TensorHandle, sp_indices_) - sp_values_ = convert(tf.TensorHandle, sp_values_) - sp_shape_ = convert(tf.TensorHandle, sp_shape_) - dense_ = convert(tf.TensorHandle, dense_) + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -2450,8 +2450,8 @@ begin end function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV3") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -2494,7 +2494,7 @@ begin end function unicode_script_eager(input_; name=nothing) desc = tf.EagerOp("UnicodeScript") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(unicode_script, [input_], name=nothing, res) @@ -2533,8 +2533,8 @@ begin end function batch_cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("BatchCholeskyGrad") - l_ = convert(tf.TensorHandle, l_) - grad_ = convert(tf.TensorHandle, grad_) + l_ = convert(tf.EagerTensor, l_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, l_) tf.add_input(desc, grad_) desc["T"] = tf.data_type(l_) @@ -2581,8 +2581,8 @@ begin end function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Mean") - input_ = convert(tf.TensorHandle, input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = 
convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -2624,7 +2624,7 @@ begin end function batch_fft_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft, [input_], name=nothing, res) @@ -2661,7 +2661,7 @@ begin end function sin_eager(x_; name=nothing) desc = tf.EagerOp("Sin") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -2761,9 +2761,9 @@ begin end function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedMaxPool") - input_ = convert(tf.TensorHandle, input_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) + input_ = convert(tf.EagerTensor, input_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) tf.add_input(desc, input_) tf.add_input(desc, min_input_) tf.add_input(desc, max_input_) @@ -2833,9 +2833,9 @@ begin end function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapStage") - key_ = convert(tf.TensorHandle, key_) - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, key_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -2909,7 +2909,7 @@ begin end function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("PartitionedCall") - args_ = convert(tf.TensorHandle, args_) + args_ = convert(tf.EagerTensor, args_) tf.add_input(desc, args_) if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) @@ -2980,11 +2980,11 @@ begin end function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("SparseApplyAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -3058,7 +3058,7 @@ begin end function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) desc = tf.EagerOp("DecodeProtoV2") - bytes_ = convert(tf.TensorHandle, bytes_) + bytes_ = convert(tf.EagerTensor, bytes_) tf.add_input(desc, bytes_) if message_type !== nothing desc["message_type"] = Base.String(message_type) @@ -3117,9 +3117,9 @@ begin end function betainc_eager(a_, b_, x_; name=nothing) desc = tf.EagerOp("Betainc") - a_ = convert(tf.TensorHandle, a_) - b_ = 
convert(tf.TensorHandle, b_) - x_ = convert(tf.TensorHandle, x_) + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, a_) tf.add_input(desc, b_) tf.add_input(desc, x_) @@ -3161,7 +3161,7 @@ begin end function guarantee_const_eager(input_; name=nothing) desc = tf.EagerOp("GuaranteeConst") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -3201,7 +3201,7 @@ begin end function decode_bmp_eager(contents_; name=nothing, channels=nothing) desc = tf.EagerOp("DecodeBmp") - contents_ = convert(tf.TensorHandle, contents_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -3250,8 +3250,8 @@ begin end function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesBucketize") - float_values_ = convert(tf.TensorHandle, float_values_) - bucket_boundaries_ = convert(tf.TensorHandle, bucket_boundaries_) + float_values_ = convert(tf.EagerTensor, float_values_) + bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_) tf.add_input(desc, float_values_) tf.add_input(desc, bucket_boundaries_) if num_features !== nothing @@ -3324,7 +3324,7 @@ begin end function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") - iterator_ = convert(tf.TensorHandle, iterator_) + iterator_ = convert(tf.EagerTensor, iterator_) tf.add_input(desc, iterator_) res = tf.execute(desc) node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res) @@ -3394,7 +3394,7 @@ begin end function matrix_exponential_eager(input_; name=nothing) desc = tf.EagerOp("MatrixExponential") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -3435,7 +3435,7 @@ begin end function size_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Size") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -3479,7 +3479,7 @@ begin end function add_n_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("AddN") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -3526,9 +3526,9 @@ begin end function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSum") - data_ = convert(tf.TensorHandle, data_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -3576,8 +3576,8 @@ begin end function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - batch_size_ = convert(tf.TensorHandle, batch_size_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + 
batch_size_ = convert(tf.EagerTensor, batch_size_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) if output_types !== nothing @@ -3702,8 +3702,8 @@ begin end function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpToV2") - handle_ = convert(tf.TensorHandle, handle_) - n_ = convert(tf.TensorHandle, n_) + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -3825,10 +3825,10 @@ begin end function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -3880,7 +3880,7 @@ begin end function serialize_tensor_eager(tensor_; name=nothing) desc = tf.EagerOp("SerializeTensor") - tensor_ = convert(tf.TensorHandle, tensor_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tensor_) desc["T"] = tf.data_type(tensor_) res = tf.execute(desc) @@ -3920,8 +3920,8 @@ begin end function mul_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mul") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -3968,8 +3968,8 @@ begin end function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") - features_ = convert(tf.TensorHandle, features_) - labels_ = convert(tf.TensorHandle, labels_) + features_ = convert(tf.EagerTensor, features_) + labels_ = convert(tf.EagerTensor, labels_) tf.add_input(desc, features_) tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) @@ -4018,9 +4018,9 @@ begin end function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterDiv") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -4073,12 +4073,12 @@ begin end function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDatasetV2") - filenames_ = convert(tf.TensorHandle, filenames_) - header_bytes_ = convert(tf.TensorHandle, header_bytes_) - record_bytes_ = convert(tf.TensorHandle, record_bytes_) - footer_bytes_ = convert(tf.TensorHandle, footer_bytes_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) - compression_type_ = convert(tf.TensorHandle, 
compression_type_) + filenames_ = convert(tf.EagerTensor, filenames_) + header_bytes_ = convert(tf.EagerTensor, header_bytes_) + record_bytes_ = convert(tf.EagerTensor, record_bytes_) + footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + compression_type_ = convert(tf.EagerTensor, compression_type_) tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) tf.add_input(desc, record_bytes_) @@ -4127,8 +4127,8 @@ begin end function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("SkipDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - count_ = convert(tf.TensorHandle, count_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + count_ = convert(tf.EagerTensor, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) if output_types !== nothing @@ -4172,7 +4172,7 @@ begin end function cosh_eager(x_; name=nothing) desc = tf.EagerOp("Cosh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -4236,11 +4236,11 @@ begin end function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormV2") - x_ = convert(tf.TensorHandle, x_) - scale_ = convert(tf.TensorHandle, scale_) - offset_ = convert(tf.TensorHandle, offset_) - mean_ = convert(tf.TensorHandle, mean_) - variance_ = convert(tf.TensorHandle, variance_) + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + offset_ = convert(tf.EagerTensor, offset_) + mean_ = convert(tf.EagerTensor, mean_) + variance_ = convert(tf.EagerTensor, variance_) tf.add_input(desc, x_) tf.add_input(desc, scale_) tf.add_input(desc, offset_) @@ -4304,10 +4304,10 @@ begin end function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplit") - handle_ = convert(tf.TensorHandle, handle_) - value_ = convert(tf.TensorHandle, value_) - lengths_ = convert(tf.TensorHandle, lengths_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, lengths_) @@ -4367,10 +4367,10 @@ begin end function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) desc = tf.EagerOp("CTCLoss") - inputs_ = convert(tf.TensorHandle, inputs_) - labels_indices_ = convert(tf.TensorHandle, labels_indices_) - labels_values_ = convert(tf.TensorHandle, labels_values_) - sequence_length_ = convert(tf.TensorHandle, sequence_length_) + inputs_ = convert(tf.EagerTensor, inputs_) + labels_indices_ = convert(tf.EagerTensor, labels_indices_) + labels_values_ = convert(tf.EagerTensor, labels_values_) + sequence_length_ = convert(tf.EagerTensor, sequence_length_) tf.add_input(desc, inputs_) tf.add_input(desc, labels_indices_) tf.add_input(desc, labels_values_) @@ -4431,10 +4431,10 @@ begin end function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("QuantizedReshape") - tensor_ = convert(tf.TensorHandle, 
tensor_) - shape_ = convert(tf.TensorHandle, shape_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) + tensor_ = convert(tf.EagerTensor, tensor_) + shape_ = convert(tf.EagerTensor, shape_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) tf.add_input(desc, tensor_) tf.add_input(desc, shape_) tf.add_input(desc, input_min_) @@ -4478,8 +4478,8 @@ begin end function floor_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorDiv") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -4533,7 +4533,7 @@ begin end function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV2") - size_ = convert(tf.TensorHandle, size_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, size_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -4587,7 +4587,7 @@ begin end function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("BarrierClose") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) @@ -4629,7 +4629,7 @@ begin end function read_variable_op_eager(resource_; name=nothing, dtype=nothing) desc = tf.EagerOp("ReadVariableOp") - resource_ = convert(tf.TensorHandle, resource_) + resource_ = convert(tf.EagerTensor, resource_) tf.add_input(desc, resource_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -4685,12 +4685,12 @@ begin end function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedMul") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - min_x_ = convert(tf.TensorHandle, min_x_) - max_x_ = convert(tf.TensorHandle, max_x_) - min_y_ = convert(tf.TensorHandle, min_y_) - max_y_ = convert(tf.TensorHandle, max_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + min_x_ = convert(tf.EagerTensor, min_x_) + max_x_ = convert(tf.EagerTensor, max_x_) + min_y_ = convert(tf.EagerTensor, min_y_) + max_y_ = convert(tf.EagerTensor, max_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, min_x_) @@ -4734,7 +4734,7 @@ begin end function selu_eager(features_; name=nothing) desc = tf.EagerOp("Selu") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -4819,19 +4819,19 @@ begin end function cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV3") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) - sequence_lengths_ = convert(tf.TensorHandle, sequence_lengths_) - output_ = 
convert(tf.TensorHandle, output_) - output_h_ = convert(tf.TensorHandle, output_h_) - output_c_ = convert(tf.TensorHandle, output_c_) - output_backprop_ = convert(tf.TensorHandle, output_backprop_) - output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) - output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) - reserve_space_ = convert(tf.TensorHandle, reserve_space_) - host_reserved_ = convert(tf.TensorHandle, host_reserved_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + sequence_lengths_ = convert(tf.EagerTensor, sequence_lengths_) + output_ = convert(tf.EagerTensor, output_) + output_h_ = convert(tf.EagerTensor, output_h_) + output_c_ = convert(tf.EagerTensor, output_c_) + output_backprop_ = convert(tf.EagerTensor, output_backprop_) + output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) + output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) + reserve_space_ = convert(tf.EagerTensor, reserve_space_) + host_reserved_ = convert(tf.EagerTensor, host_reserved_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -4914,9 +4914,9 @@ begin end function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsert") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -4957,7 +4957,7 @@ begin end function complex_abs_eager(x_; name=nothing) desc = tf.EagerOp("ComplexAbs") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -4997,8 +4997,8 @@ begin end function tridiagonal_solve_eager(diagonals_, rhs_; name=nothing) desc = tf.EagerOp("TridiagonalSolve") - diagonals_ = convert(tf.TensorHandle, diagonals_) - rhs_ = convert(tf.TensorHandle, rhs_) + diagonals_ = convert(tf.EagerTensor, diagonals_) + rhs_ = convert(tf.EagerTensor, rhs_) tf.add_input(desc, diagonals_) tf.add_input(desc, rhs_) desc["T"] = tf.data_type(diagonals_) @@ -5043,9 +5043,9 @@ begin end function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImport") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -5086,7 +5086,7 @@ begin end function abs_eager(x_; name=nothing) desc = tf.EagerOp("Abs") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -5148,16 +5148,16 @@ begin end function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyAdam") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - v_ = 
convert(tf.TensorHandle, v_) - beta1_power_ = convert(tf.TensorHandle, beta1_power_) - beta2_power_ = convert(tf.TensorHandle, beta2_power_) - lr_ = convert(tf.TensorHandle, lr_) - beta1_ = convert(tf.TensorHandle, beta1_) - beta2_ = convert(tf.TensorHandle, beta2_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -5222,10 +5222,10 @@ begin end function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing) desc = tf.EagerOp("WriteHistogramSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tag_ = convert(tf.TensorHandle, tag_) - values_ = convert(tf.TensorHandle, values_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -5267,8 +5267,8 @@ begin end function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(tf.TensorHandle, dataset_) - materialized_ = convert(tf.TensorHandle, materialized_) + dataset_ = convert(tf.EagerTensor, dataset_) + materialized_ = convert(tf.EagerTensor, materialized_) tf.add_input(desc, dataset_) tf.add_input(desc, materialized_) res = tf.execute(desc) @@ -5321,7 +5321,7 @@ begin end function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_HostSend") - tensor_ = convert(tf.TensorHandle, tensor_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tensor_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -5376,8 +5376,8 @@ begin end function greater_eager(x_, y_; name=nothing) desc = tf.EagerOp("Greater") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -5420,7 +5420,7 @@ begin end function nccl_broadcast_eager(input_; name=nothing, shape=nothing) desc = tf.EagerOp("NcclBroadcast") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if shape !== nothing desc["shape"] = Base.identity(shape) @@ -5466,8 +5466,8 @@ begin end function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBackBatch") - input_handles_ = convert(tf.TensorHandle, input_handles_) - tensor_ = convert(tf.TensorHandle, tensor_) + input_handles_ = convert(tf.EagerTensor, input_handles_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, input_handles_) tf.add_input(desc, tensor_) if element_dtype !== nothing @@ -5518,9 +5518,9 @@ begin end function resource_scatter_min_eager(resource_, indices_, 
updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMin") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -5573,9 +5573,9 @@ begin end function slice_eager(input_, begin_, size_; name=nothing, Index=nothing) desc = tf.EagerOp("Slice") - input_ = convert(tf.TensorHandle, input_) - begin_ = convert(tf.TensorHandle, begin_) - size_ = convert(tf.TensorHandle, size_) + input_ = convert(tf.EagerTensor, input_) + begin_ = convert(tf.EagerTensor, begin_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, input_) tf.add_input(desc, begin_) tf.add_input(desc, size_) @@ -5636,7 +5636,7 @@ begin end function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecode") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -5692,8 +5692,8 @@ begin end function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TakeDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - count_ = convert(tf.TensorHandle, count_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + count_ = convert(tf.EagerTensor, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, count_) if output_types !== nothing @@ -5751,10 +5751,10 @@ begin end function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeStatsSummary") - node_ids_ = convert(tf.TensorHandle, node_ids_) - gradients_ = convert(tf.TensorHandle, gradients_) - hessians_ = convert(tf.TensorHandle, hessians_) - bucketized_features_list_ = convert(tf.TensorHandle, bucketized_features_list_) + node_ids_ = convert(tf.EagerTensor, node_ids_) + gradients_ = convert(tf.EagerTensor, gradients_) + hessians_ = convert(tf.EagerTensor, hessians_) + bucketized_features_list_ = convert(tf.EagerTensor, bucketized_features_list_) tf.add_input(desc, node_ids_) tf.add_input(desc, gradients_) tf.add_input(desc, hessians_) @@ -5822,7 +5822,7 @@ begin end function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("AllCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -5896,9 +5896,9 @@ begin end function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropInput") - input_sizes_ = convert(tf.TensorHandle, input_sizes_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_sizes_ = 
convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -5962,7 +5962,7 @@ begin end function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("DatasetToSingleElement") - dataset_ = convert(tf.TensorHandle, dataset_) + dataset_ = convert(tf.EagerTensor, dataset_) tf.add_input(desc, dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -6012,8 +6012,8 @@ begin end function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("CacheDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - filename_ = convert(tf.TensorHandle, filename_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + filename_ = convert(tf.EagerTensor, filename_) tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) if output_types !== nothing @@ -6073,10 +6073,10 @@ begin end function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") - gradients_ = convert(tf.TensorHandle, gradients_) - inputs_ = convert(tf.TensorHandle, inputs_) - min_ = convert(tf.TensorHandle, min_) - max_ = convert(tf.TensorHandle, max_) + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -6140,10 +6140,10 @@ begin end function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("FusedResizeAndPadConv2D") - input_ = convert(tf.TensorHandle, input_) - size_ = convert(tf.TensorHandle, size_) - paddings_ = convert(tf.TensorHandle, paddings_) - filter_ = convert(tf.TensorHandle, filter_) + input_ = convert(tf.EagerTensor, input_) + size_ = convert(tf.EagerTensor, size_) + paddings_ = convert(tf.EagerTensor, paddings_) + filter_ = convert(tf.EagerTensor, filter_) tf.add_input(desc, input_) tf.add_input(desc, size_) tf.add_input(desc, paddings_) @@ -6231,7 +6231,7 @@ begin end function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) desc = tf.EagerOp("Batch") - in_tensors_ = convert(tf.TensorHandle, in_tensors_) + in_tensors_ = convert(tf.EagerTensor, in_tensors_) tf.add_input(desc, in_tensors_) if num_batch_threads !== nothing desc["num_batch_threads"] = Base.Int(num_batch_threads) @@ -6360,9 +6360,9 @@ begin end function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing) desc = tf.EagerOp("BatchToSpaceND") - input_ = convert(tf.TensorHandle, input_) - block_shape_ = convert(tf.TensorHandle, block_shape_) - crops_ = convert(tf.TensorHandle, crops_) + input_ = convert(tf.EagerTensor, input_) + block_shape_ = convert(tf.EagerTensor, block_shape_) + crops_ = convert(tf.EagerTensor, crops_) tf.add_input(desc, input_) tf.add_input(desc, block_shape_) 
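# A hedged aside on the pattern at work in these hunks: every *_eager wrapper in
# this generated file follows the same lifecycle, and the rename only changes the
# conversion target from tf.TensorHandle to tf.EagerTensor. A minimal sketch,
# assuming the tf.EagerOp / tf.EagerTensor / tf.add_input / tf.data_type /
# tf.execute / tf.TapeNode helpers introduced earlier in this series (the function
# below is illustrative, not part of the generated file):
function unary_eager_sketch(op_name, x_)
    desc = tf.EagerOp(op_name)            # describe which kernel to run
    x_ = convert(tf.EagerTensor, x_)      # lift the argument to an eager tensor handle
    tf.add_input(desc, x_)                # wire it in as the op's input
    desc["T"] = tf.data_type(x_)          # infer the dtype attribute from the input
    res = tf.execute(desc)                # dispatch the kernel immediately
    node = tf.TapeNode(identity, [x_], name=nothing, res)  # placeholder graph fn; records for gradient replay
    return res, node
end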
tf.add_input(desc, crops_) @@ -6403,7 +6403,7 @@ begin end function loop_cond_eager(input_; name=nothing) desc = tf.EagerOp("LoopCond") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(loop_cond, [input_], name=nothing, res) @@ -6446,7 +6446,7 @@ begin end function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("DepthToSpace") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if block_size !== nothing desc["block_size"] = Base.Int(block_size) @@ -6493,7 +6493,7 @@ begin end function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing) desc = tf.EagerOp("DestroyTemporaryVariable") - ref_ = convert(tf.TensorHandle, ref_) + ref_ = convert(tf.EagerTensor, ref_) tf.add_input(desc, ref_) if var_name !== nothing desc["var_name"] = Base.String(var_name) @@ -6566,10 +6566,10 @@ begin end function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNN") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -6634,7 +6634,7 @@ begin end function ref_identity_eager(input_; name=nothing) desc = tf.EagerOp("RefIdentity") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -6689,9 +6689,9 @@ begin end function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGrad") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -6760,9 +6760,9 @@ begin end function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - momenta_ = convert(tf.TensorHandle, momenta_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, gradient_accumulators_) @@ -6888,9 +6888,9 @@ begin end function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = 
tf.EagerOp("Conv3DBackpropInput") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -6941,7 +6941,7 @@ begin end function ref_exit_eager(data_; name=nothing) desc = tf.EagerOp("RefExit") - data_ = convert(tf.TensorHandle, data_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -7042,8 +7042,8 @@ begin end function encode_wav_eager(audio_, sample_rate_; name=nothing) desc = tf.EagerOp("EncodeWav") - audio_ = convert(tf.TensorHandle, audio_) - sample_rate_ = convert(tf.TensorHandle, sample_rate_) + audio_ = convert(tf.EagerTensor, audio_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) tf.add_input(desc, audio_) tf.add_input(desc, sample_rate_) res = tf.execute(desc) @@ -7085,9 +7085,9 @@ begin end function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing) desc = tf.EagerOp("TensorSummaryV2") - tag_ = convert(tf.TensorHandle, tag_) - tensor_ = convert(tf.TensorHandle, tensor_) - serialized_summary_metadata_ = convert(tf.TensorHandle, serialized_summary_metadata_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + serialized_summary_metadata_ = convert(tf.EagerTensor, serialized_summary_metadata_) tf.add_input(desc, tag_) tf.add_input(desc, tensor_) tf.add_input(desc, serialized_summary_metadata_) @@ -7134,8 +7134,8 @@ begin end function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpTo") - handle_ = convert(tf.TensorHandle, handle_) - n_ = convert(tf.TensorHandle, n_) + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -7184,9 +7184,9 @@ begin end function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("MatrixBandPart") - input_ = convert(tf.TensorHandle, input_) - num_lower_ = convert(tf.TensorHandle, num_lower_) - num_upper_ = convert(tf.TensorHandle, num_upper_) + input_ = convert(tf.EagerTensor, input_) + num_lower_ = convert(tf.EagerTensor, num_lower_) + num_upper_ = convert(tf.EagerTensor, num_upper_) tf.add_input(desc, input_) tf.add_input(desc, num_lower_) tf.add_input(desc, num_upper_) @@ -7234,7 +7234,7 @@ begin end function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("Copy") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -7289,7 +7289,7 @@ begin end function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing) desc = tf.EagerOp("ShapeN") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if N !== nothing desc["N"] = Base.Int(N) @@ -7360,9 +7360,9 @@ begin end function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, 
output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ExperimentalParseExampleDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) - dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_parallel_calls_) tf.add_input(desc, dense_defaults_) @@ -7430,8 +7430,8 @@ begin end function concat_eager(concat_dim_, values_; name=nothing, N=nothing) desc = tf.EagerOp("Concat") - concat_dim_ = convert(tf.TensorHandle, concat_dim_) - values_ = convert(tf.TensorHandle, values_) + concat_dim_ = convert(tf.EagerTensor, concat_dim_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, concat_dim_) tf.add_input(desc, values_) if N !== nothing @@ -7479,7 +7479,7 @@ begin end function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatDimMap") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) if src_format !== nothing desc["src_format"] = Base.String(src_format) @@ -7567,7 +7567,7 @@ begin end function softplus_eager(features_; name=nothing) desc = tf.EagerOp("Softplus") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -7622,13 +7622,13 @@ begin end function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -7727,14 +7727,14 @@ begin end function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSingleSequenceExample") - serialized_ = convert(tf.TensorHandle, serialized_) - feature_list_dense_missing_assumed_empty_ = convert(tf.TensorHandle, feature_list_dense_missing_assumed_empty_) - context_sparse_keys_ = convert(tf.TensorHandle, context_sparse_keys_) - context_dense_keys_ = convert(tf.TensorHandle, context_dense_keys_) - feature_list_sparse_keys_ = convert(tf.TensorHandle, feature_list_sparse_keys_) - feature_list_dense_keys_ = 
convert(tf.TensorHandle, feature_list_dense_keys_) - context_dense_defaults_ = convert(tf.TensorHandle, context_dense_defaults_) - debug_name_ = convert(tf.TensorHandle, debug_name_) + serialized_ = convert(tf.EagerTensor, serialized_) + feature_list_dense_missing_assumed_empty_ = convert(tf.EagerTensor, feature_list_dense_missing_assumed_empty_) + context_sparse_keys_ = convert(tf.EagerTensor, context_sparse_keys_) + context_dense_keys_ = convert(tf.EagerTensor, context_dense_keys_) + feature_list_sparse_keys_ = convert(tf.EagerTensor, feature_list_sparse_keys_) + feature_list_dense_keys_ = convert(tf.EagerTensor, feature_list_dense_keys_) + context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) + debug_name_ = convert(tf.EagerTensor, debug_name_) tf.add_input(desc, serialized_) tf.add_input(desc, feature_list_dense_missing_assumed_empty_) tf.add_input(desc, context_sparse_keys_) @@ -7808,7 +7808,7 @@ begin end function matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("MatrixDiag") - diagonal_ = convert(tf.TensorHandle, diagonal_) + diagonal_ = convert(tf.EagerTensor, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) @@ -7888,9 +7888,9 @@ begin end function shard_dataset_eager(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShardDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - num_shards_ = convert(tf.TensorHandle, num_shards_) - index_ = convert(tf.TensorHandle, index_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_shards_ = convert(tf.EagerTensor, num_shards_) + index_ = convert(tf.EagerTensor, index_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_shards_) tf.add_input(desc, index_) @@ -7951,9 +7951,9 @@ begin end function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradGrad") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -8012,8 +8012,8 @@ begin end function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinearGrad") - grads_ = convert(tf.TensorHandle, grads_) - original_image_ = convert(tf.TensorHandle, original_image_) + grads_ = convert(tf.EagerTensor, grads_) + original_image_ = convert(tf.EagerTensor, original_image_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) if align_corners !== nothing @@ -8062,8 +8062,8 @@ begin end function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) desc = tf.EagerOp("BatchToSpace") - input_ = convert(tf.TensorHandle, input_) - crops_ = convert(tf.TensorHandle, crops_) + input_ = convert(tf.EagerTensor, input_) + crops_ = convert(tf.EagerTensor, crops_) tf.add_input(desc, input_) tf.add_input(desc, crops_) if block_size !== nothing @@ -8108,7 +8108,7 @@ begin end function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) desc = tf.EagerOp("OptionalFromValue") - components_ = convert(tf.TensorHandle, components_) + components_ = convert(tf.EagerTensor, 
components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -8150,8 +8150,8 @@ begin end function xlogy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xlogy") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -8193,8 +8193,8 @@ begin end function cross_eager(a_, b_; name=nothing) desc = tf.EagerOp("Cross") - a_ = convert(tf.TensorHandle, a_) - b_ = convert(tf.TensorHandle, b_) + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) desc["T"] = tf.data_type(a_) @@ -8236,8 +8236,8 @@ begin end function bitwise_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseAnd") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -8280,8 +8280,8 @@ begin end function broadcast_to_eager(input_, shape_; name=nothing) desc = tf.EagerOp("BroadcastTo") - input_ = convert(tf.TensorHandle, input_) - shape_ = convert(tf.TensorHandle, shape_) + input_ = convert(tf.EagerTensor, input_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, input_) tf.add_input(desc, shape_) desc["T"] = tf.data_type(input_) @@ -8323,8 +8323,8 @@ begin end function elu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("EluGrad") - gradients_ = convert(tf.TensorHandle, gradients_) - outputs_ = convert(tf.TensorHandle, outputs_) + gradients_ = convert(tf.EagerTensor, gradients_) + outputs_ = convert(tf.EagerTensor, outputs_) tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) @@ -8407,17 +8407,17 @@ begin end function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackprop") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) - output_ = convert(tf.TensorHandle, output_) - output_h_ = convert(tf.TensorHandle, output_h_) - output_c_ = convert(tf.TensorHandle, output_c_) - output_backprop_ = convert(tf.TensorHandle, output_backprop_) - output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) - output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) - reserve_space_ = convert(tf.TensorHandle, reserve_space_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + output_ = convert(tf.EagerTensor, output_) + output_h_ = convert(tf.EagerTensor, output_h_) + output_c_ = convert(tf.EagerTensor, output_c_) + output_backprop_ = convert(tf.EagerTensor, output_backprop_) + output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) + output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) + reserve_space_ = convert(tf.EagerTensor, reserve_space_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -8495,7 +8495,7 @@ begin end 
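# The `if attr !== nothing` guards visible throughout these hunks (for example in
# string_to_hash_bucket_fast_eager just below) implement optional attributes: only
# keywords the caller actually supplies are written into the op description, each
# coerced to the type the op definition expects. A minimal sketch of that pattern
# in isolation (helper name and keyword choices are illustrative):
function set_optional_attrs!(desc; num_buckets=nothing, output_types=nothing)
    if num_buckets !== nothing
        desc["num_buckets"] = Base.Int(num_buckets)              # scalar attr coerced to Int
    end
    if output_types !== nothing
        desc["output_types"] = map(Base.identity, output_types)  # list attr passed through element-wise
    end
    return desc
end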
function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucketFast") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -8597,7 +8597,7 @@ begin end function relu_eager(features_; name=nothing) desc = tf.EagerOp("Relu") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -8640,8 +8640,8 @@ begin end function nth_element_eager(input_, n_; name=nothing, reverse=nothing) desc = tf.EagerOp("NthElement") - input_ = convert(tf.TensorHandle, input_) - n_ = convert(tf.TensorHandle, n_) + input_ = convert(tf.EagerTensor, input_) + n_ = convert(tf.EagerTensor, n_) tf.add_input(desc, input_) tf.add_input(desc, n_) if reverse !== nothing @@ -8683,7 +8683,7 @@ begin end function softsign_eager(features_; name=nothing) desc = tf.EagerOp("Softsign") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -8745,7 +8745,7 @@ begin end function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTable") - empty_key_ = convert(tf.TensorHandle, empty_key_) + empty_key_ = convert(tf.EagerTensor, empty_key_) tf.add_input(desc, empty_key_) if container !== nothing desc["container"] = Base.String(container) @@ -8842,8 +8842,8 @@ begin end function polygamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Polygamma") - a_ = convert(tf.TensorHandle, a_) - x_ = convert(tf.TensorHandle, x_) + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -8889,7 +8889,7 @@ begin end function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) desc = tf.EagerOp("NcclReduce") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -8940,8 +8940,8 @@ begin end function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMax") - input_ = convert(tf.TensorHandle, input_) - dimension_ = convert(tf.TensorHandle, dimension_) + input_ = convert(tf.EagerTensor, input_) + dimension_ = convert(tf.EagerTensor, dimension_) tf.add_input(desc, input_) tf.add_input(desc, dimension_) if output_type !== nothing @@ -8986,8 +8986,8 @@ begin end function matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("MatrixSetDiag") - input_ = convert(tf.TensorHandle, input_) - diagonal_ = convert(tf.TensorHandle, diagonal_) + input_ = convert(tf.EagerTensor, input_) + diagonal_ = convert(tf.EagerTensor, diagonal_) tf.add_input(desc, input_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(input_) @@ -9033,9 +9033,9 @@ begin end function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) desc = tf.EagerOp("SpaceToBatchND") - input_ = convert(tf.TensorHandle, input_) - block_shape_ = convert(tf.TensorHandle, block_shape_) - paddings_ = 
convert(tf.TensorHandle, paddings_) + input_ = convert(tf.EagerTensor, input_) + block_shape_ = convert(tf.EagerTensor, block_shape_) + paddings_ = convert(tf.EagerTensor, paddings_) tf.add_input(desc, input_) tf.add_input(desc, block_shape_) tf.add_input(desc, paddings_) @@ -9085,9 +9085,9 @@ begin end function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) desc = tf.EagerOp("SparseReshape") - input_indices_ = convert(tf.TensorHandle, input_indices_) - input_shape_ = convert(tf.TensorHandle, input_shape_) - new_shape_ = convert(tf.TensorHandle, new_shape_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + new_shape_ = convert(tf.EagerTensor, new_shape_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_shape_) tf.add_input(desc, new_shape_) @@ -9133,8 +9133,8 @@ begin end function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptimizeDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - optimizations_ = convert(tf.TensorHandle, optimizations_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + optimizations_ = convert(tf.EagerTensor, optimizations_) tf.add_input(desc, input_dataset_) tf.add_input(desc, optimizations_) if output_types !== nothing @@ -9185,8 +9185,8 @@ begin end function concat_v2_eager(values_, axis_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatV2") - values_ = convert(tf.TensorHandle, values_) - axis_ = convert(tf.TensorHandle, axis_) + values_ = convert(tf.EagerTensor, values_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, values_) tf.add_input(desc, axis_) if N !== nothing @@ -9248,14 +9248,14 @@ begin end function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdadelta") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - accum_update_ = convert(tf.TensorHandle, accum_update_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -9310,8 +9310,8 @@ begin end function tile_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("Tile") - input_ = convert(tf.TensorHandle, input_) - multiples_ = convert(tf.TensorHandle, multiples_) + input_ = convert(tf.EagerTensor, input_) + multiples_ = convert(tf.EagerTensor, multiples_) tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) @@ -9402,9 +9402,9 @@ begin end function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeManySparse") - sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - sparse_values_ = convert(tf.TensorHandle, sparse_values_) - sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) + 
sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -9454,8 +9454,8 @@ begin end function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) desc = tf.EagerOp("TPUEmbeddingActivations") - embedding_variable_ = convert(tf.TensorHandle, embedding_variable_) - sliced_activations_ = convert(tf.TensorHandle, sliced_activations_) + embedding_variable_ = convert(tf.EagerTensor, embedding_variable_) + sliced_activations_ = convert(tf.EagerTensor, sliced_activations_) tf.add_input(desc, embedding_variable_) tf.add_input(desc, sliced_activations_) if table_id !== nothing @@ -9506,9 +9506,9 @@ begin end function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("BatchMatrixSolveLs") - matrix_ = convert(tf.TensorHandle, matrix_) - rhs_ = convert(tf.TensorHandle, rhs_) - l2_regularizer_ = convert(tf.TensorHandle, l2_regularizer_) + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) tf.add_input(desc, l2_regularizer_) @@ -9554,8 +9554,8 @@ begin end function not_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("NotEqual") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -9595,7 +9595,7 @@ begin end function lgamma_eager(x_; name=nothing) desc = tf.EagerOp("Lgamma") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -9781,7 +9781,7 @@ begin end function self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("SelfAdjointEig") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -9826,7 +9826,7 @@ begin end function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") - quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) tf.add_input(desc, quantile_stream_resource_handle_) if num_features !== nothing desc["num_features"] = Base.Int(num_features) @@ -9872,10 +9872,10 @@ begin end function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseDiv") - sp_indices_ = convert(tf.TensorHandle, sp_indices_) - sp_values_ = convert(tf.TensorHandle, sp_values_) - sp_shape_ = convert(tf.TensorHandle, sp_shape_) - dense_ = convert(tf.TensorHandle, dense_) + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -9917,7 +9917,7 @@ begin end 
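# Usage-wise, these wrappers run their kernel at call time rather than adding a
# node to a graph. A hedged example against acos_eager below; it assumes, as the
# generated code itself does, that convert(tf.EagerTensor, x) accepts a plain
# Julia array, and that an eager context is active:
x = convert(tf.EagerTensor, [0.0, 0.5, 1.0])
y = acos_eager(x)   # executes the "Acos" kernel immediately and records a TapeNode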
function acos_eager(x_; name=nothing) desc = tf.EagerOp("Acos") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -9961,8 +9961,8 @@ begin end function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("All") - input_ = convert(tf.TensorHandle, input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -10006,8 +10006,8 @@ begin end function compare_and_bitpack_eager(input_, threshold_; name=nothing) desc = tf.EagerOp("CompareAndBitpack") - input_ = convert(tf.TensorHandle, input_) - threshold_ = convert(tf.TensorHandle, threshold_) + input_ = convert(tf.EagerTensor, input_) + threshold_ = convert(tf.EagerTensor, threshold_) tf.add_input(desc, input_) tf.add_input(desc, threshold_) desc["T"] = tf.data_type(input_) @@ -10108,7 +10108,7 @@ begin end function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUniqueDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -10183,14 +10183,14 @@ begin end function quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndRelu") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - summand_ = convert(tf.TensorHandle, summand_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + summand_ = convert(tf.EagerTensor, summand_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -10258,8 +10258,8 @@ begin end function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) desc = tf.EagerOp("ListDiff") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if out_idx !== nothing @@ -10309,11 +10309,11 @@ begin end function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) desc = tf.EagerOp("CreateSummaryFileWriter") - writer_ = convert(tf.TensorHandle, writer_) - logdir_ = convert(tf.TensorHandle, logdir_) - max_queue_ = convert(tf.TensorHandle, max_queue_) - flush_millis_ = convert(tf.TensorHandle, flush_millis_) - filename_suffix_ = convert(tf.TensorHandle, filename_suffix_) + writer_ = convert(tf.EagerTensor, writer_) + logdir_ = 
convert(tf.EagerTensor, logdir_) + max_queue_ = convert(tf.EagerTensor, max_queue_) + flush_millis_ = convert(tf.EagerTensor, flush_millis_) + filename_suffix_ = convert(tf.EagerTensor, filename_suffix_) tf.add_input(desc, writer_) tf.add_input(desc, logdir_) tf.add_input(desc, max_queue_) @@ -10369,8 +10369,8 @@ begin end function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) desc = tf.EagerOp("GenerateVocabRemapping") - new_vocab_file_ = convert(tf.TensorHandle, new_vocab_file_) - old_vocab_file_ = convert(tf.TensorHandle, old_vocab_file_) + new_vocab_file_ = convert(tf.EagerTensor, new_vocab_file_) + old_vocab_file_ = convert(tf.EagerTensor, old_vocab_file_) tf.add_input(desc, new_vocab_file_) tf.add_input(desc, old_vocab_file_) if new_vocab_offset !== nothing @@ -10420,7 +10420,7 @@ begin end function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixInverse") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if adjoint !== nothing desc["adjoint"] = Base.Bool(adjoint) @@ -10527,7 +10527,7 @@ begin end function stop_gradient_eager(input_; name=nothing) desc = tf.EagerOp("StopGradient") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -10576,8 +10576,8 @@ begin end function split_eager(split_dim_, value_; name=nothing, num_split=nothing) desc = tf.EagerOp("Split") - split_dim_ = convert(tf.TensorHandle, split_dim_) - value_ = convert(tf.TensorHandle, value_) + split_dim_ = convert(tf.EagerTensor, split_dim_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, split_dim_) tf.add_input(desc, value_) if num_split !== nothing @@ -10633,7 +10633,7 @@ begin end function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) desc = tf.EagerOp("Unpack") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) if num !== nothing desc["num"] = Base.Int(num) @@ -10689,9 +10689,9 @@ begin end function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMax") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -10741,10 +10741,10 @@ begin end function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWrite") - handle_ = convert(tf.TensorHandle, handle_) - index_ = convert(tf.TensorHandle, index_) - value_ = convert(tf.TensorHandle, value_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, value_) @@ -10791,8 +10791,8 @@ begin end function fill_eager(dims_, value_; name=nothing, index_type=nothing) desc = tf.EagerOp("Fill") - dims_ = convert(tf.TensorHandle, dims_) - value_ = convert(tf.TensorHandle, value_) 
+ dims_ = convert(tf.EagerTensor, dims_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, dims_) tf.add_input(desc, value_) if index_type !== nothing @@ -10870,15 +10870,15 @@ begin end function quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRequantize") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) - max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) + max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -10938,7 +10938,7 @@ begin end function softmax_eager(logits_; name=nothing) desc = tf.EagerOp("Softmax") - logits_ = convert(tf.TensorHandle, logits_) + logits_ = convert(tf.EagerTensor, logits_) tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) res = tf.execute(desc) @@ -10981,8 +10981,8 @@ begin end function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubic") - images_ = convert(tf.TensorHandle, images_) - size_ = convert(tf.TensorHandle, size_) + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -11146,8 +11146,8 @@ begin end function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) desc = tf.EagerOp("DecodeCSV") - records_ = convert(tf.TensorHandle, records_) - record_defaults_ = convert(tf.TensorHandle, record_defaults_) + records_ = convert(tf.EagerTensor, records_) + record_defaults_ = convert(tf.EagerTensor, record_defaults_) tf.add_input(desc, records_) tf.add_input(desc, record_defaults_) if OUT_TYPE !== nothing @@ -11205,9 +11205,9 @@ begin end function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFind") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - default_value_ = convert(tf.TensorHandle, default_value_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + default_value_ = convert(tf.EagerTensor, default_value_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, default_value_) @@ -11261,11 +11261,11 @@ begin end function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) 
desc = tf.EagerOp("ShuffleAndRepeatDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) - seed_ = convert(tf.TensorHandle, seed_) - seed2_ = convert(tf.TensorHandle, seed2_) - count_ = convert(tf.TensorHandle, count_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) + count_ = convert(tf.EagerTensor, count_) tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) tf.add_input(desc, seed_) @@ -11324,9 +11324,9 @@ begin end function requantization_range_per_channel_eager(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) desc = tf.EagerOp("RequantizationRangePerChannel") - input_ = convert(tf.TensorHandle, input_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -11374,7 +11374,7 @@ begin end function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUnbatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -11431,8 +11431,8 @@ begin end function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3DGrad") - orig_input_shape_ = convert(tf.TensorHandle, orig_input_shape_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) if ksize !== nothing @@ -11489,7 +11489,7 @@ begin end function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("PlaceholderWithDefault") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -11538,9 +11538,9 @@ begin end function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTableV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -11588,9 +11588,9 @@ begin end function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SetSize") - set_indices_ = convert(tf.TensorHandle, set_indices_) - set_values_ = convert(tf.TensorHandle, set_values_) - set_shape_ = convert(tf.TensorHandle, set_shape_) + set_indices_ = convert(tf.EagerTensor, set_indices_) + set_values_ = convert(tf.EagerTensor, set_values_) + set_shape_ = convert(tf.EagerTensor, set_shape_) tf.add_input(desc, set_indices_) 
tf.add_input(desc, set_values_) tf.add_input(desc, set_shape_) @@ -11640,8 +11640,8 @@ begin end function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) desc = tf.EagerOp("Assert") - condition_ = convert(tf.TensorHandle, condition_) - data_ = convert(tf.TensorHandle, data_) + condition_ = convert(tf.EagerTensor, condition_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, condition_) tf.add_input(desc, data_) if T !== nothing @@ -11701,11 +11701,11 @@ begin end function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) desc = tf.EagerOp("NonMaxSuppressionV4") - boxes_ = convert(tf.TensorHandle, boxes_) - scores_ = convert(tf.TensorHandle, scores_) - max_output_size_ = convert(tf.TensorHandle, max_output_size_) - iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) - score_threshold_ = convert(tf.TensorHandle, score_threshold_) + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -11778,9 +11778,9 @@ begin end function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBoxV2") - image_size_ = convert(tf.TensorHandle, image_size_) - bounding_boxes_ = convert(tf.TensorHandle, bounding_boxes_) - min_object_covered_ = convert(tf.TensorHandle, min_object_covered_) + image_size_ = convert(tf.EagerTensor, image_size_) + bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_) + min_object_covered_ = convert(tf.EagerTensor, min_object_covered_) tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) tf.add_input(desc, min_object_covered_) @@ -11851,8 +11851,8 @@ begin end function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFile") - table_handle_ = convert(tf.TensorHandle, table_handle_) - filename_ = convert(tf.TensorHandle, filename_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + filename_ = convert(tf.EagerTensor, filename_) tf.add_input(desc, table_handle_) tf.add_input(desc, filename_) if key_index !== nothing @@ -11901,7 +11901,7 @@ begin end function lookup_table_size_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSize") - table_handle_ = convert(tf.TensorHandle, table_handle_) + table_handle_ = convert(tf.EagerTensor, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing, res) @@ -11959,15 +11959,15 @@ begin end function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdagradDA") - var_ = convert(tf.TensorHandle, var_) - gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) - 
grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - global_step_ = convert(tf.TensorHandle, global_step_) + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -12030,8 +12030,8 @@ begin end function broadcast_gradient_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastGradientArgs") - s0_ = convert(tf.TensorHandle, s0_) - s1_ = convert(tf.TensorHandle, s1_) + s0_ = convert(tf.EagerTensor, s0_) + s1_ = convert(tf.EagerTensor, s1_) tf.add_input(desc, s0_) tf.add_input(desc, s1_) desc["T"] = tf.data_type(s0_) @@ -12172,7 +12172,7 @@ begin end function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("_While") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -12223,9 +12223,9 @@ begin end function initialize_table_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTable") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -12287,7 +12287,7 @@ begin end function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNumericSummary") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -12407,7 +12407,7 @@ begin end function tanh_eager(x_; name=nothing) desc = tf.EagerOp("Tanh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -12453,7 +12453,7 @@ begin end function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("SymbolicGradient") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) @@ -12520,15 +12520,15 @@ begin end function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesUpdateEnsemble") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - feature_ids_ = convert(tf.TensorHandle, feature_ids_) - node_ids_ = 
convert(tf.TensorHandle, node_ids_) - gains_ = convert(tf.TensorHandle, gains_) - thresholds_ = convert(tf.TensorHandle, thresholds_) - left_node_contribs_ = convert(tf.TensorHandle, left_node_contribs_) - right_node_contribs_ = convert(tf.TensorHandle, right_node_contribs_) - max_depth_ = convert(tf.TensorHandle, max_depth_) - learning_rate_ = convert(tf.TensorHandle, learning_rate_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + feature_ids_ = convert(tf.EagerTensor, feature_ids_) + node_ids_ = convert(tf.EagerTensor, node_ids_) + gains_ = convert(tf.EagerTensor, gains_) + thresholds_ = convert(tf.EagerTensor, thresholds_) + left_node_contribs_ = convert(tf.EagerTensor, left_node_contribs_) + right_node_contribs_ = convert(tf.EagerTensor, right_node_contribs_) + max_depth_ = convert(tf.EagerTensor, max_depth_) + learning_rate_ = convert(tf.EagerTensor, learning_rate_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, feature_ids_) tf.add_input(desc, node_ids_) @@ -12593,11 +12593,11 @@ begin end function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - momentum_ = convert(tf.TensorHandle, momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + momentum_ = convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -12655,8 +12655,8 @@ begin end function reader_read_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderRead") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - queue_handle_ = convert(tf.TensorHandle, queue_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) @@ -12699,7 +12699,7 @@ begin end function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) desc = tf.EagerOp("_WaitForDistributedTPU") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if startup_timeout_sec !== nothing desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) @@ -12741,7 +12741,7 @@ begin end function mutex_lock_eager(mutex_; name=nothing) desc = tf.EagerOp("MutexLock") - mutex_ = convert(tf.TensorHandle, mutex_) + mutex_ = convert(tf.EagerTensor, mutex_) tf.add_input(desc, mutex_) res = tf.execute(desc) node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res) @@ -12779,8 +12779,8 @@ begin end function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) desc = tf.EagerOp("AccumulatorSetGlobalStep") - handle_ = convert(tf.TensorHandle, handle_) - new_global_step_ = convert(tf.TensorHandle, new_global_step_) + handle_ = convert(tf.EagerTensor, handle_) + new_global_step_ = convert(tf.EagerTensor, new_global_step_) tf.add_input(desc, handle_) tf.add_input(desc, new_global_step_) res = tf.execute(desc) @@ -12834,12 +12834,12 @@ begin end function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedAdd") - x_ = convert(tf.TensorHandle, x_) - y_ = 
convert(tf.TensorHandle, y_) - min_x_ = convert(tf.TensorHandle, min_x_) - max_x_ = convert(tf.TensorHandle, max_x_) - min_y_ = convert(tf.TensorHandle, min_y_) - max_y_ = convert(tf.TensorHandle, max_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + min_x_ = convert(tf.EagerTensor, min_x_) + max_x_ = convert(tf.EagerTensor, max_x_) + min_y_ = convert(tf.EagerTensor, min_y_) + max_y_ = convert(tf.EagerTensor, max_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, min_x_) @@ -12886,7 +12886,7 @@ begin end function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) desc = tf.EagerOp("Squeeze") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if squeeze_dims !== nothing desc["squeeze_dims"] = map(Base.identity, squeeze_dims) @@ -12926,7 +12926,7 @@ begin end function experimental_matching_files_dataset_eager(patterns_; name=nothing) desc = tf.EagerOp("ExperimentalMatchingFilesDataset") - patterns_ = convert(tf.TensorHandle, patterns_) + patterns_ = convert(tf.EagerTensor, patterns_) tf.add_input(desc, patterns_) res = tf.execute(desc) node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res) @@ -12966,9 +12966,9 @@ begin end function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetToTFRecord") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - filename_ = convert(tf.TensorHandle, filename_) - compression_type_ = convert(tf.TensorHandle, compression_type_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + filename_ = convert(tf.EagerTensor, filename_) + compression_type_ = convert(tf.EagerTensor, compression_type_) tf.add_input(desc, input_dataset_) tf.add_input(desc, filename_) tf.add_input(desc, compression_type_) @@ -13018,7 +13018,7 @@ begin end function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(tf.TensorHandle, parameters_) + parameters_ = convert(tf.EagerTensor, parameters_) tf.add_input(desc, parameters_) if table_id !== nothing desc["table_id"] = Base.Int(table_id) @@ -13108,7 +13108,7 @@ begin end function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ZipDataset") - input_datasets_ = convert(tf.TensorHandle, input_datasets_) + input_datasets_ = convert(tf.EagerTensor, input_datasets_) tf.add_input(desc, input_datasets_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -13251,7 +13251,7 @@ begin end function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("NcclAllReduce") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -13301,9 +13301,9 @@ begin end function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TextLineDataset") - filenames_ = convert(tf.TensorHandle, filenames_) - compression_type_ = convert(tf.TensorHandle, compression_type_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) + filenames_ = convert(tf.EagerTensor, 
filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -13350,7 +13350,7 @@ begin end function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) desc = tf.EagerOp("SdcaShrinkL1") - weights_ = convert(tf.TensorHandle, weights_) + weights_ = convert(tf.EagerTensor, weights_) tf.add_input(desc, weights_) if num_features !== nothing desc["num_features"] = Base.Int(num_features) @@ -13451,7 +13451,7 @@ begin end function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") - string_handle_ = convert(tf.TensorHandle, string_handle_) + string_handle_ = convert(tf.EagerTensor, string_handle_) tf.add_input(desc, string_handle_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -13510,11 +13510,11 @@ begin end function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDatasetV2") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - padded_shapes_ = convert(tf.TensorHandle, padded_shapes_) - padding_values_ = convert(tf.TensorHandle, padding_values_) - drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) + padding_values_ = convert(tf.EagerTensor, padding_values_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, padded_shapes_) @@ -13577,8 +13577,8 @@ begin end function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) if table_id !== nothing @@ -13629,8 +13629,8 @@ begin end function tensor_array_size_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySize") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -13736,8 +13736,8 @@ begin end function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniform") - shape_ = convert(tf.TensorHandle, shape_) - seed_ = convert(tf.TensorHandle, seed_) + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) if dtype !== nothing @@ -13801,12 +13801,12 @@ begin end function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, 
set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToSparseSetOperation") - set1_indices_ = convert(tf.TensorHandle, set1_indices_) - set1_values_ = convert(tf.TensorHandle, set1_values_) - set1_shape_ = convert(tf.TensorHandle, set1_shape_) - set2_indices_ = convert(tf.TensorHandle, set2_indices_) - set2_values_ = convert(tf.TensorHandle, set2_values_) - set2_shape_ = convert(tf.TensorHandle, set2_shape_) + set1_indices_ = convert(tf.EagerTensor, set1_indices_) + set1_values_ = convert(tf.EagerTensor, set1_values_) + set1_shape_ = convert(tf.EagerTensor, set1_shape_) + set2_indices_ = convert(tf.EagerTensor, set2_indices_) + set2_values_ = convert(tf.EagerTensor, set2_values_) + set2_shape_ = convert(tf.EagerTensor, set2_shape_) tf.add_input(desc, set1_indices_) tf.add_input(desc, set1_values_) tf.add_input(desc, set1_shape_) @@ -13865,7 +13865,7 @@ begin end function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) desc = tf.EagerOp("TensorSummary") - tensor_ = convert(tf.TensorHandle, tensor_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tensor_) if description !== nothing desc["description"] = Base.String(description) @@ -13920,7 +13920,7 @@ begin end function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) desc = tf.EagerOp("RemoteFusedGraphExecute") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if Tinputs !== nothing desc["Tinputs"] = map(Base.identity, Tinputs) @@ -13972,10 +13972,10 @@ begin end function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) desc = tf.EagerOp("SparseSliceGrad") - backprop_val_grad_ = convert(tf.TensorHandle, backprop_val_grad_) - input_indices_ = convert(tf.TensorHandle, input_indices_) - input_start_ = convert(tf.TensorHandle, input_start_) - output_indices_ = convert(tf.TensorHandle, output_indices_) + backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_start_ = convert(tf.EagerTensor, input_start_) + output_indices_ = convert(tf.EagerTensor, output_indices_) tf.add_input(desc, backprop_val_grad_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_start_) @@ -14026,8 +14026,8 @@ begin end function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumsum") - x_ = convert(tf.TensorHandle, x_) - axis_ = convert(tf.TensorHandle, axis_) + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if exclusive !== nothing @@ -14092,11 +14092,11 @@ begin end function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") - t_ = convert(tf.TensorHandle, t_) - m_ = convert(tf.TensorHandle, m_) - v_ = convert(tf.TensorHandle, v_) - gamma_ = convert(tf.TensorHandle, gamma_) - backprop_ = convert(tf.TensorHandle, backprop_) + t_ = convert(tf.EagerTensor, t_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + gamma_ = convert(tf.EagerTensor, gamma_) + backprop_ = convert(tf.EagerTensor, backprop_) tf.add_input(desc, t_) 
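# [Editor's note] Wrappers with optional attributes guard each one with
# `if attr !== nothing`, so omitted keywords fall back to the kernel's
# registered defaults, while desc["T"] is always inferred from the first tensor
# input. A hedged usage sketch for the Cumsum wrapper above (assumes an eager
# context is active and that the EagerTensor convert methods defined earlier in
# this patch series accept plain Julia arrays and integers):
r = cumsum_eager(Float32[1, 2, 3], 0; exclusive=true)  # `reverse` stays at its kernel default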
tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -14162,8 +14162,8 @@ begin end function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPoolGrad") - orig_input_shape_ = convert(tf.TensorHandle, orig_input_shape_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_shape_) tf.add_input(desc, grad_) if ksize !== nothing @@ -14220,9 +14220,9 @@ begin end function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) desc = tf.EagerOp("RestoreV2") - prefix_ = convert(tf.TensorHandle, prefix_) - tensor_names_ = convert(tf.TensorHandle, tensor_names_) - shape_and_slices_ = convert(tf.TensorHandle, shape_and_slices_) + prefix_ = convert(tf.EagerTensor, prefix_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shape_and_slices_) @@ -14264,7 +14264,7 @@ begin end function relu6_eager(features_; name=nothing) desc = tf.EagerOp("Relu6") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -14323,15 +14323,15 @@ begin end function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyRMSProp") - var_ = convert(tf.TensorHandle, var_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -14468,7 +14468,7 @@ begin end function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -14518,7 +14518,7 @@ begin end function invert_eager(x_; name=nothing) desc = tf.EagerOp("Invert") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -14559,7 +14559,7 @@ begin end function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) desc = tf.EagerOp("_UnaryOpsComposition") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) if op_names !== nothing desc["op_names"] = map(Base.identity, op_names) @@ -14619,8 +14619,8 @@ begin end function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, 
Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if f !== nothing @@ -14691,9 +14691,9 @@ begin end function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - momenta_ = convert(tf.TensorHandle, momenta_) - velocities_ = convert(tf.TensorHandle, velocities_) + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + velocities_ = convert(tf.EagerTensor, velocities_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, velocities_) @@ -14746,7 +14746,7 @@ begin end function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) desc = tf.EagerOp("ParseTensor") - serialized_ = convert(tf.TensorHandle, serialized_) + serialized_ = convert(tf.EagerTensor, serialized_) tf.add_input(desc, serialized_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -14851,9 +14851,9 @@ begin end function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") - multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) - shard_num_ = convert(tf.TensorHandle, shard_num_) - incarnation_id_ = convert(tf.TensorHandle, incarnation_id_) + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) + shard_num_ = convert(tf.EagerTensor, shard_num_) + incarnation_id_ = convert(tf.EagerTensor, incarnation_id_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, shard_num_) tf.add_input(desc, incarnation_id_) @@ -14909,9 +14909,9 @@ begin end function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomUniformInt") - shape_ = convert(tf.TensorHandle, shape_) - minval_ = convert(tf.TensorHandle, minval_) - maxval_ = convert(tf.TensorHandle, maxval_) + shape_ = convert(tf.EagerTensor, shape_) + minval_ = convert(tf.EagerTensor, minval_) + maxval_ = convert(tf.EagerTensor, maxval_) tf.add_input(desc, shape_) tf.add_input(desc, minval_) tf.add_input(desc, maxval_) @@ -14967,8 +14967,8 @@ begin end function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") - features_ = convert(tf.TensorHandle, features_) - labels_ = convert(tf.TensorHandle, labels_) + features_ = convert(tf.EagerTensor, features_) + labels_ = convert(tf.EagerTensor, labels_) tf.add_input(desc, features_) tf.add_input(desc, labels_) desc["T"] = tf.data_type(features_) @@ -15014,9 +15014,9 @@ begin end function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV2") - handle_ = convert(tf.TensorHandle, handle_) - index_ = convert(tf.TensorHandle, index_) - flow_in_ = convert(tf.TensorHandle, 
flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, flow_in_) @@ -15066,9 +15066,9 @@ begin end function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpTo") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - queue_handle_ = convert(tf.TensorHandle, queue_handle_) - num_records_ = convert(tf.TensorHandle, num_records_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + num_records_ = convert(tf.EagerTensor, num_records_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) @@ -15120,8 +15120,8 @@ begin end function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) desc = tf.EagerOp("EncodeProto") - sizes_ = convert(tf.TensorHandle, sizes_) - values_ = convert(tf.TensorHandle, values_) + sizes_ = convert(tf.EagerTensor, sizes_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, sizes_) tf.add_input(desc, values_) if field_names !== nothing @@ -15216,11 +15216,11 @@ begin end function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceGrad") - shape_ = convert(tf.TensorHandle, shape_) - begin_ = convert(tf.TensorHandle, begin_) - end_ = convert(tf.TensorHandle, end_) - strides_ = convert(tf.TensorHandle, strides_) - dy_ = convert(tf.TensorHandle, dy_) + shape_ = convert(tf.EagerTensor, shape_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, shape_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -15308,7 +15308,7 @@ begin end function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceSend") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -15369,10 +15369,10 @@ begin end function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - padded_shapes_ = convert(tf.TensorHandle, padded_shapes_) - padding_values_ = convert(tf.TensorHandle, padding_values_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) + padding_values_ = convert(tf.EagerTensor, padding_values_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, padded_shapes_) @@ -15427,7 +15427,7 @@ begin end function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatVecPermute") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) 
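# [Editor's note] The tail of every wrapper is untouched by this patch: once the
# inputs and attrs are staged, `res = tf.execute(desc)` dispatches the kernel
# eagerly, and `node = tf.TapeNode(op, [inputs...], name=nothing, res)` records
# the Julia-level op together with its inputs and outputs so a gradient tape can
# replay the call during backprop. Only the input-conversion lines change,
# from tf.TensorHandle to tf.EagerTensor.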
if src_format !== nothing desc["src_format"] = Base.String(src_format) @@ -15482,7 +15482,7 @@ begin end function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) desc = tf.EagerOp("StringFormat") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -15546,7 +15546,7 @@ begin end function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) desc = tf.EagerOp("AsString") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if precision !== nothing desc["precision"] = Base.Int(precision) @@ -15606,8 +15606,8 @@ begin end function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueMany") - handle_ = convert(tf.TensorHandle, handle_) - components_ = convert(tf.TensorHandle, components_) + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -15707,10 +15707,10 @@ begin end function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ApplyAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -15759,7 +15759,7 @@ begin end function experimental_iterator_get_device_eager(resource_; name=nothing) desc = tf.EagerOp("ExperimentalIteratorGetDevice") - resource_ = convert(tf.TensorHandle, resource_) + resource_ = convert(tf.EagerTensor, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res) @@ -15802,10 +15802,10 @@ begin end function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) desc = tf.EagerOp("AdjustContrast") - images_ = convert(tf.TensorHandle, images_) - contrast_factor_ = convert(tf.TensorHandle, contrast_factor_) - min_value_ = convert(tf.TensorHandle, min_value_) - max_value_ = convert(tf.TensorHandle, max_value_) + images_ = convert(tf.EagerTensor, images_) + contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) + min_value_ = convert(tf.EagerTensor, min_value_) + max_value_ = convert(tf.EagerTensor, max_value_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) tf.add_input(desc, min_value_) @@ -15858,7 +15858,7 @@ begin end function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("ExtractImagePatches") - images_ = convert(tf.TensorHandle, images_) + images_ = convert(tf.EagerTensor, images_) tf.add_input(desc, images_) if ksizes !== nothing desc["ksizes"] = map(Base.identity, ksizes) @@ -15917,10 +15917,10 @@ begin end function scale_and_translate_eager(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslate") - images_ = convert(tf.TensorHandle, images_) - 
size_ = convert(tf.TensorHandle, size_) - scale_ = convert(tf.TensorHandle, scale_) - translation_ = convert(tf.TensorHandle, translation_) + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + scale_ = convert(tf.EagerTensor, scale_) + translation_ = convert(tf.EagerTensor, translation_) tf.add_input(desc, images_) tf.add_input(desc, size_) tf.add_input(desc, scale_) @@ -16053,7 +16053,7 @@ begin end function elu_eager(features_; name=nothing) desc = tf.EagerOp("Elu") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) desc["T"] = tf.data_type(features_) res = tf.execute(desc) @@ -16100,9 +16100,9 @@ begin end function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterUpdate") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -16149,8 +16149,8 @@ begin end function floor_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorMod") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -16195,7 +16195,7 @@ begin end function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -16249,10 +16249,10 @@ begin end function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - stats_aggregator_ = convert(tf.TensorHandle, stats_aggregator_) - tag_ = convert(tf.TensorHandle, tag_) - counter_prefix_ = convert(tf.TensorHandle, counter_prefix_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + stats_aggregator_ = convert(tf.EagerTensor, stats_aggregator_) + tag_ = convert(tf.EagerTensor, tag_) + counter_prefix_ = convert(tf.EagerTensor, counter_prefix_) tf.add_input(desc, input_dataset_) tf.add_input(desc, stats_aggregator_) tf.add_input(desc, tag_) @@ -16313,8 +16313,8 @@ begin end function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ComputeAccidentalHits") - true_classes_ = convert(tf.TensorHandle, true_classes_) - sampled_candidates_ = convert(tf.TensorHandle, sampled_candidates_) + true_classes_ = convert(tf.EagerTensor, true_classes_) + sampled_candidates_ = convert(tf.EagerTensor, sampled_candidates_) tf.add_input(desc, true_classes_) tf.add_input(desc, sampled_candidates_) if num_true !== nothing @@ -16363,7 +16363,7 @@ begin end function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) desc = tf.EagerOp("StringToNumber") - string_tensor_ = 
convert(tf.TensorHandle, string_tensor_) + string_tensor_ = convert(tf.EagerTensor, string_tensor_) tf.add_input(desc, string_tensor_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -16403,7 +16403,7 @@ begin end function snapshot_eager(input_; name=nothing) desc = tf.EagerOp("Snapshot") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -16442,8 +16442,8 @@ begin end function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) desc = tf.EagerOp("DeserializeIterator") - resource_handle_ = convert(tf.TensorHandle, resource_handle_) - serialized_ = convert(tf.TensorHandle, serialized_) + resource_handle_ = convert(tf.EagerTensor, resource_handle_) + serialized_ = convert(tf.EagerTensor, serialized_) tf.add_input(desc, resource_handle_) tf.add_input(desc, serialized_) res = tf.execute(desc) @@ -16481,7 +16481,7 @@ begin end function atan_eager(x_; name=nothing) desc = tf.EagerOp("Atan") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -16527,8 +16527,8 @@ begin end function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("MatMul") - a_ = convert(tf.TensorHandle, a_) - b_ = convert(tf.TensorHandle, b_) + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) if transpose_a !== nothing @@ -16574,7 +16574,7 @@ begin end function erfc_eager(x_; name=nothing) desc = tf.EagerOp("Erfc") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -16614,8 +16614,8 @@ begin end function sigmoid_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SigmoidGrad") - y_ = convert(tf.TensorHandle, y_) - dy_ = convert(tf.TensorHandle, dy_) + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -16737,11 +16737,11 @@ begin end function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV3") - boxes_ = convert(tf.TensorHandle, boxes_) - scores_ = convert(tf.TensorHandle, scores_) - max_output_size_ = convert(tf.TensorHandle, max_output_size_) - iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) - score_threshold_ = convert(tf.TensorHandle, score_threshold_) + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -16797,9 +16797,9 @@ begin end function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropInput") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, 
filter_) tf.add_input(desc, out_backprop_) @@ -16851,8 +16851,8 @@ begin end function logical_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalOr") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) res = tf.execute(desc) @@ -16905,13 +16905,13 @@ begin end function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdadelta") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - accum_update_ = convert(tf.TensorHandle, accum_update_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -16978,10 +16978,10 @@ begin end function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("DenseToSparseSetOperation") - set1_ = convert(tf.TensorHandle, set1_) - set2_indices_ = convert(tf.TensorHandle, set2_indices_) - set2_values_ = convert(tf.TensorHandle, set2_values_) - set2_shape_ = convert(tf.TensorHandle, set2_shape_) + set1_ = convert(tf.EagerTensor, set1_) + set2_indices_ = convert(tf.EagerTensor, set2_indices_) + set2_values_ = convert(tf.EagerTensor, set2_values_) + set2_shape_ = convert(tf.EagerTensor, set2_shape_) tf.add_input(desc, set1_) tf.add_input(desc, set2_indices_) tf.add_input(desc, set2_values_) @@ -17028,7 +17028,7 @@ begin end function reader_num_records_produced_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProduced") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res) @@ -17067,8 +17067,8 @@ begin end function adjust_hue_eager(images_, delta_; name=nothing) desc = tf.EagerOp("AdjustHue") - images_ = convert(tf.TensorHandle, images_) - delta_ = convert(tf.TensorHandle, delta_) + images_ = convert(tf.EagerTensor, images_) + delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, images_) tf.add_input(desc, delta_) desc["T"] = tf.data_type(images_) @@ -17111,8 +17111,8 @@ begin end function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") - quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) - num_buckets_ = convert(tf.TensorHandle, num_buckets_) + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + num_buckets_ = convert(tf.EagerTensor, num_buckets_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, num_buckets_) if generate_quantiles !== nothing @@ -17175,11 +17175,11 @@ begin end function 
experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapAndBatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) - drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, batch_size_) @@ -17237,8 +17237,8 @@ begin end function real_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("RealDiv") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -17287,9 +17287,9 @@ begin end function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("RestoreSlice") - file_pattern_ = convert(tf.TensorHandle, file_pattern_) - tensor_name_ = convert(tf.TensorHandle, tensor_name_) - shape_and_slice_ = convert(tf.TensorHandle, shape_and_slice_) + file_pattern_ = convert(tf.EagerTensor, file_pattern_) + tensor_name_ = convert(tf.EagerTensor, tensor_name_) + shape_and_slice_ = convert(tf.EagerTensor, shape_and_slice_) tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) tf.add_input(desc, shape_and_slice_) @@ -17336,7 +17336,7 @@ begin end function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPopV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -17378,8 +17378,8 @@ begin end function reverse_eager(tensor_, dims_; name=nothing) desc = tf.EagerOp("Reverse") - tensor_ = convert(tf.TensorHandle, tensor_) - dims_ = convert(tf.TensorHandle, dims_) + tensor_ = convert(tf.EagerTensor, tensor_) + dims_ = convert(tf.EagerTensor, dims_) tf.add_input(desc, tensor_) tf.add_input(desc, dims_) desc["T"] = tf.data_type(tensor_) @@ -17423,7 +17423,7 @@ begin end function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) desc = tf.EagerOp("DecodePng") - contents_ = convert(tf.TensorHandle, contents_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -17472,10 +17472,10 @@ begin end function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV2") - boxes_ = convert(tf.TensorHandle, boxes_) - scores_ = convert(tf.TensorHandle, scores_) - max_output_size_ = convert(tf.TensorHandle, max_output_size_) - iou_threshold_ = convert(tf.TensorHandle, iou_threshold_) + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = 
convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -17519,8 +17519,8 @@ begin end function igamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igamma") - a_ = convert(tf.TensorHandle, a_) - x_ = convert(tf.TensorHandle, x_) + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -17560,7 +17560,7 @@ begin end function digamma_eager(x_; name=nothing) desc = tf.EagerOp("Digamma") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -17617,15 +17617,15 @@ begin end function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdaMax") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - v_ = convert(tf.TensorHandle, v_) - beta1_power_ = convert(tf.TensorHandle, beta1_power_) - lr_ = convert(tf.TensorHandle, lr_) - beta1_ = convert(tf.TensorHandle, beta1_) - beta2_ = convert(tf.TensorHandle, beta2_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -17685,7 +17685,7 @@ begin end function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("SpaceToDepth") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if block_size !== nothing desc["block_size"] = Base.Int(block_size) @@ -17731,8 +17731,8 @@ begin end function sqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SqrtGrad") - y_ = convert(tf.TensorHandle, y_) - dy_ = convert(tf.TensorHandle, dy_) + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -17788,8 +17788,8 @@ begin end function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstage") - key_ = convert(tf.TensorHandle, key_) - indices_ = convert(tf.TensorHandle, indices_) + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -17850,7 +17850,7 @@ begin end function qr_eager(input_; name=nothing, full_matrices=nothing) desc = tf.EagerOp("Qr") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if full_matrices !== nothing desc["full_matrices"] = Base.Bool(full_matrices) @@ -17911,12 +17911,12 @@ begin end function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) desc = 
tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") - node_id_range_ = convert(tf.TensorHandle, node_id_range_) - stats_summary_list_ = convert(tf.TensorHandle, stats_summary_list_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - tree_complexity_ = convert(tf.TensorHandle, tree_complexity_) - min_node_weight_ = convert(tf.TensorHandle, min_node_weight_) + node_id_range_ = convert(tf.EagerTensor, node_id_range_) + stats_summary_list_ = convert(tf.EagerTensor, stats_summary_list_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + tree_complexity_ = convert(tf.EagerTensor, tree_complexity_) + min_node_weight_ = convert(tf.EagerTensor, min_node_weight_) tf.add_input(desc, node_id_range_) tf.add_input(desc, stats_summary_list_) tf.add_input(desc, l1_) @@ -17976,10 +17976,10 @@ begin end function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("UnbatchGrad") - original_input_ = convert(tf.TensorHandle, original_input_) - batch_index_ = convert(tf.TensorHandle, batch_index_) - grad_ = convert(tf.TensorHandle, grad_) - id_ = convert(tf.TensorHandle, id_) + original_input_ = convert(tf.EagerTensor, original_input_) + batch_index_ = convert(tf.EagerTensor, batch_index_) + grad_ = convert(tf.EagerTensor, grad_) + id_ = convert(tf.EagerTensor, id_) tf.add_input(desc, original_input_) tf.add_input(desc, batch_index_) tf.add_input(desc, grad_) @@ -18027,7 +18027,7 @@ begin end function log_softmax_eager(logits_; name=nothing) desc = tf.EagerOp("LogSoftmax") - logits_ = convert(tf.TensorHandle, logits_) + logits_ = convert(tf.EagerTensor, logits_) tf.add_input(desc, logits_) desc["T"] = tf.data_type(logits_) res = tf.execute(desc) @@ -18067,7 +18067,7 @@ begin end function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) desc = tf.EagerOp("ResourceCountUpTo") - resource_ = convert(tf.TensorHandle, resource_) + resource_ = convert(tf.EagerTensor, resource_) tf.add_input(desc, resource_) if limit !== nothing desc["limit"] = Base.Int(limit) @@ -18113,7 +18113,7 @@ begin end function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("AccumulateNV2") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -18181,9 +18181,9 @@ begin end function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ParallelMapDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, num_parallel_calls_) @@ -18252,7 +18252,7 @@ begin end function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomUniform") - shape_ = convert(tf.TensorHandle, shape_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, shape_) if seed !== 
nothing desc["seed"] = Base.Int(seed) @@ -18313,7 +18313,7 @@ begin end function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeTranscode") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -18364,7 +18364,7 @@ begin end function reader_reset_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderReset") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing, res) @@ -18407,7 +18407,7 @@ begin end function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastSend") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if num_devices !== nothing desc["num_devices"] = Base.Int(num_devices) @@ -18451,7 +18451,7 @@ begin end function batch_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDeterminant") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -18491,8 +18491,8 @@ begin end function less_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("LessEqual") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -18539,9 +18539,9 @@ begin end function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyGradientDescent") - var_ = convert(tf.TensorHandle, var_) - alpha_ = convert(tf.TensorHandle, alpha_) - delta_ = convert(tf.TensorHandle, delta_) + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, delta_) @@ -18592,9 +18592,9 @@ begin end function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtN") - data_ = convert(tf.TensorHandle, data_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -18635,7 +18635,7 @@ begin end function matrix_logarithm_eager(input_; name=nothing) desc = tf.EagerOp("MatrixLogarithm") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -18682,9 +18682,9 @@ begin end function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMul") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = 
convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -18746,7 +18746,7 @@ begin end function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeJpeg") - contents_ = convert(tf.TensorHandle, contents_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, contents_) if channels !== nothing desc["channels"] = Base.Int(channels) @@ -18888,8 +18888,8 @@ begin end function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueManyV2") - handle_ = convert(tf.TensorHandle, handle_) - components_ = convert(tf.TensorHandle, components_) + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -18956,16 +18956,16 @@ begin end function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") - var_ = convert(tf.TensorHandle, var_) - mg_ = convert(tf.TensorHandle, mg_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -19037,10 +19037,10 @@ begin end function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("InterleaveDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - cycle_length_ = convert(tf.TensorHandle, cycle_length_) - block_length_ = convert(tf.TensorHandle, block_length_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, cycle_length_) @@ -19094,7 +19094,7 @@ begin end function stack_pop_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPop") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -19144,9 +19144,9 @@ begin end function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = 
tf.EagerOp("MaxPoolV2") - input_ = convert(tf.TensorHandle, input_) - ksize_ = convert(tf.TensorHandle, ksize_) - strides_ = convert(tf.TensorHandle, strides_) + input_ = convert(tf.EagerTensor, input_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) tf.add_input(desc, input_) tf.add_input(desc, ksize_) tf.add_input(desc, strides_) @@ -19195,9 +19195,9 @@ begin end function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - stamp_token_ = convert(tf.TensorHandle, stamp_token_) - tree_ensemble_serialized_ = convert(tf.TensorHandle, tree_ensemble_serialized_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + stamp_token_ = convert(tf.EagerTensor, stamp_token_) + tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) @@ -19252,11 +19252,11 @@ begin end function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) desc = tf.EagerOp("LoadAndRemapMatrix") - ckpt_path_ = convert(tf.TensorHandle, ckpt_path_) - old_tensor_name_ = convert(tf.TensorHandle, old_tensor_name_) - row_remapping_ = convert(tf.TensorHandle, row_remapping_) - col_remapping_ = convert(tf.TensorHandle, col_remapping_) - initializing_values_ = convert(tf.TensorHandle, initializing_values_) + ckpt_path_ = convert(tf.EagerTensor, ckpt_path_) + old_tensor_name_ = convert(tf.EagerTensor, old_tensor_name_) + row_remapping_ = convert(tf.EagerTensor, row_remapping_) + col_remapping_ = convert(tf.EagerTensor, col_remapping_) + initializing_values_ = convert(tf.EagerTensor, initializing_values_) tf.add_input(desc, ckpt_path_) tf.add_input(desc, old_tensor_name_) tf.add_input(desc, row_remapping_) @@ -19321,12 +19321,12 @@ begin end function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalGradientDescent") - var_ = convert(tf.TensorHandle, var_) - alpha_ = convert(tf.TensorHandle, alpha_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, l1_) @@ -19385,7 +19385,7 @@ begin end function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFuncStateless") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -19431,7 +19431,7 @@ begin end function where_eager(input_; name=nothing) desc = tf.EagerOp("Where") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -19482,8 +19482,8 @@ 
begin end function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) desc = tf.EagerOp("Mfcc") - spectrogram_ = convert(tf.TensorHandle, spectrogram_) - sample_rate_ = convert(tf.TensorHandle, sample_rate_) + spectrogram_ = convert(tf.EagerTensor, spectrogram_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) tf.add_input(desc, spectrogram_) tf.add_input(desc, sample_rate_) if upper_frequency_limit !== nothing @@ -19536,7 +19536,7 @@ begin end function check_numerics_eager(tensor_; name=nothing, message=nothing) desc = tf.EagerOp("CheckNumerics") - tensor_ = convert(tf.TensorHandle, tensor_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tensor_) if message !== nothing desc["message"] = Base.String(message) @@ -19674,10 +19674,10 @@ begin end function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanGrad") - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - output_dim0_ = convert(tf.TensorHandle, output_dim0_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + output_dim0_ = convert(tf.EagerTensor, output_dim0_) tf.add_input(desc, grad_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -19736,9 +19736,9 @@ begin end function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("TryRpc") - address_ = convert(tf.TensorHandle, address_) - method_ = convert(tf.TensorHandle, method_) - request_ = convert(tf.TensorHandle, request_) + address_ = convert(tf.EagerTensor, address_) + method_ = convert(tf.EagerTensor, method_) + request_ = convert(tf.EagerTensor, request_) tf.add_input(desc, address_) tf.add_input(desc, method_) tf.add_input(desc, request_) @@ -19794,8 +19794,8 @@ begin end function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixTriangularSolve") - matrix_ = convert(tf.TensorHandle, matrix_) - rhs_ = convert(tf.TensorHandle, rhs_) + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) if lower !== nothing @@ -19844,7 +19844,7 @@ begin end function _retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_Retval") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if index !== nothing desc["index"] = Base.Int(index) @@ -19893,7 +19893,7 @@ begin end function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCounts") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) if out_idx !== nothing desc["out_idx"] = Base.identity(out_idx) @@ -19936,8 +19936,8 @@ begin end function add_eager(x_, y_; name=nothing) desc = tf.EagerOp("Add") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -19998,9 +19998,9 @@ begin end function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; 
name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalScanDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - initial_state_ = convert(tf.TensorHandle, initial_state_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + initial_state_ = convert(tf.EagerTensor, initial_state_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, initial_state_) tf.add_input(desc, other_arguments_) @@ -20062,8 +20062,8 @@ begin end function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignAddVariableOp") - resource_ = convert(tf.TensorHandle, resource_) - value_ = convert(tf.TensorHandle, value_) + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -20119,9 +20119,9 @@ begin end function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) desc = tf.EagerOp("SplitV") - value_ = convert(tf.TensorHandle, value_) - size_splits_ = convert(tf.TensorHandle, size_splits_) - split_dim_ = convert(tf.TensorHandle, split_dim_) + value_ = convert(tf.EagerTensor, value_) + size_splits_ = convert(tf.EagerTensor, size_splits_) + split_dim_ = convert(tf.EagerTensor, split_dim_) tf.add_input(desc, value_) tf.add_input(desc, size_splits_) tf.add_input(desc, split_dim_) @@ -20173,8 +20173,8 @@ begin end function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) desc = tf.EagerOp("Assign") - ref_ = convert(tf.TensorHandle, ref_) - value_ = convert(tf.TensorHandle, value_) + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if validate_shape !== nothing @@ -20234,7 +20234,7 @@ begin end function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolWithArgmax") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -20295,10 +20295,10 @@ begin end function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedReluX") - features_ = convert(tf.TensorHandle, features_) - max_value_ = convert(tf.TensorHandle, max_value_) - min_features_ = convert(tf.TensorHandle, min_features_) - max_features_ = convert(tf.TensorHandle, max_features_) + features_ = convert(tf.EagerTensor, features_) + max_value_ = convert(tf.EagerTensor, max_value_) + min_features_ = convert(tf.EagerTensor, min_features_) + max_features_ = convert(tf.EagerTensor, max_features_) tf.add_input(desc, features_) tf.add_input(desc, max_value_) tf.add_input(desc, min_features_) @@ -20422,7 +20422,7 @@ begin end function fft2d_eager(input_; name=nothing) desc = tf.EagerOp("FFT2D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -20467,8 +20467,8 @@ begin end function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) 
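# Dataset-op wrappers like this one have no input-derived "T" attribute to
# infer; instead the output_types / output_shapes keyword arguments are
# copied onto the op (via map(Base.identity, ...)) before tf.execute runs
# it, as in the surrounding hunks.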
desc = tf.EagerOp("ExperimentalThreadPoolDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - thread_pool_ = convert(tf.TensorHandle, thread_pool_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + thread_pool_ = convert(tf.EagerTensor, thread_pool_) tf.add_input(desc, input_dataset_) tf.add_input(desc, thread_pool_) if output_types !== nothing @@ -20522,8 +20522,8 @@ begin end function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") - selector_input_dataset_ = convert(tf.TensorHandle, selector_input_dataset_) - data_input_datasets_ = convert(tf.TensorHandle, data_input_datasets_) + selector_input_dataset_ = convert(tf.EagerTensor, selector_input_dataset_) + data_input_datasets_ = convert(tf.EagerTensor, data_input_datasets_) tf.add_input(desc, selector_input_dataset_) tf.add_input(desc, data_input_datasets_) if output_types !== nothing @@ -20578,10 +20578,10 @@ begin end function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNGrad") - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - output_dim0_ = convert(tf.TensorHandle, output_dim0_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + output_dim0_ = convert(tf.EagerTensor, output_dim0_) tf.add_input(desc, grad_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -20623,7 +20623,7 @@ begin end function real_eager(input_; name=nothing) desc = tf.EagerOp("Real") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -20677,8 +20677,8 @@ begin end function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstage") - key_ = convert(tf.TensorHandle, key_) - indices_ = convert(tf.TensorHandle, indices_) + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -20732,8 +20732,8 @@ begin end function rfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT2D") - input_ = convert(tf.TensorHandle, input_) - fft_length_ = convert(tf.TensorHandle, fft_length_) + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -20770,7 +20770,7 @@ begin end function var_is_initialized_op_eager(resource_; name=nothing) desc = tf.EagerOp("VarIsInitializedOp") - resource_ = convert(tf.TensorHandle, resource_) + resource_ = convert(tf.EagerTensor, resource_) tf.add_input(desc, resource_) res = tf.execute(desc) node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res) @@ -20853,8 +20853,8 @@ begin end function atan2_eager(y_, x_; name=nothing) desc = tf.EagerOp("Atan2") - y_ = convert(tf.TensorHandle, y_) - x_ = convert(tf.TensorHandle, x_) + y_ = convert(tf.EagerTensor, y_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, y_) tf.add_input(desc, x_) desc["T"] = 
tf.data_type(y_) @@ -20909,8 +20909,8 @@ begin end function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoisson") - shape_ = convert(tf.TensorHandle, shape_) - rate_ = convert(tf.TensorHandle, rate_) + shape_ = convert(tf.EagerTensor, shape_) + rate_ = convert(tf.EagerTensor, rate_) tf.add_input(desc, shape_) tf.add_input(desc, rate_) if seed !== nothing @@ -20971,8 +20971,8 @@ begin end function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) desc = tf.EagerOp("ReverseSequence") - input_ = convert(tf.TensorHandle, input_) - seq_lengths_ = convert(tf.TensorHandle, seq_lengths_) + input_ = convert(tf.EagerTensor, input_) + seq_lengths_ = convert(tf.EagerTensor, seq_lengths_) tf.add_input(desc, input_) tf.add_input(desc, seq_lengths_) if seq_dim !== nothing @@ -21021,7 +21021,7 @@ begin end function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) desc = tf.EagerOp("OutfeedEnqueue") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -21064,8 +21064,8 @@ begin end function sub_eager(x_, y_; name=nothing) desc = tf.EagerOp("Sub") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -21114,8 +21114,8 @@ begin end function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) desc = tf.EagerOp("StringSplit") - input_ = convert(tf.TensorHandle, input_) - delimiter_ = convert(tf.TensorHandle, delimiter_) + input_ = convert(tf.EagerTensor, input_) + delimiter_ = convert(tf.EagerTensor, delimiter_) tf.add_input(desc, input_) tf.add_input(desc, delimiter_) if skip_empty !== nothing @@ -21166,8 +21166,8 @@ begin end function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumprod") - x_ = convert(tf.TensorHandle, x_) - axis_ = convert(tf.TensorHandle, axis_) + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if exclusive !== nothing @@ -21227,10 +21227,10 @@ begin end function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) desc = tf.EagerOp("QuantizedResizeBilinear") - images_ = convert(tf.TensorHandle, images_) - size_ = convert(tf.TensorHandle, size_) - min_ = convert(tf.TensorHandle, min_) - max_ = convert(tf.TensorHandle, max_) + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) tf.add_input(desc, images_) tf.add_input(desc, size_) tf.add_input(desc, min_) @@ -21298,8 +21298,8 @@ begin end function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseSingleExample") - serialized_ = convert(tf.TensorHandle, serialized_) - dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) + serialized_ = convert(tf.EagerTensor, serialized_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, dense_defaults_) if num_sparse !== nothing @@ -21358,7 +21358,7 @@ begin end 
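Every hunk in this commit is the same mechanical substitution: the generated eager wrappers now coerce each input with convert(tf.EagerTensor, ...) instead of convert(tf.TensorHandle, ...) before wiring it into the op. A minimal sketch of the post-rename wrapper shape, modeled on the Sub hunk above (sub_eager_sketch is an illustrative name; the bookkeeping that follows tf.TapeNode is not visible in these hunks and is elided here; conversion of plain Julia values to tf.EagerTensor is assumed to be defined elsewhere in the package):

import TensorFlow
const tf = TensorFlow

function sub_eager_sketch(x_, y_; name=nothing)
    desc = tf.EagerOp("Sub")
    # Normalize both inputs to eager tensors before wiring them into the op.
    x_ = convert(tf.EagerTensor, x_)
    y_ = convert(tf.EagerTensor, y_)
    tf.add_input(desc, x_)
    tf.add_input(desc, y_)
    # The "T" attribute is inferred from the first input's runtime dtype.
    desc["T"] = tf.data_type(x_)
    # Run the kernel immediately, then record the call for the gradient tape.
    res = tf.execute(desc)
    node = tf.TapeNode(sub_eager_sketch, [x_, y_], name=nothing, res)
    return res
end

With the convert methods in place, a call like sub_eager_sketch([3, 4], [1, 2]) would run the Sub kernel eagerly and return the result handle (a hypothetical call; only the EagerTensor conversion path is shown in this diff).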
function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) desc = tf.EagerOp("IsVariableInitialized") - ref_ = convert(tf.TensorHandle, ref_) + ref_ = convert(tf.EagerTensor, ref_) tf.add_input(desc, ref_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -21458,9 +21458,9 @@ begin end function tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListConcatV2") - input_handle_ = convert(tf.TensorHandle, input_handle_) - element_shape_ = convert(tf.TensorHandle, element_shape_) - leading_dims_ = convert(tf.TensorHandle, leading_dims_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + leading_dims_ = convert(tf.EagerTensor, leading_dims_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) tf.add_input(desc, leading_dims_) @@ -21538,10 +21538,10 @@ begin end function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV2") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -21615,9 +21615,9 @@ begin end function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterSub") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -21666,8 +21666,8 @@ begin end function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignAdd") - ref_ = convert(tf.TensorHandle, ref_) - value_ = convert(tf.TensorHandle, value_) + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if use_locking !== nothing @@ -21715,7 +21715,7 @@ begin end function tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorDataset") - components_ = convert(tf.TensorHandle, components_) + components_ = convert(tf.EagerTensor, components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -21761,7 +21761,7 @@ begin end function bucketize_eager(input_; name=nothing, boundaries=nothing) desc = tf.EagerOp("Bucketize") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if boundaries !== nothing desc["boundaries"] = map(Base.identity, boundaries) @@ -21811,10 +21811,10 @@ begin end function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMax") - input_indices_ = 
convert(tf.TensorHandle, input_indices_) - input_values_ = convert(tf.TensorHandle, input_values_) - input_shape_ = convert(tf.TensorHandle, input_shape_) - reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -21930,9 +21930,9 @@ begin end function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradWithShape") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) - shape_to_prepend_ = convert(tf.TensorHandle, shape_to_prepend_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) tf.add_input(desc, shape_to_prepend_) @@ -21973,7 +21973,7 @@ begin end function tensor_array_close_v3_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV3") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res) @@ -22017,11 +22017,11 @@ begin end function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") - overlaps_ = convert(tf.TensorHandle, overlaps_) - scores_ = convert(tf.TensorHandle, scores_) - max_output_size_ = convert(tf.TensorHandle, max_output_size_) - overlap_threshold_ = convert(tf.TensorHandle, overlap_threshold_) - score_threshold_ = convert(tf.TensorHandle, score_threshold_) + overlaps_ = convert(tf.EagerTensor, overlaps_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + overlap_threshold_ = convert(tf.EagerTensor, overlap_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) tf.add_input(desc, overlaps_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -22071,7 +22071,7 @@ begin end function pack_eager(values_; name=nothing, N=nothing, axis=nothing) desc = tf.EagerOp("Pack") - values_ = convert(tf.TensorHandle, values_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, values_) if N !== nothing desc["N"] = Base.Int(N) @@ -22122,8 +22122,8 @@ begin end function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV2") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -22169,8 +22169,8 @@ begin end function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignSubVariableOp") - resource_ = convert(tf.TensorHandle, resource_) - value_ = convert(tf.TensorHandle, value_) + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -22211,7 
+22211,7 @@ begin end function batch_fft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT2D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft2d, [input_], name=nothing, res) @@ -22247,7 +22247,7 @@ begin end function close_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("CloseSummaryWriter") - writer_ = convert(tf.TensorHandle, writer_) + writer_ = convert(tf.EagerTensor, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res) @@ -22284,7 +22284,7 @@ begin end function rank_eager(input_; name=nothing) desc = tf.EagerOp("Rank") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -22322,7 +22322,7 @@ begin end function fft3d_eager(input_; name=nothing) desc = tf.EagerOp("FFT3D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -22377,14 +22377,14 @@ begin end function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrl") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -22491,7 +22491,7 @@ begin end function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) desc = tf.EagerOp("AudioSpectrogram") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if window_size !== nothing desc["window_size"] = Base.Int(window_size) @@ -22539,7 +22539,7 @@ begin end function variable_shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("VariableShape") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -22698,8 +22698,8 @@ begin end function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestCreateTreeVariable") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) - tree_config_ = convert(tf.TensorHandle, tree_config_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + tree_config_ = convert(tf.EagerTensor, tree_config_) tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = tf.execute(desc) @@ -22751,9 +22751,9 @@ begin end function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradWithArgmax") - input_ = convert(tf.TensorHandle, 
input_) - grad_ = convert(tf.TensorHandle, grad_) - argmax_ = convert(tf.TensorHandle, argmax_) + input_ = convert(tf.EagerTensor, input_) + grad_ = convert(tf.EagerTensor, grad_) + argmax_ = convert(tf.EagerTensor, argmax_) tf.add_input(desc, input_) tf.add_input(desc, grad_) tf.add_input(desc, argmax_) @@ -22811,8 +22811,8 @@ begin end function ref_switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("RefSwitch") - data_ = convert(tf.TensorHandle, data_) - pred_ = convert(tf.TensorHandle, pred_) + data_ = convert(tf.EagerTensor, data_) + pred_ = convert(tf.EagerTensor, pred_) tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) @@ -22850,7 +22850,7 @@ begin end function sdca_fprint_eager(input_; name=nothing) desc = tf.EagerOp("SdcaFprint") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res) @@ -22898,7 +22898,7 @@ begin end function experimental_choose_fastest_dataset_eager(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalChooseFastestDataset") - input_datasets_ = convert(tf.TensorHandle, input_datasets_) + input_datasets_ = convert(tf.EagerTensor, input_datasets_) tf.add_input(desc, input_datasets_) if N !== nothing desc["N"] = Base.Int(N) @@ -22950,7 +22950,7 @@ begin end function leaky_relu_eager(features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyRelu") - features_ = convert(tf.TensorHandle, features_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, features_) if alpha !== nothing desc["alpha"] = Base.identity(alpha) @@ -22993,7 +22993,7 @@ begin end function identity_n_eager(input_; name=nothing, T=nothing) desc = tf.EagerOp("IdentityN") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -23078,18 +23078,18 @@ begin end function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV2") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) - output_ = convert(tf.TensorHandle, output_) - output_h_ = convert(tf.TensorHandle, output_h_) - output_c_ = convert(tf.TensorHandle, output_c_) - output_backprop_ = convert(tf.TensorHandle, output_backprop_) - output_h_backprop_ = convert(tf.TensorHandle, output_h_backprop_) - output_c_backprop_ = convert(tf.TensorHandle, output_c_backprop_) - reserve_space_ = convert(tf.TensorHandle, reserve_space_) - host_reserved_ = convert(tf.TensorHandle, host_reserved_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + output_ = convert(tf.EagerTensor, output_) + output_h_ = convert(tf.EagerTensor, output_h_) + output_c_ = convert(tf.EagerTensor, output_c_) + output_backprop_ = convert(tf.EagerTensor, output_backprop_) + output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) + 
output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) + reserve_space_ = convert(tf.EagerTensor, reserve_space_) + host_reserved_ = convert(tf.EagerTensor, host_reserved_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -23175,9 +23175,9 @@ begin end function requantization_range_eager(input_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("RequantizationRange") - input_ = convert(tf.TensorHandle, input_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -23219,8 +23219,8 @@ begin end function maximum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Maximum") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -23263,8 +23263,8 @@ begin end function reshape_eager(tensor_, shape_; name=nothing) desc = tf.EagerOp("Reshape") - tensor_ = convert(tf.TensorHandle, tensor_) - shape_ = convert(tf.TensorHandle, shape_) + tensor_ = convert(tf.EagerTensor, tensor_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, tensor_) tf.add_input(desc, shape_) desc["T"] = tf.data_type(tensor_) @@ -23311,9 +23311,9 @@ begin end function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("MatrixSolveLs") - matrix_ = convert(tf.TensorHandle, matrix_) - rhs_ = convert(tf.TensorHandle, rhs_) - l2_regularizer_ = convert(tf.TensorHandle, l2_regularizer_) + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) tf.add_input(desc, l2_regularizer_) @@ -23360,9 +23360,9 @@ begin end function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TFRecordDataset") - filenames_ = convert(tf.TensorHandle, filenames_) - compression_type_ = convert(tf.TensorHandle, compression_type_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) + filenames_ = convert(tf.EagerTensor, filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -23408,8 +23408,8 @@ begin end function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) if num_bucketized_features !== nothing @@ -23453,7 +23453,7 @@ begin end function hsv_to_rgb_eager(images_; name=nothing) desc = tf.EagerOp("HSVToRGB") - images_ = convert(tf.TensorHandle, images_) + images_ = convert(tf.EagerTensor, images_) 
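# After the conversion above, the tf.data_type(images_) call below reads the
# dtype off the live EagerTensor, so the "T" attribute tracks whatever the
# input actually converted to.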
tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) @@ -23498,8 +23498,8 @@ begin end function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - max_intra_op_parallelism_ = convert(tf.TensorHandle, max_intra_op_parallelism_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + max_intra_op_parallelism_ = convert(tf.EagerTensor, max_intra_op_parallelism_) tf.add_input(desc, input_dataset_) tf.add_input(desc, max_intra_op_parallelism_) if output_types !== nothing @@ -23552,9 +23552,9 @@ begin end function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterDiv") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -23609,7 +23609,7 @@ begin end function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) desc = tf.EagerOp("DecodeWav") - contents_ = convert(tf.TensorHandle, contents_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, contents_) if desired_channels !== nothing desc["desired_channels"] = Base.Int(desired_channels) @@ -23652,7 +23652,7 @@ begin end function log_eager(x_; name=nothing) desc = tf.EagerOp("Log") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -23698,10 +23698,10 @@ begin end function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) desc = tf.EagerOp("SaveV2") - prefix_ = convert(tf.TensorHandle, prefix_) - tensor_names_ = convert(tf.TensorHandle, tensor_names_) - shape_and_slices_ = convert(tf.TensorHandle, shape_and_slices_) - tensors_ = convert(tf.TensorHandle, tensors_) + prefix_ = convert(tf.EagerTensor, prefix_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) + tensors_ = convert(tf.EagerTensor, tensors_) tf.add_input(desc, prefix_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shape_and_slices_) @@ -23744,7 +23744,7 @@ begin end function deep_copy_eager(x_; name=nothing) desc = tf.EagerOp("DeepCopy") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -23787,7 +23787,7 @@ begin end function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ModelDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -23883,9 +23883,9 @@ begin end function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, 
Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSequenceExample") - serialized_ = convert(tf.TensorHandle, serialized_) - debug_name_ = convert(tf.TensorHandle, debug_name_) - context_dense_defaults_ = convert(tf.TensorHandle, context_dense_defaults_) + serialized_ = convert(tf.EagerTensor, serialized_) + debug_name_ = convert(tf.EagerTensor, debug_name_) + context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, debug_name_) tf.add_input(desc, context_dense_defaults_) @@ -23969,7 +23969,7 @@ begin end function sinh_eager(x_; name=nothing) desc = tf.EagerOp("Sinh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -24069,10 +24069,10 @@ begin end function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWriteV2") - handle_ = convert(tf.TensorHandle, handle_) - index_ = convert(tf.TensorHandle, index_) - value_ = convert(tf.TensorHandle, value_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, value_) @@ -24115,7 +24115,7 @@ begin end function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListElementShape") - input_handle_ = convert(tf.TensorHandle, input_handle_) + input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) if shape_type !== nothing desc["shape_type"] = Base.identity(shape_type) @@ -24154,7 +24154,7 @@ begin end function queue_size_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSizeV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res) @@ -24191,7 +24191,7 @@ begin end function expm1_eager(x_; name=nothing) desc = tf.EagerOp("Expm1") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -24233,9 +24233,9 @@ begin end function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("BatchMatrixBandPart") - input_ = convert(tf.TensorHandle, input_) - num_lower_ = convert(tf.TensorHandle, num_lower_) - num_upper_ = convert(tf.TensorHandle, num_upper_) + input_ = convert(tf.EagerTensor, input_) + num_lower_ = convert(tf.EagerTensor, num_lower_) + num_upper_ = convert(tf.EagerTensor, num_upper_) tf.add_input(desc, input_) tf.add_input(desc, num_lower_) tf.add_input(desc, num_upper_) @@ -24282,8 +24282,8 @@ begin end function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ConcatenateDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - another_dataset_ = convert(tf.TensorHandle, another_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + 
+    another_dataset_ = convert(tf.EagerTensor, another_dataset_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, another_dataset_)
     if output_types !== nothing
@@ -24326,7 +24326,7 @@ begin
 end
 function decode_gif_eager(contents_; name=nothing)
     desc = tf.EagerOp("DecodeGif")
-    contents_ = convert(tf.TensorHandle, contents_)
+    contents_ = convert(tf.EagerTensor, contents_)
     tf.add_input(desc, contents_)
     res = tf.execute(desc)
     node = tf.TapeNode(decode_gif, [contents_], name=nothing, res)
@@ -24410,10 +24410,10 @@ begin
 end
 function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing)
     desc = tf.EagerOp("TPUReplicate")
-    inputs_ = convert(tf.TensorHandle, inputs_)
-    broadcast_inputs_ = convert(tf.TensorHandle, broadcast_inputs_)
-    variables_ = convert(tf.TensorHandle, variables_)
-    guaranteed_constants_ = convert(tf.TensorHandle, guaranteed_constants_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
+    broadcast_inputs_ = convert(tf.EagerTensor, broadcast_inputs_)
+    variables_ = convert(tf.EagerTensor, variables_)
+    guaranteed_constants_ = convert(tf.EagerTensor, guaranteed_constants_)
     tf.add_input(desc, inputs_)
     tf.add_input(desc, broadcast_inputs_)
     tf.add_input(desc, variables_)
@@ -24503,7 +24503,7 @@ begin
 end
 function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing)
     desc = tf.EagerOp("BatchSelfAdjointEigV2")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if compute_v !== nothing
         desc["compute_v"] = Base.Bool(compute_v)
@@ -24547,7 +24547,7 @@ begin
 end
 function shape_eager(input_; name=nothing, out_type=nothing)
     desc = tf.EagerOp("Shape")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if out_type !== nothing
         desc["out_type"] = Base.identity(out_type)
@@ -24595,8 +24595,8 @@ begin
 end
 function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("RepeatDataset")
-    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
-    count_ = convert(tf.TensorHandle, count_)
+    input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+    count_ = convert(tf.EagerTensor, count_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, count_)
     if output_types !== nothing
@@ -24649,10 +24649,10 @@ begin
 end
 function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing)
     desc = tf.EagerOp("CropAndResizeGradBoxes")
-    grads_ = convert(tf.TensorHandle, grads_)
-    image_ = convert(tf.TensorHandle, image_)
-    boxes_ = convert(tf.TensorHandle, boxes_)
-    box_ind_ = convert(tf.TensorHandle, box_ind_)
+    grads_ = convert(tf.EagerTensor, grads_)
+    image_ = convert(tf.EagerTensor, image_)
+    boxes_ = convert(tf.EagerTensor, boxes_)
+    box_ind_ = convert(tf.EagerTensor, box_ind_)
     tf.add_input(desc, grads_)
     tf.add_input(desc, image_)
     tf.add_input(desc, boxes_)
@@ -24698,8 +24698,8 @@ begin
 end
 function reciprocal_grad_eager(y_, dy_; name=nothing)
     desc = tf.EagerOp("ReciprocalGrad")
-    y_ = convert(tf.TensorHandle, y_)
-    dy_ = convert(tf.TensorHandle, dy_)
+    y_ = convert(tf.EagerTensor, y_)
+    dy_ = convert(tf.EagerTensor, dy_)
     tf.add_input(desc, y_)
     tf.add_input(desc, dy_)
     desc["T"] = tf.data_type(y_)
@@ -24744,8 +24744,8 @@ begin
 end
 function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing)
     desc = tf.EagerOp("BatchMatrixSolve")
-    matrix_ = convert(tf.TensorHandle, matrix_)
-    rhs_ = convert(tf.TensorHandle, rhs_)
+    matrix_ = convert(tf.EagerTensor, matrix_)
+    rhs_ = convert(tf.EagerTensor, rhs_)
     tf.add_input(desc, matrix_)
     tf.add_input(desc, rhs_)
     if adjoint !== nothing
@@ -24850,7 +24850,7 @@ begin
 end
 function exit_eager(data_; name=nothing)
     desc = tf.EagerOp("Exit")
-    data_ = convert(tf.TensorHandle, data_)
+    data_ = convert(tf.EagerTensor, data_)
     tf.add_input(desc, data_)
     desc["T"] = tf.data_type(data_)
     res = tf.execute(desc)
@@ -24900,7 +24900,7 @@ begin
 end
 function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
     desc = tf.EagerOp("LRN")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if depth_radius !== nothing
         desc["depth_radius"] = Base.Int(depth_radius)
@@ -24964,8 +24964,8 @@ begin
 end
 function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing)
     desc = tf.EagerOp("StatelessIf")
-    cond_ = convert(tf.TensorHandle, cond_)
-    input_ = convert(tf.TensorHandle, input_)
+    cond_ = convert(tf.EagerTensor, cond_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, cond_)
     tf.add_input(desc, input_)
     if Tin !== nothing
@@ -25023,9 +25023,9 @@ begin
 end
 function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing)
     desc = tf.EagerOp("TensorListSetItem")
-    input_handle_ = convert(tf.TensorHandle, input_handle_)
-    index_ = convert(tf.TensorHandle, index_)
-    item_ = convert(tf.TensorHandle, item_)
+    input_handle_ = convert(tf.EagerTensor, input_handle_)
+    index_ = convert(tf.EagerTensor, index_)
+    item_ = convert(tf.EagerTensor, item_)
     tf.add_input(desc, input_handle_)
     tf.add_input(desc, index_)
     tf.add_input(desc, item_)
@@ -25068,7 +25068,7 @@ begin
 end
 function rsqrt_eager(x_; name=nothing)
     desc = tf.EagerOp("Rsqrt")
-    x_ = convert(tf.TensorHandle, x_)
+    x_ = convert(tf.EagerTensor, x_)
     tf.add_input(desc, x_)
     desc["T"] = tf.data_type(x_)
     res = tf.execute(desc)
@@ -25148,18 +25148,18 @@ begin
 end
 function quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
     desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndReluAndRequantize")
-    input_ = convert(tf.TensorHandle, input_)
-    filter_ = convert(tf.TensorHandle, filter_)
-    bias_ = convert(tf.TensorHandle, bias_)
-    min_input_ = convert(tf.TensorHandle, min_input_)
-    max_input_ = convert(tf.TensorHandle, max_input_)
-    min_filter_ = convert(tf.TensorHandle, min_filter_)
-    max_filter_ = convert(tf.TensorHandle, max_filter_)
-    min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_)
-    max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_)
-    summand_ = convert(tf.TensorHandle, summand_)
-    min_summand_ = convert(tf.TensorHandle, min_summand_)
-    max_summand_ = convert(tf.TensorHandle, max_summand_)
+    input_ = convert(tf.EagerTensor, input_)
+    filter_ = convert(tf.EagerTensor, filter_)
+    bias_ = convert(tf.EagerTensor, bias_)
+    min_input_ = convert(tf.EagerTensor, min_input_)
+    max_input_ = convert(tf.EagerTensor, max_input_)
+    min_filter_ = convert(tf.EagerTensor, min_filter_)
+    max_filter_ = convert(tf.EagerTensor, max_filter_)
+    min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_)
+    max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_)
+    summand_ = convert(tf.EagerTensor, summand_)
+    min_summand_ = convert(tf.EagerTensor, min_summand_)
+    max_summand_ = convert(tf.EagerTensor, max_summand_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, bias_)
@@ -25222,7 +25222,7 @@ begin
 end
 function delete_session_tensor_eager(handle_; name=nothing)
     desc = tf.EagerOp("DeleteSessionTensor")
-    handle_ = convert(tf.TensorHandle, handle_)
+    handle_ = convert(tf.EagerTensor, handle_)
     tf.add_input(desc, handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res)
@@ -25273,10 +25273,10 @@ begin
 end
 function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing)
     desc = tf.EagerOp("OneHot")
-    indices_ = convert(tf.TensorHandle, indices_)
-    depth_ = convert(tf.TensorHandle, depth_)
-    on_value_ = convert(tf.TensorHandle, on_value_)
-    off_value_ = convert(tf.TensorHandle, off_value_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    depth_ = convert(tf.EagerTensor, depth_)
+    on_value_ = convert(tf.EagerTensor, on_value_)
+    off_value_ = convert(tf.EagerTensor, off_value_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, depth_)
     tf.add_input(desc, on_value_)
@@ -25342,14 +25342,14 @@ begin
 end
 function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceApplyFtrl")
-    var_ = convert(tf.TensorHandle, var_)
-    accum_ = convert(tf.TensorHandle, accum_)
-    linear_ = convert(tf.TensorHandle, linear_)
-    grad_ = convert(tf.TensorHandle, grad_)
-    lr_ = convert(tf.TensorHandle, lr_)
-    l1_ = convert(tf.TensorHandle, l1_)
-    l2_ = convert(tf.TensorHandle, l2_)
-    lr_power_ = convert(tf.TensorHandle, lr_power_)
+    var_ = convert(tf.EagerTensor, var_)
+    accum_ = convert(tf.EagerTensor, accum_)
+    linear_ = convert(tf.EagerTensor, linear_)
+    grad_ = convert(tf.EagerTensor, grad_)
+    lr_ = convert(tf.EagerTensor, lr_)
+    l1_ = convert(tf.EagerTensor, l1_)
+    l2_ = convert(tf.EagerTensor, l2_)
+    lr_power_ = convert(tf.EagerTensor, lr_power_)
     tf.add_input(desc, var_)
     tf.add_input(desc, accum_)
     tf.add_input(desc, linear_)
@@ -25450,16 +25450,16 @@ begin
 end
 function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing)
     desc = tf.EagerOp("SdcaOptimizerV2")
-    sparse_example_indices_ = convert(tf.TensorHandle, sparse_example_indices_)
-    sparse_feature_indices_ = convert(tf.TensorHandle, sparse_feature_indices_)
-    sparse_feature_values_ = convert(tf.TensorHandle, sparse_feature_values_)
-    dense_features_ = convert(tf.TensorHandle, dense_features_)
-    example_weights_ = convert(tf.TensorHandle, example_weights_)
-    example_labels_ = convert(tf.TensorHandle, example_labels_)
-    sparse_indices_ = convert(tf.TensorHandle, sparse_indices_)
-    sparse_weights_ = convert(tf.TensorHandle, sparse_weights_)
-    dense_weights_ = convert(tf.TensorHandle, dense_weights_)
-    example_state_data_ = convert(tf.TensorHandle, example_state_data_)
+    sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_)
+    sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_)
+    sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_)
+    dense_features_ = convert(tf.EagerTensor, dense_features_)
+    example_weights_ = convert(tf.EagerTensor, example_weights_)
+    example_labels_ = convert(tf.EagerTensor, example_labels_)
+    sparse_indices_ = convert(tf.EagerTensor, sparse_indices_)
+    sparse_weights_ = convert(tf.EagerTensor, sparse_weights_)
+    dense_weights_ = convert(tf.EagerTensor, dense_weights_)
+    example_state_data_ = convert(tf.EagerTensor, example_state_data_)
     tf.add_input(desc, sparse_example_indices_)
     tf.add_input(desc, sparse_feature_indices_)
     tf.add_input(desc, sparse_feature_values_)
@@ -25539,8 +25539,8 @@ begin
 end
 function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing)
     desc = tf.EagerOp("QueueEnqueue")
-    handle_ = convert(tf.TensorHandle, handle_)
-    components_ = convert(tf.TensorHandle, components_)
+    handle_ = convert(tf.EagerTensor, handle_)
+    components_ = convert(tf.EagerTensor, components_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, components_)
     if Tcomponents !== nothing
@@ -25661,8 +25661,8 @@ begin
 end
 function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing)
     desc = tf.EagerOp("CTCBeamSearchDecoder")
-    inputs_ = convert(tf.TensorHandle, inputs_)
-    sequence_length_ = convert(tf.TensorHandle, sequence_length_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
+    sequence_length_ = convert(tf.EagerTensor, sequence_length_)
     tf.add_input(desc, inputs_)
     tf.add_input(desc, sequence_length_)
     if beam_width !== nothing
@@ -25770,14 +25770,14 @@ begin
 end
 function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyRMSProp")
-    var_ = convert(tf.TensorHandle, var_)
-    ms_ = convert(tf.TensorHandle, ms_)
-    mom_ = convert(tf.TensorHandle, mom_)
-    lr_ = convert(tf.TensorHandle, lr_)
-    rho_ = convert(tf.TensorHandle, rho_)
-    momentum_ = convert(tf.TensorHandle, momentum_)
-    epsilon_ = convert(tf.TensorHandle, epsilon_)
-    grad_ = convert(tf.TensorHandle, grad_)
+    var_ = convert(tf.EagerTensor, var_)
+    ms_ = convert(tf.EagerTensor, ms_)
+    mom_ = convert(tf.EagerTensor, mom_)
+    lr_ = convert(tf.EagerTensor, lr_)
+    rho_ = convert(tf.EagerTensor, rho_)
+    momentum_ = convert(tf.EagerTensor, momentum_)
+    epsilon_ = convert(tf.EagerTensor, epsilon_)
+    grad_ = convert(tf.EagerTensor, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, ms_)
     tf.add_input(desc, mom_)
@@ -25834,8 +25834,8 @@ begin
 end
 function adjust_saturation_eager(images_, scale_; name=nothing)
     desc = tf.EagerOp("AdjustSaturation")
-    images_ = convert(tf.TensorHandle, images_)
-    scale_ = convert(tf.TensorHandle, scale_)
+    images_ = convert(tf.EagerTensor, images_)
+    scale_ = convert(tf.EagerTensor, scale_)
     tf.add_input(desc, images_)
     tf.add_input(desc, scale_)
     desc["T"] = tf.data_type(images_)
@@ -25876,8 +25876,8 @@ begin
 end
 function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing)
     desc = tf.EagerOp("LookupTableRemoveV2")
-    table_handle_ = convert(tf.TensorHandle, table_handle_)
-    keys_ = convert(tf.TensorHandle, keys_)
+    table_handle_ = convert(tf.EagerTensor, table_handle_)
+    keys_ = convert(tf.EagerTensor, keys_)
     tf.add_input(desc, table_handle_)
     tf.add_input(desc, keys_)
     desc["Tin"] = tf.data_type(keys_)
@@ -25918,7 +25918,7 @@ begin
 end
 function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing)
     desc = tf.EagerOp("QueueClose")
-    handle_ = convert(tf.TensorHandle, handle_)
+    handle_ = convert(tf.EagerTensor, handle_)
     tf.add_input(desc, handle_)
     if cancel_pending_enqueues !== nothing
         desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues)
@@ -25965,8 +25965,8 @@ begin
 end
 function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("PrefetchDataset")
-    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
-    buffer_size_ = convert(tf.TensorHandle, buffer_size_)
+    input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+    buffer_size_ = convert(tf.EagerTensor, buffer_size_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, buffer_size_)
     if output_types !== nothing
@@ -26029,8 +26029,8 @@ begin
 end
 function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing)
     desc = tf.EagerOp("MapDataset")
-    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
-    other_arguments_ = convert(tf.TensorHandle, other_arguments_)
+    input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+    other_arguments_ = convert(tf.EagerTensor, other_arguments_)
     tf.add_input(desc, input_dataset_)
     tf.add_input(desc, other_arguments_)
     if f !== nothing
@@ -26116,13 +26116,13 @@ begin
 end
 function quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
     desc = tf.EagerOp("QuantizedConv2DWithBias")
-    input_ = convert(tf.TensorHandle, input_)
-    filter_ = convert(tf.TensorHandle, filter_)
-    bias_ = convert(tf.TensorHandle, bias_)
-    min_input_ = convert(tf.TensorHandle, min_input_)
-    max_input_ = convert(tf.TensorHandle, max_input_)
-    min_filter_ = convert(tf.TensorHandle, min_filter_)
-    max_filter_ = convert(tf.TensorHandle, max_filter_)
+    input_ = convert(tf.EagerTensor, input_)
+    filter_ = convert(tf.EagerTensor, filter_)
+    bias_ = convert(tf.EagerTensor, bias_)
+    min_input_ = convert(tf.EagerTensor, min_input_)
+    max_input_ = convert(tf.EagerTensor, max_input_)
+    min_filter_ = convert(tf.EagerTensor, min_filter_)
+    max_filter_ = convert(tf.EagerTensor, max_filter_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_)
     tf.add_input(desc, bias_)
@@ -26185,9 +26185,9 @@ begin
 end
 function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing)
     desc = tf.EagerOp("TensorArrayReadV3")
-    handle_ = convert(tf.TensorHandle, handle_)
-    index_ = convert(tf.TensorHandle, index_)
-    flow_in_ = convert(tf.TensorHandle, flow_in_)
+    handle_ = convert(tf.EagerTensor, handle_)
+    index_ = convert(tf.EagerTensor, index_)
+    flow_in_ = convert(tf.EagerTensor, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, index_)
     tf.add_input(desc, flow_in_)
@@ -26229,7 +26229,7 @@ begin
 end
 function identity_eager(input_; name=nothing)
     desc = tf.EagerOp("Identity")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -26281,8 +26281,8 @@ begin
 end
 function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing)
     desc = tf.EagerOp("Print")
-    input_ = convert(tf.TensorHandle, input_)
-    data_ = convert(tf.TensorHandle, data_)
+    input_ = convert(tf.EagerTensor, input_)
+    data_ = convert(tf.EagerTensor, data_)
     tf.add_input(desc, input_)
     tf.add_input(desc, data_)
     if U !== nothing
@@ -26345,7 +26345,7 @@ begin
 end
 function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
     desc = tf.EagerOp("CollectiveBcastSend")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -26405,7 +26405,7 @@ begin
 end
 function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing)
     desc = tf.EagerOp("_ListToArray")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if Tin !== nothing
         desc["Tin"] = map(Base.identity, Tin)
@@ -26461,11 +26461,11 @@ begin
 end
 function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing)
     desc = tf.EagerOp("NegTrain")
-    w_in_ = convert(tf.TensorHandle, w_in_)
-    w_out_ = convert(tf.TensorHandle, w_out_)
-    examples_ = convert(tf.TensorHandle, examples_)
-    labels_ = convert(tf.TensorHandle, labels_)
-    lr_ = convert(tf.TensorHandle, lr_)
+    w_in_ = convert(tf.EagerTensor, w_in_)
+    w_out_ = convert(tf.EagerTensor, w_out_)
+    examples_ = convert(tf.EagerTensor, examples_)
+    labels_ = convert(tf.EagerTensor, labels_)
+    lr_ = convert(tf.EagerTensor, lr_)
     tf.add_input(desc, w_in_)
     tf.add_input(desc, w_out_)
     tf.add_input(desc, examples_)
@@ -26511,7 +26511,7 @@ begin
 end
 function worker_heartbeat_eager(request_; name=nothing)
    desc = tf.EagerOp("WorkerHeartbeat")
-    request_ = convert(tf.TensorHandle, request_)
+    request_ = convert(tf.EagerTensor, request_)
     tf.add_input(desc, request_)
     res = tf.execute(desc)
     node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res)
@@ -26552,8 +26552,8 @@ begin
 end
 function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing)
     desc = tf.EagerOp("MergeV2Checkpoints")
-    checkpoint_prefixes_ = convert(tf.TensorHandle, checkpoint_prefixes_)
-    destination_prefix_ = convert(tf.TensorHandle, destination_prefix_)
+    checkpoint_prefixes_ = convert(tf.EagerTensor, checkpoint_prefixes_)
+    destination_prefix_ = convert(tf.EagerTensor, destination_prefix_)
     tf.add_input(desc, checkpoint_prefixes_)
     tf.add_input(desc, destination_prefix_)
     if delete_old_dirs !== nothing
@@ -26596,8 +26596,8 @@ begin
 end
 function collective_permute_eager(input_, source_target_pairs_; name=nothing)
     desc = tf.EagerOp("CollectivePermute")
-    input_ = convert(tf.TensorHandle, input_)
-    source_target_pairs_ = convert(tf.TensorHandle, source_target_pairs_)
+    input_ = convert(tf.EagerTensor, input_)
+    source_target_pairs_ = convert(tf.EagerTensor, source_target_pairs_)
     tf.add_input(desc, input_)
     tf.add_input(desc, source_target_pairs_)
     desc["T"] = tf.data_type(input_)
@@ -26648,10 +26648,10 @@ begin
 end
 function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing)
     desc = tf.EagerOp("QuantizeAndDequantizeV3")
-    input_ = convert(tf.TensorHandle, input_)
-    input_min_ = convert(tf.TensorHandle, input_min_)
-    input_max_ = convert(tf.TensorHandle, input_max_)
-    num_bits_ = convert(tf.TensorHandle, num_bits_)
+    input_ = convert(tf.EagerTensor, input_)
+    input_min_ = convert(tf.EagerTensor, input_min_)
+    input_max_ = convert(tf.EagerTensor, input_max_)
+    num_bits_ = convert(tf.EagerTensor, num_bits_)
     tf.add_input(desc, input_)
     tf.add_input(desc, input_min_)
     tf.add_input(desc, input_max_)
@@ -26764,8 +26764,8 @@ begin
 end
 function softplus_grad_eager(gradients_, features_; name=nothing)
     desc = tf.EagerOp("SoftplusGrad")
-    gradients_ = convert(tf.TensorHandle, gradients_)
-    features_ = convert(tf.TensorHandle, features_)
+    gradients_ = convert(tf.EagerTensor, gradients_)
+    features_ = convert(tf.EagerTensor, features_)
     tf.add_input(desc, gradients_)
     tf.add_input(desc, features_)
     desc["T"] = tf.data_type(gradients_)
@@ -26879,10 +26879,10 @@ begin
 end
 function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing)
     desc = tf.EagerOp("TensorArrayScatterV2")
-    handle_ = convert(tf.TensorHandle, handle_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    value_ = convert(tf.TensorHandle, value_)
-    flow_in_ = convert(tf.TensorHandle, flow_in_)
+    handle_ = convert(tf.EagerTensor, handle_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    value_ = convert(tf.EagerTensor, value_)
+    flow_in_ = convert(tf.EagerTensor, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, value_)
@@ -26922,7 +26922,7 @@ begin
 end
 function decode_json_example_eager(json_examples_; name=nothing)
     desc = tf.EagerOp("DecodeJSONExample")
-    json_examples_ = convert(tf.TensorHandle, json_examples_)
+    json_examples_ = convert(tf.EagerTensor, json_examples_)
     tf.add_input(desc, json_examples_)
     res = tf.execute(desc)
     node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res)
@@ -26985,11 +26985,11 @@ begin
 end
 function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
     desc = tf.EagerOp("FusedBatchNormGradV2")
-    y_backprop_ = convert(tf.TensorHandle, y_backprop_)
-    x_ = convert(tf.TensorHandle, x_)
-    scale_ = convert(tf.TensorHandle, scale_)
-    reserve_space_1_ = convert(tf.TensorHandle, reserve_space_1_)
-    reserve_space_2_ = convert(tf.TensorHandle, reserve_space_2_)
+    y_backprop_ = convert(tf.EagerTensor, y_backprop_)
+    x_ = convert(tf.EagerTensor, x_)
+    scale_ = convert(tf.EagerTensor, scale_)
+    reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_)
+    reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_)
     tf.add_input(desc, y_backprop_)
     tf.add_input(desc, x_)
     tf.add_input(desc, scale_)
@@ -27055,7 +27055,7 @@ begin
 end
 function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
     desc = tf.EagerOp("_HostCast")
-    x_ = convert(tf.TensorHandle, x_)
+    x_ = convert(tf.EagerTensor, x_)
     tf.add_input(desc, x_)
     if SrcT !== nothing
         desc["SrcT"] = Base.identity(SrcT)
@@ -27166,7 +27166,7 @@ begin
 end
 function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing)
     desc = tf.EagerOp("While")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if T !== nothing
         desc["T"] = map(Base.identity, T)
@@ -27226,9 +27226,9 @@ begin
 end
 function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing)
     desc = tf.EagerOp("StatelessMultinomial")
-    logits_ = convert(tf.TensorHandle, logits_)
-    num_samples_ = convert(tf.TensorHandle, num_samples_)
-    seed_ = convert(tf.TensorHandle, seed_)
+    logits_ = convert(tf.EagerTensor, logits_)
+    num_samples_ = convert(tf.EagerTensor, num_samples_)
+    seed_ = convert(tf.EagerTensor, seed_)
     tf.add_input(desc, logits_)
     tf.add_input(desc, num_samples_)
     tf.add_input(desc, seed_)
@@ -27281,9 +27281,9 @@ begin
 end
 function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ScatterAdd")
-    ref_ = convert(tf.TensorHandle, ref_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    updates_ = convert(tf.TensorHandle, updates_)
+    ref_ = convert(tf.EagerTensor, ref_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    updates_ = convert(tf.EagerTensor, updates_)
     tf.add_input(desc, ref_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -27328,7 +27328,7 @@ begin
 end
 function conj_eager(input_; name=nothing)
     desc = tf.EagerOp("Conj")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -27371,8 +27371,8 @@ begin
 end
 function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing)
     desc = tf.EagerOp("ParallelDynamicStitch")
-    indices_ = convert(tf.TensorHandle, indices_)
-    data_ = convert(tf.TensorHandle, data_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    data_ = convert(tf.EagerTensor, data_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, data_)
     if N !== nothing
@@ -27415,8 +27415,8 @@ begin
 end
 function make_iterator_eager(dataset_, iterator_; name=nothing)
     desc = tf.EagerOp("MakeIterator")
-    dataset_ = convert(tf.TensorHandle, dataset_)
-    iterator_ = convert(tf.TensorHandle, iterator_)
+    dataset_ = convert(tf.EagerTensor, dataset_)
+    iterator_ = convert(tf.EagerTensor, iterator_)
     tf.add_input(desc, dataset_)
     tf.add_input(desc, iterator_)
     res = tf.execute(desc)
@@ -27455,8 +27455,8 @@ begin
 end
 function rfft3d_eager(input_, fft_length_; name=nothing)
     desc = tf.EagerOp("RFFT3D")
-    input_ = convert(tf.TensorHandle, input_)
-    fft_length_ = convert(tf.TensorHandle, fft_length_)
+    input_ = convert(tf.EagerTensor, input_)
+    fft_length_ = convert(tf.EagerTensor, fft_length_)
     tf.add_input(desc, input_)
     tf.add_input(desc, fft_length_)
     res = tf.execute(desc)
@@ -27508,10 +27508,10 @@ begin
 end
 function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("SparseReduceSumSparse")
-    input_indices_ = convert(tf.TensorHandle, input_indices_)
-    input_values_ = convert(tf.TensorHandle, input_values_)
-    input_shape_ = convert(tf.TensorHandle, input_shape_)
-    reduction_axes_ = convert(tf.TensorHandle, reduction_axes_)
+    input_indices_ = convert(tf.EagerTensor, input_indices_)
+    input_values_ = convert(tf.EagerTensor, input_values_)
+    input_shape_ = convert(tf.EagerTensor, input_shape_)
+    reduction_axes_ = convert(tf.EagerTensor, reduction_axes_)
     tf.add_input(desc, input_indices_)
     tf.add_input(desc, input_values_)
     tf.add_input(desc, input_shape_)
@@ -27567,7 +27567,7 @@ begin
 end
 function collective_gather_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing)
     desc = tf.EagerOp("CollectiveGather")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -27634,12 +27634,12 @@ begin
 end
 function combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing)
     desc = tf.EagerOp("CombinedNonMaxSuppression")
-    boxes_ = convert(tf.TensorHandle, boxes_)
-    scores_ = convert(tf.TensorHandle, scores_)
-    max_output_size_per_class_ = convert(tf.TensorHandle, max_output_size_per_class_)
-    max_total_size_ = convert(tf.TensorHandle, max_total_size_)
-    iou_threshold_ = convert(tf.TensorHandle, iou_threshold_)
-    score_threshold_ = convert(tf.TensorHandle, score_threshold_)
+    boxes_ = convert(tf.EagerTensor, boxes_)
+    scores_ = convert(tf.EagerTensor, scores_)
+    max_output_size_per_class_ = convert(tf.EagerTensor, max_output_size_per_class_)
+    max_total_size_ = convert(tf.EagerTensor, max_total_size_)
+    iou_threshold_ = convert(tf.EagerTensor, iou_threshold_)
+    score_threshold_ = convert(tf.EagerTensor, score_threshold_)
     tf.add_input(desc, boxes_)
     tf.add_input(desc, scores_)
     tf.add_input(desc, max_output_size_per_class_)
@@ -27761,9 +27761,9 @@ begin
 end
 function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
     desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters")
-    parameters_ = convert(tf.TensorHandle, parameters_)
-    accumulators_ = convert(tf.TensorHandle, accumulators_)
-    updates_ = convert(tf.TensorHandle, updates_)
+    parameters_ = convert(tf.EagerTensor, parameters_)
+    accumulators_ = convert(tf.EagerTensor, accumulators_)
+    updates_ = convert(tf.EagerTensor, updates_)
     tf.add_input(desc, parameters_)
     tf.add_input(desc, accumulators_)
     tf.add_input(desc, updates_)
@@ -27832,13 +27832,13 @@ begin
 end
 function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing)
     desc = tf.EagerOp("SparseAdd")
-    a_indices_ = convert(tf.TensorHandle, a_indices_)
-    a_values_ = convert(tf.TensorHandle, a_values_)
-    a_shape_ = convert(tf.TensorHandle, a_shape_)
-    b_indices_ = convert(tf.TensorHandle, b_indices_)
-    b_values_ = convert(tf.TensorHandle, b_values_)
-    b_shape_ = convert(tf.TensorHandle, b_shape_)
-    thresh_ = convert(tf.TensorHandle, thresh_)
+    a_indices_ = convert(tf.EagerTensor, a_indices_)
+    a_values_ = convert(tf.EagerTensor, a_values_)
+    a_shape_ = convert(tf.EagerTensor, a_shape_)
+    b_indices_ = convert(tf.EagerTensor, b_indices_)
+    b_values_ = convert(tf.EagerTensor, b_values_)
+    b_shape_ = convert(tf.EagerTensor, b_shape_)
+    thresh_ = convert(tf.EagerTensor, thresh_)
     tf.add_input(desc, a_indices_)
     tf.add_input(desc, a_values_)
     tf.add_input(desc, a_shape_)
@@ -27893,8 +27893,8 @@ begin
 end
 function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing)
     desc = tf.EagerOp("CTCGreedyDecoder")
-    inputs_ = convert(tf.TensorHandle, inputs_)
-    sequence_length_ = convert(tf.TensorHandle, sequence_length_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
+    sequence_length_ = convert(tf.EagerTensor, sequence_length_)
     tf.add_input(desc, inputs_)
     tf.add_input(desc, sequence_length_)
     if merge_repeated !== nothing
@@ -27984,7 +27984,7 @@ begin
 end
 function consume_mutex_lock_eager(mutex_lock_; name=nothing)
     desc = tf.EagerOp("ConsumeMutexLock")
-    mutex_lock_ = convert(tf.TensorHandle, mutex_lock_)
+    mutex_lock_ = convert(tf.EagerTensor, mutex_lock_)
     tf.add_input(desc, mutex_lock_)
     res = tf.execute(desc)
     node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res)
@@ -28023,8 +28023,8 @@ begin
 end
 function greater_equal_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("GreaterEqual")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -28077,8 +28077,8 @@ begin
 end
 function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
     desc = tf.EagerOp("InitializeTableFromTextFileV2")
-    table_handle_ = convert(tf.TensorHandle, table_handle_)
-    filename_ = convert(tf.TensorHandle, filename_)
+    table_handle_ = convert(tf.EagerTensor, table_handle_)
+    filename_ = convert(tf.EagerTensor, filename_)
     tf.add_input(desc, table_handle_)
     tf.add_input(desc, filename_)
     if key_index !== nothing
@@ -28133,7 +28133,7 @@ begin
 end
 function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing)
     desc = tf.EagerOp("QueueDequeue")
-    handle_ = convert(tf.TensorHandle, handle_)
+    handle_ = convert(tf.EagerTensor, handle_)
     tf.add_input(desc, handle_)
     if component_types !== nothing
         desc["component_types"] = map(Base.identity, component_types)
@@ -28178,8 +28178,8 @@ begin
 end
 function equal_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("Equal")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -28224,7 +28224,7 @@ begin
 end
 function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("IteratorFromStringHandle")
-    string_handle_ = convert(tf.TensorHandle, string_handle_)
+    string_handle_ = convert(tf.EagerTensor, string_handle_)
     tf.add_input(desc, string_handle_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -28278,9 +28278,9 @@ begin
 end
 function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing)
     desc = tf.EagerOp("TensorListSplit")
-    tensor_ = convert(tf.TensorHandle, tensor_)
-    element_shape_ = convert(tf.TensorHandle, element_shape_)
-    lengths_ = convert(tf.TensorHandle, lengths_)
+    tensor_ = convert(tf.EagerTensor, tensor_)
+    element_shape_ = convert(tf.EagerTensor, element_shape_)
+    lengths_ = convert(tf.EagerTensor, lengths_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, element_shape_)
     tf.add_input(desc, lengths_)
@@ -28350,7 +28350,7 @@ begin
 end
 function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing)
     desc = tf.EagerOp("FractionalMaxPool")
-    value_ = convert(tf.TensorHandle, value_)
+    value_ = convert(tf.EagerTensor, value_)
     tf.add_input(desc, value_)
     if pooling_ratio !== nothing
         desc["pooling_ratio"] = map(Base.identity, pooling_ratio)
@@ -28412,9 +28412,9 @@ begin
 end
 function scatter_nd_eager(indices_, updates_, shape_; name=nothing)
     desc = tf.EagerOp("ScatterNd")
-    indices_ = convert(tf.TensorHandle, indices_)
-    updates_ = convert(tf.TensorHandle, updates_)
-    shape_ = convert(tf.TensorHandle, shape_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    updates_ = convert(tf.EagerTensor, updates_)
+    shape_ = convert(tf.EagerTensor, shape_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
     tf.add_input(desc, shape_)
@@ -28463,9 +28463,9 @@ begin
 end
 function tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing)
     desc = tf.EagerOp("TensorListScatterIntoExistingList")
-    input_handle_ = convert(tf.TensorHandle, input_handle_)
-    tensor_ = convert(tf.TensorHandle, tensor_)
-    indices_ = convert(tf.TensorHandle, indices_)
+    input_handle_ = convert(tf.EagerTensor, input_handle_)
+    tensor_ = convert(tf.EagerTensor, tensor_)
+    indices_ = convert(tf.EagerTensor, indices_)
     tf.add_input(desc, input_handle_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, indices_)
@@ -28512,9 +28512,9 @@ begin
 end
 function select_eager(condition_, t_, e_; name=nothing)
     desc = tf.EagerOp("Select")
-    condition_ = convert(tf.TensorHandle, condition_)
-    t_ = convert(tf.TensorHandle, t_)
-    e_ = convert(tf.TensorHandle, e_)
+    condition_ = convert(tf.EagerTensor, condition_)
+    t_ = convert(tf.EagerTensor, t_)
+    e_ = convert(tf.EagerTensor, e_)
     tf.add_input(desc, condition_)
     tf.add_input(desc, t_)
     tf.add_input(desc, e_)
@@ -28562,8 +28562,8 @@ begin
 end
 function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("Min")
-    input_ = convert(tf.TensorHandle, input_)
-    reduction_indices_ = convert(tf.TensorHandle, reduction_indices_)
+    input_ = convert(tf.EagerTensor, input_)
+    reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
     tf.add_input(desc, input_)
     tf.add_input(desc, reduction_indices_)
     if keep_dims !== nothing
@@ -28622,9 +28622,9 @@ begin
 end
 function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing)
     desc = tf.EagerOp("LRNGrad")
-    input_grads_ = convert(tf.TensorHandle, input_grads_)
-    input_image_ = convert(tf.TensorHandle, input_image_)
-    output_image_ = convert(tf.TensorHandle, output_image_)
+    input_grads_ = convert(tf.EagerTensor, input_grads_)
+    input_image_ = convert(tf.EagerTensor, input_image_)
+    output_image_ = convert(tf.EagerTensor, output_image_)
     tf.add_input(desc, input_grads_)
     tf.add_input(desc, input_image_)
     tf.add_input(desc, output_image_)
@@ -28696,8 +28696,8 @@ begin
 end
 function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing)
     desc = tf.EagerOp("RandomPoissonV2")
-    shape_ = convert(tf.TensorHandle, shape_)
-    rate_ = convert(tf.TensorHandle, rate_)
+    shape_ = convert(tf.EagerTensor, shape_)
+    rate_ = convert(tf.EagerTensor, rate_)
     tf.add_input(desc, shape_)
     tf.add_input(desc, rate_)
     if seed !== nothing
@@ -28829,12 +28829,12 @@ begin
 end
 function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent")
-    var_ = convert(tf.TensorHandle, var_)
-    alpha_ = convert(tf.TensorHandle, alpha_)
-    l1_ = convert(tf.TensorHandle, l1_)
-    l2_ = convert(tf.TensorHandle, l2_)
-    grad_ = convert(tf.TensorHandle, grad_)
-    indices_ = convert(tf.TensorHandle, indices_)
+    var_ = convert(tf.EagerTensor, var_)
+    alpha_ = convert(tf.EagerTensor, alpha_)
+    l1_ = convert(tf.EagerTensor, l1_)
+    l2_ = convert(tf.EagerTensor, l2_)
+    grad_ = convert(tf.EagerTensor, grad_)
+    indices_ = convert(tf.EagerTensor, indices_)
    tf.add_input(desc, var_)
     tf.add_input(desc, alpha_)
     tf.add_input(desc, l1_)
@@ -28889,7 +28889,7 @@ begin
 end
 function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
desc = tf.EagerOp("ExperimentalNonSerializableDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -28939,8 +28939,8 @@ begin end function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - tag_ = convert(tf.TensorHandle, tag_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + tag_ = convert(tf.EagerTensor, tag_) tf.add_input(desc, input_dataset_) tf.add_input(desc, tag_) if output_types !== nothing @@ -28997,9 +28997,9 @@ begin end function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropFilter") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -29064,8 +29064,8 @@ begin end function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("_If") - cond_ = convert(tf.TensorHandle, cond_) - input_ = convert(tf.TensorHandle, input_) + cond_ = convert(tf.EagerTensor, cond_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, cond_) tf.add_input(desc, input_) if Tin !== nothing @@ -29119,7 +29119,7 @@ begin end function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAddGrad") - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, out_backprop_) if data_format !== nothing desc["data_format"] = Base.String(data_format) @@ -29159,7 +29159,7 @@ begin end function reader_serialize_state_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeStateV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res) @@ -29195,7 +29195,7 @@ begin end function wrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("WrapDatasetVariant") - input_handle_ = convert(tf.TensorHandle, input_handle_) + input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing, res) @@ -29254,11 +29254,11 @@ begin end function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ParallelInterleaveDatasetV2") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - cycle_length_ = convert(tf.TensorHandle, cycle_length_) - block_length_ = convert(tf.TensorHandle, 
block_length_) - num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, cycle_length_) @@ -29330,9 +29330,9 @@ begin end function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") - input_sizes_ = convert(tf.TensorHandle, input_sizes_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_sizes_ = convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -29402,14 +29402,14 @@ begin end function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyRMSProp") - var_ = convert(tf.TensorHandle, var_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -29470,8 +29470,8 @@ begin end function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("SparseAccumulatorTakeGradient") - handle_ = convert(tf.TensorHandle, handle_) - num_required_ = convert(tf.TensorHandle, num_required_) + handle_ = convert(tf.EagerTensor, handle_) + num_required_ = convert(tf.EagerTensor, num_required_) tf.add_input(desc, handle_) tf.add_input(desc, num_required_) if dtype !== nothing @@ -29517,7 +29517,7 @@ begin end function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLMDBDataset") - filenames_ = convert(tf.TensorHandle, filenames_) + filenames_ = convert(tf.EagerTensor, filenames_) tf.add_input(desc, filenames_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -29559,7 +29559,7 @@ begin end function stack_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("StackCloseV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res) @@ -29675,14 +29675,14 @@ begin end function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = 
tf.EagerOp("ResourceApplyAdagradDA") - var_ = convert(tf.TensorHandle, var_) - gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) - grad_ = convert(tf.TensorHandle, grad_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - global_step_ = convert(tf.TensorHandle, global_step_) + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -29732,7 +29732,7 @@ begin end function tensor_forest_tree_size_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSize") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res) @@ -29769,7 +29769,7 @@ begin end function matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDiagPart") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -29806,7 +29806,7 @@ begin end function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res) @@ -29849,10 +29849,10 @@ begin end function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplitV3") - handle_ = convert(tf.TensorHandle, handle_) - value_ = convert(tf.TensorHandle, value_) - lengths_ = convert(tf.TensorHandle, lengths_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, lengths_) @@ -29905,10 +29905,10 @@ begin end function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToDense") - sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - output_shape_ = convert(tf.TensorHandle, output_shape_) - sparse_values_ = convert(tf.TensorHandle, sparse_values_) - default_value_ = convert(tf.TensorHandle, default_value_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + output_shape_ = convert(tf.EagerTensor, output_shape_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + default_value_ = convert(tf.EagerTensor, default_value_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, output_shape_) tf.add_input(desc, sparse_values_) @@ -29958,7 +29958,7 @@ begin end function 
tpu_replicated_input_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("TPUReplicatedInput") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -29998,7 +29998,7 @@ begin end function stack_close_eager(handle_; name=nothing) desc = tf.EagerOp("StackClose") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(stack_close, [handle_], name=nothing, res) @@ -30042,7 +30042,7 @@ begin end function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeManySparse") - serialized_sparse_ = convert(tf.TensorHandle, serialized_sparse_) + serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) tf.add_input(desc, serialized_sparse_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -30091,7 +30091,7 @@ begin end function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceRecv") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if reduction !== nothing desc["reduction"] = Base.String(reduction) @@ -30144,8 +30144,8 @@ begin end function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPadGrad") - input_ = convert(tf.TensorHandle, input_) - paddings_ = convert(tf.TensorHandle, paddings_) + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) if mode !== nothing @@ -30190,8 +30190,8 @@ begin end function broadcast_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastArgs") - s0_ = convert(tf.TensorHandle, s0_) - s1_ = convert(tf.TensorHandle, s1_) + s0_ = convert(tf.EagerTensor, s0_) + s1_ = convert(tf.EagerTensor, s1_) tf.add_input(desc, s0_) tf.add_input(desc, s1_) desc["T"] = tf.data_type(s0_) @@ -30237,8 +30237,8 @@ begin end function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessTruncatedNormal") - shape_ = convert(tf.TensorHandle, shape_) - seed_ = convert(tf.TensorHandle, seed_) + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) if dtype !== nothing @@ -30282,8 +30282,8 @@ begin end function regex_full_match_eager(input_, pattern_; name=nothing) desc = tf.EagerOp("RegexFullMatch") - input_ = convert(tf.TensorHandle, input_) - pattern_ = convert(tf.TensorHandle, pattern_) + input_ = convert(tf.EagerTensor, input_) + pattern_ = convert(tf.EagerTensor, pattern_) tf.add_input(desc, input_) tf.add_input(desc, pattern_) res = tf.execute(desc) @@ -30320,7 +30320,7 @@ begin end function unwrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("UnwrapDatasetVariant") - input_handle_ = convert(tf.TensorHandle, input_handle_) + input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res) @@ -30362,7 +30362,7 @@ begin end function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing) desc = tf.EagerOp("Empty") - shape_ = convert(tf.TensorHandle, shape_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, shape_) 
     if dtype !== nothing
         desc["dtype"] = Base.identity(dtype)
@@ -30457,8 +30457,8 @@ begin
 end
 function div_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("Div")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -30562,8 +30562,8 @@ begin
 end
 function truncate_div_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("TruncateDiv")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -30613,8 +30613,8 @@ begin
 end
 function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing)
     desc = tf.EagerOp("UnicodeEncode")
-    input_values_ = convert(tf.TensorHandle, input_values_)
-    input_splits_ = convert(tf.TensorHandle, input_splits_)
+    input_values_ = convert(tf.EagerTensor, input_values_)
+    input_splits_ = convert(tf.EagerTensor, input_splits_)
     tf.add_input(desc, input_values_)
     tf.add_input(desc, input_splits_)
     if errors !== nothing
@@ -30663,7 +30663,7 @@ begin
 end
 function merge_summary_eager(inputs_; name=nothing, N=nothing)
     desc = tf.EagerOp("MergeSummary")
-    inputs_ = convert(tf.TensorHandle, inputs_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
     tf.add_input(desc, inputs_)
     if N !== nothing
         desc["N"] = Base.Int(N)
@@ -30702,7 +30702,7 @@ begin
 end
 function fake_queue_eager(resource_; name=nothing)
     desc = tf.EagerOp("FakeQueue")
-    resource_ = convert(tf.TensorHandle, resource_)
+    resource_ = convert(tf.EagerTensor, resource_)
     tf.add_input(desc, resource_)
     res = tf.execute(desc)
     node = tf.TapeNode(fake_queue, [resource_], name=nothing, res)
@@ -30739,7 +30739,7 @@ begin
 end
 function batch_cholesky_eager(input_; name=nothing)
     desc = tf.EagerOp("BatchCholesky")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     desc["T"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -30833,7 +30833,7 @@ begin
 end
 function bessel_i1e_eager(x_; name=nothing)
     desc = tf.EagerOp("BesselI1e")
-    x_ = convert(tf.TensorHandle, x_)
+    x_ = convert(tf.EagerTensor, x_)
     tf.add_input(desc, x_)
     desc["T"] = tf.data_type(x_)
     res = tf.execute(desc)
@@ -30872,8 +30872,8 @@ begin
 end
 function import_event_eager(writer_, event_; name=nothing)
     desc = tf.EagerOp("ImportEvent")
-    writer_ = convert(tf.TensorHandle, writer_)
-    event_ = convert(tf.TensorHandle, event_)
+    writer_ = convert(tf.EagerTensor, writer_)
+    event_ = convert(tf.EagerTensor, event_)
     tf.add_input(desc, writer_)
     tf.add_input(desc, event_)
     res = tf.execute(desc)
@@ -30935,9 +30935,9 @@ begin
 end
 function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing)
     desc = tf.EagerOp("QuantizedInstanceNorm")
-    x_ = convert(tf.TensorHandle, x_)
-    x_min_ = convert(tf.TensorHandle, x_min_)
-    x_max_ = convert(tf.TensorHandle, x_max_)
+    x_ = convert(tf.EagerTensor, x_)
+    x_min_ = convert(tf.EagerTensor, x_min_)
+    x_max_ = convert(tf.EagerTensor, x_max_)
     tf.add_input(desc, x_)
     tf.add_input(desc, x_min_)
     tf.add_input(desc, x_max_)
@@ -31005,8 +31005,8 @@ begin
 end
 function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
     desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters")
-    parameters_ = convert(tf.TensorHandle, parameters_)
-    accumulators_ = convert(tf.TensorHandle, accumulators_)
+    parameters_ = convert(tf.EagerTensor, parameters_)
+    accumulators_ = convert(tf.EagerTensor, accumulators_)
     tf.add_input(desc, parameters_)
     tf.add_input(desc, accumulators_)
     if table_id !== nothing
@@ -31062,10 +31062,10 @@ begin
 end
 function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing)
     desc = tf.EagerOp("TensorArrayWriteV3")
-    handle_ = convert(tf.TensorHandle, handle_)
-    index_ = convert(tf.TensorHandle, index_)
-    value_ = convert(tf.TensorHandle, value_)
-    flow_in_ = convert(tf.TensorHandle, flow_in_)
+    handle_ = convert(tf.EagerTensor, handle_)
+    index_ = convert(tf.EagerTensor, index_)
+    value_ = convert(tf.EagerTensor, value_)
+    flow_in_ = convert(tf.EagerTensor, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, index_)
     tf.add_input(desc, value_)
@@ -31119,8 +31119,8 @@ begin
 end
 function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing)
     desc = tf.EagerOp("DenseToDenseSetOperation")
-    set1_ = convert(tf.TensorHandle, set1_)
-    set2_ = convert(tf.TensorHandle, set2_)
+    set1_ = convert(tf.EagerTensor, set1_)
+    set2_ = convert(tf.EagerTensor, set2_)
     tf.add_input(desc, set1_)
     tf.add_input(desc, set2_)
     if set_operation !== nothing
@@ -31192,7 +31192,7 @@ begin
 end
 function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing)
     desc = tf.EagerOp("EncodeJpeg")
-    image_ = convert(tf.TensorHandle, image_)
+    image_ = convert(tf.EagerTensor, image_)
     tf.add_input(desc, image_)
     if format !== nothing
         desc["format"] = Base.String(format)
@@ -31260,9 +31260,9 @@ begin
 end
 function inplace_update_eager(x_, i_, v_; name=nothing)
     desc = tf.EagerOp("InplaceUpdate")
-    x_ = convert(tf.TensorHandle, x_)
-    i_ = convert(tf.TensorHandle, i_)
-    v_ = convert(tf.TensorHandle, v_)
+    x_ = convert(tf.EagerTensor, x_)
+    i_ = convert(tf.EagerTensor, i_)
+    v_ = convert(tf.EagerTensor, v_)
     tf.add_input(desc, x_)
     tf.add_input(desc, i_)
     tf.add_input(desc, v_)
@@ -31316,9 +31316,9 @@ begin
 end
 function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing)
     desc = tf.EagerOp("FusedPadConv2D")
-    input_ = convert(tf.TensorHandle, input_)
-    paddings_ = convert(tf.TensorHandle, paddings_)
-    filter_ = convert(tf.TensorHandle, filter_)
+    input_ = convert(tf.EagerTensor, input_)
+    paddings_ = convert(tf.EagerTensor, paddings_)
+    filter_ = convert(tf.EagerTensor, filter_)
     tf.add_input(desc, input_)
     tf.add_input(desc, paddings_)
     tf.add_input(desc, filter_)
@@ -31380,9 +31380,9 @@ begin
 end
 function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing)
     desc = tf.EagerOp("QuantizedRelu")
-    features_ = convert(tf.TensorHandle, features_)
-    min_features_ = convert(tf.TensorHandle, min_features_)
-    max_features_ = convert(tf.TensorHandle, max_features_)
+    features_ = convert(tf.EagerTensor, features_)
+    min_features_ = convert(tf.EagerTensor, min_features_)
+    max_features_ = convert(tf.EagerTensor, max_features_)
     tf.add_input(desc, features_)
     tf.add_input(desc, min_features_)
     tf.add_input(desc, max_features_)
@@ -31429,8 +31429,8 @@ begin
 end
 function gather_nd_eager(params_, indices_; name=nothing)
     desc = tf.EagerOp("GatherNd")
-    params_ = convert(tf.TensorHandle, params_)
-    indices_ = convert(tf.TensorHandle, indices_)
+    params_ = convert(tf.EagerTensor, params_)
+    indices_ = convert(tf.EagerTensor, indices_)
     tf.add_input(desc, params_)
     tf.add_input(desc, indices_)
     desc["Tparams"] = tf.data_type(params_)
@@ -31519,7 +31519,7 @@ begin
 end
 function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing)
     desc = tf.EagerOp("FilterByLastComponentDataset")
-    input_dataset_ = convert(tf.TensorHandle, input_dataset_)
+    input_dataset_ = convert(tf.EagerTensor, input_dataset_)
     tf.add_input(desc, input_dataset_)
     if output_types !== nothing
         desc["output_types"] = map(Base.identity, output_types)
@@ -31566,9 +31566,9 @@ begin
 end
 function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing)
     desc = tf.EagerOp("ClipByValue")
-    t_ = convert(tf.TensorHandle, t_)
-    clip_value_min_ = convert(tf.TensorHandle, clip_value_min_)
-    clip_value_max_ = convert(tf.TensorHandle, clip_value_max_)
+    t_ = convert(tf.EagerTensor, t_)
+    clip_value_min_ = convert(tf.EagerTensor, clip_value_min_)
+    clip_value_max_ = convert(tf.EagerTensor, clip_value_max_)
     tf.add_input(desc, t_)
     tf.add_input(desc, clip_value_min_)
     tf.add_input(desc, clip_value_max_)
@@ -31618,8 +31618,8 @@ begin
 end
 function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing)
     desc = tf.EagerOp("ImageSummary")
-    tag_ = convert(tf.TensorHandle, tag_)
-    tensor_ = convert(tf.TensorHandle, tensor_)
+    tag_ = convert(tf.EagerTensor, tag_)
+    tensor_ = convert(tf.EagerTensor, tensor_)
     tf.add_input(desc, tag_)
     tf.add_input(desc, tensor_)
     if max_images !== nothing
@@ -31730,7 +31730,7 @@ begin
 end
 function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing)
     desc = tf.EagerOp("StringJoin")
-    inputs_ = convert(tf.TensorHandle, inputs_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
     tf.add_input(desc, inputs_)
     if N !== nothing
         desc["N"] = Base.Int(N)
@@ -31782,9 +31782,9 @@ begin
 end
 function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ResourceScatterNdAdd")
-    ref_ = convert(tf.TensorHandle, ref_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    updates_ = convert(tf.TensorHandle, updates_)
+    ref_ = convert(tf.EagerTensor, ref_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    updates_ = convert(tf.EagerTensor, updates_)
     tf.add_input(desc, ref_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -31832,8 +31832,8 @@ begin
 end
 function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing)
     desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize")
-    quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_)
-    bucket_boundaries_ = convert(tf.TensorHandle, bucket_boundaries_)
+    quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_)
+    bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_)
     tf.add_input(desc, quantile_stream_resource_handle_)
     tf.add_input(desc, bucket_boundaries_)
     if num_streams !== nothing
@@ -31876,8 +31876,8 @@ begin
 end
 function left_shift_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("LeftShift")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -31933,11 +31933,11 @@ begin
 end
 function requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing)
     desc = tf.EagerOp("RequantizePerChannel")
-    input_ = convert(tf.TensorHandle, input_)
-    input_min_ = convert(tf.TensorHandle, input_min_)
-    input_max_ = convert(tf.TensorHandle, input_max_)
-    requested_output_min_ = convert(tf.TensorHandle, requested_output_min_)
-    requested_output_max_ = convert(tf.TensorHandle, requested_output_max_)
+    input_ = convert(tf.EagerTensor, input_)
+    input_min_ = convert(tf.EagerTensor, input_min_)
+    input_max_ = convert(tf.EagerTensor, input_max_)
+    requested_output_min_ = convert(tf.EagerTensor, requested_output_min_)
+    requested_output_max_ = convert(tf.EagerTensor, requested_output_max_)
     tf.add_input(desc, input_)
     tf.add_input(desc, input_min_)
     tf.add_input(desc, input_max_)
@@ -31988,9 +31988,9 @@ begin
 end
 function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing)
     desc = tf.EagerOp("TensorScatterAdd")
-    tensor_ = convert(tf.TensorHandle, tensor_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    updates_ = convert(tf.TensorHandle, updates_)
+    tensor_ = convert(tf.EagerTensor, tensor_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    updates_ = convert(tf.EagerTensor, updates_)
     tf.add_input(desc, tensor_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -32099,7 +32099,7 @@ begin
 end
 function ifft3d_eager(input_; name=nothing)
     desc = tf.EagerOp("IFFT3D")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     desc["Tcomplex"] = tf.data_type(input_)
     res = tf.execute(desc)
@@ -32144,8 +32144,8 @@ begin
 end
 function euclidean_norm_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing)
     desc = tf.EagerOp("EuclideanNorm")
-    input_ = convert(tf.TensorHandle, input_)
-    reduction_indices_ = convert(tf.TensorHandle, reduction_indices_)
+    input_ = convert(tf.EagerTensor, input_)
+    reduction_indices_ = convert(tf.EagerTensor, reduction_indices_)
     tf.add_input(desc, input_)
     tf.add_input(desc, reduction_indices_)
     if keep_dims !== nothing
@@ -32193,8 +32193,8 @@ begin
 end
 function ref_select_eager(index_, inputs_; name=nothing, N=nothing)
     desc = tf.EagerOp("RefSelect")
-    index_ = convert(tf.TensorHandle, index_)
-    inputs_ = convert(tf.TensorHandle, inputs_)
+    index_ = convert(tf.EagerTensor, index_)
+    inputs_ = convert(tf.EagerTensor, inputs_)
     tf.add_input(desc, index_)
     tf.add_input(desc, inputs_)
     if N !== nothing
@@ -32240,9 +32240,9 @@ begin
 end
 function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing)
     desc = tf.EagerOp("SparseTensorSliceDataset")
-    indices_ = convert(tf.TensorHandle, indices_)
-    values_ = convert(tf.TensorHandle, values_)
-    dense_shape_ = convert(tf.TensorHandle, dense_shape_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    values_ = convert(tf.EagerTensor, values_)
+    dense_shape_ = convert(tf.EagerTensor, dense_shape_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, values_)
     tf.add_input(desc, dense_shape_)
@@ -32342,7 +32342,7 @@ begin
 end
 function batch_ifft2d_eager(input_; name=nothing)
     desc = tf.EagerOp("BatchIFFT2D")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     res = tf.execute(desc)
     node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res)
@@ -32388,9 +32388,9 @@ begin
 end
 function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing)
     desc = tf.EagerOp("TensorArrayGather")
-    handle_ = convert(tf.TensorHandle, handle_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    flow_in_ = convert(tf.TensorHandle, flow_in_)
+    handle_ = convert(tf.EagerTensor, handle_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    flow_in_ = convert(tf.EagerTensor, flow_in_)
     tf.add_input(desc, handle_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, flow_in_)
@@ -32444,10 +32444,10 @@ begin
 end
 function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
     desc = tf.EagerOp("SparseSegmentMeanWithNumSegments")
-    data_ = convert(tf.TensorHandle, data_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    segment_ids_ = convert(tf.TensorHandle, segment_ids_)
-    num_segments_ = convert(tf.TensorHandle, num_segments_)
+    data_ = convert(tf.EagerTensor, data_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+    num_segments_ = convert(tf.EagerTensor, num_segments_)
     tf.add_input(desc, data_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, segment_ids_)
@@ -32493,7 +32493,7 @@ begin
 end
 function ensure_shape_eager(input_; name=nothing, shape=nothing)
     desc = tf.EagerOp("EnsureShape")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if shape !== nothing
         desc["shape"] = Base.identity(shape)
@@ -32545,11 +32545,11 @@ begin
 end
 function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyProximalGradientDescent")
-    var_ = convert(tf.TensorHandle, var_)
-    alpha_ = convert(tf.TensorHandle, alpha_)
-    l1_ = convert(tf.TensorHandle, l1_)
-    l2_ = convert(tf.TensorHandle, l2_)
-    delta_ = convert(tf.TensorHandle, delta_)
+    var_ = convert(tf.EagerTensor, var_)
+    alpha_ = convert(tf.EagerTensor, alpha_)
+    l1_ = convert(tf.EagerTensor, l1_)
+    l2_ = convert(tf.EagerTensor, l2_)
+    delta_ = convert(tf.EagerTensor, delta_)
     tf.add_input(desc, var_)
     tf.add_input(desc, alpha_)
     tf.add_input(desc, l1_)
@@ -32619,7 +32619,7 @@ begin
 end
 function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing)
     desc = tf.EagerOp("CollectiveReduce")
-    input_ = convert(tf.TensorHandle, input_)
+    input_ = convert(tf.EagerTensor, input_)
     tf.add_input(desc, input_)
     if group_size !== nothing
         desc["group_size"] = Base.Int(group_size)
@@ -32678,7 +32678,7 @@ begin
 end
 function is_nan_eager(x_; name=nothing)
     desc = tf.EagerOp("IsNan")
-    x_ = convert(tf.TensorHandle, x_)
+    x_ = convert(tf.EagerTensor, x_)
     tf.add_input(desc, x_)
     desc["T"] = tf.data_type(x_)
     res = tf.execute(desc)
@@ -32735,15 +32735,15 @@ begin
 end
 function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyAdaMax")
-    var_ = convert(tf.TensorHandle, var_)
-    m_ = convert(tf.TensorHandle, m_)
-    v_ = convert(tf.TensorHandle, v_)
-    beta1_power_ = convert(tf.TensorHandle, beta1_power_)
-    lr_ = convert(tf.TensorHandle, lr_)
-    beta1_ = convert(tf.TensorHandle, beta1_)
-    beta2_ = convert(tf.TensorHandle, beta2_)
-    epsilon_ = convert(tf.TensorHandle, epsilon_)
-    grad_ = convert(tf.TensorHandle, grad_)
+    var_ = convert(tf.EagerTensor, var_)
+    m_ = convert(tf.EagerTensor, m_)
+    v_ = convert(tf.EagerTensor, v_)
+    beta1_power_ = convert(tf.EagerTensor, beta1_power_)
+    lr_ = convert(tf.EagerTensor, lr_)
+    beta1_ = convert(tf.EagerTensor, beta1_)
+    beta2_ = convert(tf.EagerTensor, beta2_)
+    epsilon_ = convert(tf.EagerTensor, epsilon_)
+    grad_ = convert(tf.EagerTensor, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, m_)
     tf.add_input(desc, v_)
@@ -32819,8 +32819,8 @@ begin
 end
 function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing)
     desc = tf.EagerOp("DecodeAndCropJpeg")
-    contents_ = convert(tf.TensorHandle, contents_)
-    crop_window_ = convert(tf.TensorHandle, crop_window_)
+    contents_ = convert(tf.EagerTensor, contents_)
+    crop_window_ = convert(tf.EagerTensor, crop_window_)
     tf.add_input(desc, contents_)
     tf.add_input(desc, crop_window_)
     if channels !== nothing
@@ -32895,15 +32895,15 @@ begin
 end
 function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ApplyCenteredRMSProp")
-    var_ = convert(tf.TensorHandle, var_)
-    mg_ = convert(tf.TensorHandle, mg_)
-    ms_ = convert(tf.TensorHandle, ms_)
-    mom_ = convert(tf.TensorHandle, mom_)
-    lr_ = convert(tf.TensorHandle, lr_)
-    rho_ = convert(tf.TensorHandle, rho_)
-    momentum_ = convert(tf.TensorHandle, momentum_)
-    epsilon_ = convert(tf.TensorHandle, epsilon_)
-    grad_ = convert(tf.TensorHandle, grad_)
+    var_ = convert(tf.EagerTensor, var_)
+    mg_ = convert(tf.EagerTensor, mg_)
+    ms_ = convert(tf.EagerTensor, ms_)
+    mom_ = convert(tf.EagerTensor, mom_)
+    lr_ = convert(tf.EagerTensor, lr_)
+    rho_ = convert(tf.EagerTensor, rho_)
+    momentum_ = convert(tf.EagerTensor, momentum_)
+    epsilon_ = convert(tf.EagerTensor, epsilon_)
+    grad_ = convert(tf.EagerTensor, grad_)
     tf.add_input(desc, var_)
     tf.add_input(desc, mg_)
     tf.add_input(desc, ms_)
@@ -32976,9 +32976,9 @@ begin
 end
 function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing)
     desc = tf.EagerOp("Conv3DBackpropFilterV2")
-    input_ = convert(tf.TensorHandle, input_)
-    filter_sizes_ = convert(tf.TensorHandle, filter_sizes_)
-    out_backprop_ = convert(tf.TensorHandle, out_backprop_)
+    input_ = convert(tf.EagerTensor, input_)
+    filter_sizes_ = convert(tf.EagerTensor, filter_sizes_)
+    out_backprop_ = convert(tf.EagerTensor, out_backprop_)
     tf.add_input(desc, input_)
     tf.add_input(desc, filter_sizes_)
     tf.add_input(desc, out_backprop_)
@@ -33039,8 +33039,8 @@ begin
 end
 function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing)
     desc = tf.EagerOp("MatrixTriangularSolve")
-    matrix_ = convert(tf.TensorHandle, matrix_)
-    rhs_ = convert(tf.TensorHandle, rhs_)
+    matrix_ = convert(tf.EagerTensor, matrix_)
+    rhs_ = convert(tf.EagerTensor, rhs_)
     tf.add_input(desc, matrix_)
     tf.add_input(desc, rhs_)
     if lower !== nothing
@@ -33085,7 +33085,7 @@ begin
 end
 function reader_num_work_units_completed_eager(reader_handle_; name=nothing)
     desc = tf.EagerOp("ReaderNumWorkUnitsCompleted")
-    reader_handle_ = convert(tf.TensorHandle, reader_handle_)
+    reader_handle_ = convert(tf.EagerTensor, reader_handle_)
     tf.add_input(desc, reader_handle_)
     res = tf.execute(desc)
     node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res)
@@ -33132,11 +33132,11 @@ begin
 end
function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) desc = tf.EagerOp("WriteAudioSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tag_ = convert(tf.TensorHandle, tag_) - tensor_ = convert(tf.TensorHandle, tensor_) - sample_rate_ = convert(tf.TensorHandle, sample_rate_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -33181,8 +33181,8 @@ begin end function sharded_filespec_eager(basename_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilespec") - basename_ = convert(tf.TensorHandle, basename_) - num_shards_ = convert(tf.TensorHandle, num_shards_) + basename_ = convert(tf.EagerTensor, basename_) + num_shards_ = convert(tf.EagerTensor, num_shards_) tf.add_input(desc, basename_) tf.add_input(desc, num_shards_) res = tf.execute(desc) @@ -33222,8 +33222,8 @@ begin end function div_no_nan_eager(x_, y_; name=nothing) desc = tf.EagerOp("DivNoNan") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -33277,11 +33277,11 @@ begin end function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) desc = tf.EagerOp("SparseAccumulatorApplyGradient") - handle_ = convert(tf.TensorHandle, handle_) - local_step_ = convert(tf.TensorHandle, local_step_) - gradient_indices_ = convert(tf.TensorHandle, gradient_indices_) - gradient_values_ = convert(tf.TensorHandle, gradient_values_) - gradient_shape_ = convert(tf.TensorHandle, gradient_shape_) + handle_ = convert(tf.EagerTensor, handle_) + local_step_ = convert(tf.EagerTensor, local_step_) + gradient_indices_ = convert(tf.EagerTensor, gradient_indices_) + gradient_values_ = convert(tf.EagerTensor, gradient_values_) + gradient_shape_ = convert(tf.EagerTensor, gradient_shape_) tf.add_input(desc, handle_) tf.add_input(desc, local_step_) tf.add_input(desc, gradient_indices_) @@ -33339,8 +33339,8 @@ begin end function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedTensorToSparse") - rt_nested_splits_ = convert(tf.TensorHandle, rt_nested_splits_) - rt_dense_values_ = convert(tf.TensorHandle, rt_dense_values_) + rt_nested_splits_ = convert(tf.EagerTensor, rt_nested_splits_) + rt_dense_values_ = convert(tf.EagerTensor, rt_dense_values_) tf.add_input(desc, rt_nested_splits_) tf.add_input(desc, rt_dense_values_) if RAGGED_RANK !== nothing @@ -33391,7 +33391,7 @@ begin end function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("ExtractVolumePatches") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if ksizes !== nothing desc["ksizes"] = map(Base.identity, ksizes) @@ -33448,9 +33448,9 @@ begin end function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing) desc = tf.EagerOp("BarrierInsertMany") - handle_ = convert(tf.TensorHandle, handle_) - keys_ = 
convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + handle_ = convert(tf.EagerTensor, handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -33546,8 +33546,8 @@ begin end function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing) desc = tf.EagerOp("SpaceToBatch") - input_ = convert(tf.TensorHandle, input_) - paddings_ = convert(tf.TensorHandle, paddings_) + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) if block_size !== nothing @@ -33660,8 +33660,8 @@ begin end function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("EmptyTensorList") - element_shape_ = convert(tf.TensorHandle, element_shape_) - max_num_elements_ = convert(tf.TensorHandle, max_num_elements_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + max_num_elements_ = convert(tf.EagerTensor, max_num_elements_) tf.add_input(desc, element_shape_) tf.add_input(desc, max_num_elements_) if element_dtype !== nothing @@ -33738,14 +33738,14 @@ begin end function quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndRequantize") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) - max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) + max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -33811,7 +33811,7 @@ begin end function lu_eager(input_; name=nothing, output_idx_type=nothing) desc = tf.EagerOp("Lu") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if output_idx_type !== nothing desc["output_idx_type"] = Base.identity(output_idx_type) @@ -33854,7 +33854,7 @@ begin end function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing) desc = tf.EagerOp("DecodeCompressed") - bytes_ = convert(tf.TensorHandle, bytes_) + bytes_ = convert(tf.EagerTensor, bytes_) tf.add_input(desc, bytes_) if compression_type !== nothing desc["compression_type"] = Base.String(compression_type) @@ -33896,7 +33896,7 @@ begin end function get_session_tensor_eager(handle_; name=nothing, dtype=nothing) desc = tf.EagerOp("GetSessionTensor") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ 
-33945,9 +33945,9 @@ begin end function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV3") - handle_ = convert(tf.TensorHandle, handle_) - indices_ = convert(tf.TensorHandle, indices_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, flow_in_) @@ -34009,10 +34009,10 @@ begin end function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - linears_ = convert(tf.TensorHandle, linears_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + linears_ = convert(tf.EagerTensor, linears_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, linears_) @@ -34066,7 +34066,7 @@ begin end function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) desc = tf.EagerOp("DestroyResourceOp") - resource_ = convert(tf.TensorHandle, resource_) + resource_ = convert(tf.EagerTensor, resource_) tf.add_input(desc, resource_) if ignore_lookup_error !== nothing desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) @@ -34163,11 +34163,11 @@ begin end function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) desc = tf.EagerOp("CreateSummaryDbWriter") - writer_ = convert(tf.TensorHandle, writer_) - db_uri_ = convert(tf.TensorHandle, db_uri_) - experiment_name_ = convert(tf.TensorHandle, experiment_name_) - run_name_ = convert(tf.TensorHandle, run_name_) - user_name_ = convert(tf.TensorHandle, user_name_) + writer_ = convert(tf.EagerTensor, writer_) + db_uri_ = convert(tf.EagerTensor, db_uri_) + experiment_name_ = convert(tf.EagerTensor, experiment_name_) + run_name_ = convert(tf.EagerTensor, run_name_) + user_name_ = convert(tf.EagerTensor, user_name_) tf.add_input(desc, writer_) tf.add_input(desc, db_uri_) tf.add_input(desc, experiment_name_) @@ -34210,8 +34210,8 @@ begin end function tanh_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("TanhGrad") - y_ = convert(tf.TensorHandle, y_) - dy_ = convert(tf.TensorHandle, dy_) + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -34250,7 +34250,7 @@ begin end function decode_base64_eager(input_; name=nothing) desc = tf.EagerOp("DecodeBase64") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(decode_base64, [input_], name=nothing, res) @@ -34301,11 +34301,11 @@ begin end function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradGradV2") - orig_input_ = convert(tf.TensorHandle, orig_input_) - 
orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) - ksize_ = convert(tf.TensorHandle, ksize_) - strides_ = convert(tf.TensorHandle, strides_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -34361,9 +34361,9 @@ begin end function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) desc = tf.EagerOp("AudioSummaryV2") - tag_ = convert(tf.TensorHandle, tag_) - tensor_ = convert(tf.TensorHandle, tensor_) - sample_rate_ = convert(tf.TensorHandle, sample_rate_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) tf.add_input(desc, tag_) tf.add_input(desc, tensor_) tf.add_input(desc, sample_rate_) @@ -34422,7 +34422,7 @@ begin end function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("StatefulPartitionedCall") - args_ = convert(tf.TensorHandle, args_) + args_ = convert(tf.EagerTensor, args_) tf.add_input(desc, args_) if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) @@ -34494,8 +34494,8 @@ begin end function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) desc = tf.EagerOp("_ScopedAllocatorConcat") - backing_ = convert(tf.TensorHandle, backing_) - inputs_ = convert(tf.TensorHandle, inputs_) + backing_ = convert(tf.EagerTensor, backing_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, backing_) tf.add_input(desc, inputs_) if shape !== nothing @@ -34563,8 +34563,8 @@ begin end function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient") - gradients_ = convert(tf.TensorHandle, gradients_) - inputs_ = convert(tf.TensorHandle, inputs_) + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) if min !== nothing @@ -34625,7 +34625,7 @@ begin end function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("BatchSvd") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if compute_uv !== nothing desc["compute_uv"] = Base.Bool(compute_uv) @@ -34690,9 +34690,9 @@ begin end function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapStage") - key_ = convert(tf.TensorHandle, key_) - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, key_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -34770,15 +34770,15 @@ begin end function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, 
use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrl") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -34837,8 +34837,8 @@ begin end function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighbor") - images_ = convert(tf.TensorHandle, images_) - size_ = convert(tf.TensorHandle, size_) + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, images_) tf.add_input(desc, size_) if align_corners !== nothing @@ -34901,15 +34901,15 @@ begin end function experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalCSVDataset") - filenames_ = convert(tf.TensorHandle, filenames_) - compression_type_ = convert(tf.TensorHandle, compression_type_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) - header_ = convert(tf.TensorHandle, header_) - field_delim_ = convert(tf.TensorHandle, field_delim_) - use_quote_delim_ = convert(tf.TensorHandle, use_quote_delim_) - na_value_ = convert(tf.TensorHandle, na_value_) - select_cols_ = convert(tf.TensorHandle, select_cols_) - record_defaults_ = convert(tf.TensorHandle, record_defaults_) + filenames_ = convert(tf.EagerTensor, filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + header_ = convert(tf.EagerTensor, header_) + field_delim_ = convert(tf.EagerTensor, field_delim_) + use_quote_delim_ = convert(tf.EagerTensor, use_quote_delim_) + na_value_ = convert(tf.EagerTensor, na_value_) + select_cols_ = convert(tf.EagerTensor, select_cols_) + record_defaults_ = convert(tf.EagerTensor, record_defaults_) tf.add_input(desc, filenames_) tf.add_input(desc, compression_type_) tf.add_input(desc, buffer_size_) @@ -34971,10 +34971,10 @@ begin end function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMul") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - mkl_x_ = convert(tf.TensorHandle, mkl_x_) - mkl_y_ = convert(tf.TensorHandle, mkl_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -35016,7 +35016,7 @@ begin end function batch_matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixDiag") - diagonal_ = convert(tf.TensorHandle, diagonal_) + diagonal_ = convert(tf.EagerTensor, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = 
tf.data_type(diagonal_) res = tf.execute(desc) @@ -35054,7 +35054,7 @@ begin end function is_inf_eager(x_; name=nothing) desc = tf.EagerOp("IsInf") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -35132,7 +35132,7 @@ begin end function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FixedUnigramCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -35228,16 +35228,16 @@ begin end function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrlV2") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -35300,8 +35300,8 @@ begin end function unravel_index_eager(indices_, dims_; name=nothing) desc = tf.EagerOp("UnravelIndex") - indices_ = convert(tf.TensorHandle, indices_) - dims_ = convert(tf.TensorHandle, dims_) + indices_ = convert(tf.EagerTensor, indices_) + dims_ = convert(tf.EagerTensor, dims_) tf.add_input(desc, indices_) tf.add_input(desc, dims_) desc["Tidx"] = tf.data_type(indices_) @@ -35348,8 +35348,8 @@ begin end function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Max") - input_ = convert(tf.TensorHandle, input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -35392,7 +35392,7 @@ begin end function ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("IFFT2D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -35448,9 +35448,9 @@ begin end function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) desc = tf.EagerOp("SparseConcat") - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) - shapes_ = convert(tf.TensorHandle, shapes_) + indices_ = convert(tf.EagerTensor, 
indices_) + values_ = convert(tf.EagerTensor, values_) + shapes_ = convert(tf.EagerTensor, shapes_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shapes_) @@ -35501,8 +35501,8 @@ begin end function histogram_summary_eager(tag_, values_; name=nothing) desc = tf.EagerOp("HistogramSummary") - tag_ = convert(tf.TensorHandle, tag_) - values_ = convert(tf.TensorHandle, values_) + tag_ = convert(tf.EagerTensor, tag_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, tag_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) @@ -35545,8 +35545,8 @@ begin end function segment_sum_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentSum") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -35586,7 +35586,7 @@ begin end function exp_eager(x_; name=nothing) desc = tf.EagerOp("Exp") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -35683,9 +35683,9 @@ begin end function resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdSub") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -35739,8 +35739,8 @@ begin end function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaSendFromHost") - inputs_ = convert(tf.TensorHandle, inputs_) - dynamic_key_ = convert(tf.TensorHandle, dynamic_key_) + inputs_ = convert(tf.EagerTensor, inputs_) + dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) tf.add_input(desc, inputs_) tf.add_input(desc, dynamic_key_) if Tinputs !== nothing @@ -35787,7 +35787,7 @@ begin end function get_session_handle_v2_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandleV2") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) @@ -35827,8 +35827,8 @@ begin end function relu_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("ReluGrad") - gradients_ = convert(tf.TensorHandle, gradients_) - features_ = convert(tf.TensorHandle, features_) + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -35875,9 +35875,9 @@ begin end function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMin") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -35946,11 
+35946,11 @@ begin end function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseExample") - serialized_ = convert(tf.TensorHandle, serialized_) - names_ = convert(tf.TensorHandle, names_) - sparse_keys_ = convert(tf.TensorHandle, sparse_keys_) - dense_keys_ = convert(tf.TensorHandle, dense_keys_) - dense_defaults_ = convert(tf.TensorHandle, dense_defaults_) + serialized_ = convert(tf.EagerTensor, serialized_) + names_ = convert(tf.EagerTensor, names_) + sparse_keys_ = convert(tf.EagerTensor, sparse_keys_) + dense_keys_ = convert(tf.EagerTensor, dense_keys_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) tf.add_input(desc, serialized_) tf.add_input(desc, names_) tf.add_input(desc, sparse_keys_) @@ -36013,8 +36013,8 @@ begin end function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueV2") - handle_ = convert(tf.TensorHandle, handle_) - components_ = convert(tf.TensorHandle, components_) + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) tf.add_input(desc, handle_) tf.add_input(desc, components_) if Tcomponents !== nothing @@ -36067,9 +36067,9 @@ begin end function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdAdd") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -36113,7 +36113,7 @@ begin end function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProducedV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res) @@ -36167,10 +36167,10 @@ begin end function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - mg_ = convert(tf.TensorHandle, mg_) + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + mg_ = convert(tf.EagerTensor, mg_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -36227,8 +36227,8 @@ begin end function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignSub") - ref_ = convert(tf.TensorHandle, ref_) - value_ = convert(tf.TensorHandle, value_) + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, ref_) tf.add_input(desc, value_) if use_locking !== nothing @@ -36278,9 +36278,9 @@ begin end function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentSum") - data_ = 
convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -36344,11 +36344,11 @@ begin end function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormGrad") - y_backprop_ = convert(tf.TensorHandle, y_backprop_) - x_ = convert(tf.TensorHandle, x_) - scale_ = convert(tf.TensorHandle, scale_) - reserve_space_1_ = convert(tf.TensorHandle, reserve_space_1_) - reserve_space_2_ = convert(tf.TensorHandle, reserve_space_2_) + y_backprop_ = convert(tf.EagerTensor, y_backprop_) + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_) + reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_) tf.add_input(desc, y_backprop_) tf.add_input(desc, x_) tf.add_input(desc, scale_) @@ -36417,11 +36417,11 @@ begin end function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradV2") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) - ksize_ = convert(tf.TensorHandle, ksize_) - strides_ = convert(tf.TensorHandle, strides_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -36501,13 +36501,13 @@ begin end function quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndRelu") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -36567,9 +36567,9 @@ begin end function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesCreateEnsemble") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - stamp_token_ = convert(tf.TensorHandle, stamp_token_) - tree_ensemble_serialized_ = convert(tf.TensorHandle, tree_ensemble_serialized_) + tree_ensemble_handle_ = convert(tf.EagerTensor, 
tree_ensemble_handle_) + stamp_token_ = convert(tf.EagerTensor, stamp_token_) + tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, stamp_token_) tf.add_input(desc, tree_ensemble_serialized_) @@ -36744,8 +36744,8 @@ begin end function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMin") - input_ = convert(tf.TensorHandle, input_) - dimension_ = convert(tf.TensorHandle, dimension_) + input_ = convert(tf.EagerTensor, input_) + dimension_ = convert(tf.EagerTensor, dimension_) tf.add_input(desc, input_) tf.add_input(desc, dimension_) if output_type !== nothing @@ -36795,8 +36795,8 @@ begin end function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueMany") - handle_ = convert(tf.TensorHandle, handle_) - n_ = convert(tf.TensorHandle, n_) + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -36844,7 +36844,7 @@ begin end function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesSerializeEnsemble") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res) @@ -36883,8 +36883,8 @@ begin end function minimum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Minimum") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -36931,9 +36931,9 @@ begin end function substr_eager(input_, pos_, len_; name=nothing, unit=nothing) desc = tf.EagerOp("Substr") - input_ = convert(tf.TensorHandle, input_) - pos_ = convert(tf.TensorHandle, pos_) - len_ = convert(tf.TensorHandle, len_) + input_ = convert(tf.EagerTensor, input_) + pos_ = convert(tf.EagerTensor, pos_) + len_ = convert(tf.EagerTensor, len_) tf.add_input(desc, input_) tf.add_input(desc, pos_) tf.add_input(desc, len_) @@ -36976,7 +36976,7 @@ begin end function queue_size_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSize") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_size, [handle_], name=nothing, res) @@ -37032,15 +37032,15 @@ begin end function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrlV2") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = 
convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -37110,8 +37110,8 @@ begin end function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - momenta_ = convert(tf.TensorHandle, momenta_) + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) if table_id !== nothing @@ -37167,9 +37167,9 @@ begin end function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentMean") - data_ = convert(tf.TensorHandle, data_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -37223,12 +37223,12 @@ begin end function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -37286,9 +37286,9 @@ begin end function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV2") - handle_ = convert(tf.TensorHandle, handle_) - indices_ = convert(tf.TensorHandle, indices_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, flow_in_) @@ -37335,8 +37335,8 @@ begin end function less_eager(x_, y_; name=nothing) desc = tf.EagerOp("Less") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -37425,8 +37425,8 @@ begin end function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("UpperBound") - sorted_inputs_ = convert(tf.TensorHandle, sorted_inputs_) - values_ = convert(tf.TensorHandle, values_) + sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) if out_type !== nothing @@ -37475,9 +37475,9 @@ begin end function tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGetItem") 
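# [editor's note] Every hunk in this patch performs the same mechanical rename:
# the generated `*_eager` wrappers now coerce their inputs with
# convert(tf.EagerTensor, x) instead of convert(tf.TensorHandle, x); the rest of
# the wrapper shape is unchanged. A minimal hand-written sketch of that shape,
# assuming the EagerOp/add_input/execute bindings introduced earlier in this
# series (`add_eager` itself is illustrative, not a copy of the generated code):
function add_eager(x_, y_; name=nothing)
    desc = tf.EagerOp("Add")             # describe the "Add" kernel
    x_ = convert(tf.EagerTensor, x_)     # lift Julia values to eager tensor handles
    y_ = convert(tf.EagerTensor, y_)
    tf.add_input(desc, x_)               # register each operand as an op input
    tf.add_input(desc, y_)
    desc["T"] = tf.data_type(x_)         # infer the "T" dtype attribute from the input
    res = tf.execute(desc)               # run the kernel eagerly; returns output handles
    return res[1]                        # "Add" has a single output
end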
- input_handle_ = convert(tf.TensorHandle, input_handle_) - index_ = convert(tf.TensorHandle, index_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + index_ = convert(tf.EagerTensor, index_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, index_) tf.add_input(desc, element_shape_) @@ -37528,9 +37528,9 @@ begin end function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVars") - inputs_ = convert(tf.TensorHandle, inputs_) - min_ = convert(tf.TensorHandle, min_) - max_ = convert(tf.TensorHandle, max_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) tf.add_input(desc, max_) @@ -37574,7 +37574,7 @@ begin end function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") - quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) tf.add_input(desc, quantile_stream_resource_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res) @@ -37619,9 +37619,9 @@ begin end function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpToV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - queue_handle_ = convert(tf.TensorHandle, queue_handle_) - num_records_ = convert(tf.TensorHandle, num_records_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + num_records_ = convert(tf.EagerTensor, num_records_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) tf.add_input(desc, num_records_) @@ -37662,8 +37662,8 @@ begin end function complex_eager(real_, imag_; name=nothing) desc = tf.EagerOp("Complex") - real_ = convert(tf.TensorHandle, real_) - imag_ = convert(tf.TensorHandle, imag_) + real_ = convert(tf.EagerTensor, real_) + imag_ = convert(tf.EagerTensor, imag_) tf.add_input(desc, real_) tf.add_input(desc, imag_) desc["T"] = tf.data_type(real_) @@ -37711,8 +37711,8 @@ begin end function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListReserve") - element_shape_ = convert(tf.TensorHandle, element_shape_) - num_elements_ = convert(tf.TensorHandle, num_elements_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + num_elements_ = convert(tf.EagerTensor, num_elements_) tf.add_input(desc, element_shape_) tf.add_input(desc, num_elements_) if element_dtype !== nothing @@ -37760,7 +37760,7 @@ begin end function bitcast_eager(input_; name=nothing, type_=nothing) desc = tf.EagerOp("Bitcast") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if type_ !== nothing desc["type"] = Base.identity(type_) @@ -37905,21 +37905,21 @@ begin end function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, 
gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") - t_ = convert(tf.TensorHandle, t_) - t_min_ = convert(tf.TensorHandle, t_min_) - t_max_ = convert(tf.TensorHandle, t_max_) - m_ = convert(tf.TensorHandle, m_) - m_min_ = convert(tf.TensorHandle, m_min_) - m_max_ = convert(tf.TensorHandle, m_max_) - v_ = convert(tf.TensorHandle, v_) - v_min_ = convert(tf.TensorHandle, v_min_) - v_max_ = convert(tf.TensorHandle, v_max_) - beta_ = convert(tf.TensorHandle, beta_) - beta_min_ = convert(tf.TensorHandle, beta_min_) - beta_max_ = convert(tf.TensorHandle, beta_max_) - gamma_ = convert(tf.TensorHandle, gamma_) - gamma_min_ = convert(tf.TensorHandle, gamma_min_) - gamma_max_ = convert(tf.TensorHandle, gamma_max_) + t_ = convert(tf.EagerTensor, t_) + t_min_ = convert(tf.EagerTensor, t_min_) + t_max_ = convert(tf.EagerTensor, t_max_) + m_ = convert(tf.EagerTensor, m_) + m_min_ = convert(tf.EagerTensor, m_min_) + m_max_ = convert(tf.EagerTensor, m_max_) + v_ = convert(tf.EagerTensor, v_) + v_min_ = convert(tf.EagerTensor, v_min_) + v_max_ = convert(tf.EagerTensor, v_max_) + beta_ = convert(tf.EagerTensor, beta_) + beta_min_ = convert(tf.EagerTensor, beta_min_) + beta_max_ = convert(tf.EagerTensor, beta_max_) + gamma_ = convert(tf.EagerTensor, gamma_) + gamma_min_ = convert(tf.EagerTensor, gamma_min_) + gamma_max_ = convert(tf.EagerTensor, gamma_max_) tf.add_input(desc, t_) tf.add_input(desc, t_min_) tf.add_input(desc, t_max_) @@ -37984,7 +37984,7 @@ begin end function cos_eager(x_; name=nothing) desc = tf.EagerOp("Cos") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -38034,9 +38034,9 @@ begin end function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizeDownAndShrinkRange") - input_ = convert(tf.TensorHandle, input_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -38086,8 +38086,8 @@ begin end function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRandomDataset") - seed_ = convert(tf.TensorHandle, seed_) - seed2_ = convert(tf.TensorHandle, seed2_) + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) tf.add_input(desc, seed_) tf.add_input(desc, seed2_) if output_types !== nothing @@ -38143,9 +38143,9 @@ begin end function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("Rpc") - address_ = convert(tf.TensorHandle, address_) - method_ = convert(tf.TensorHandle, method_) - request_ = convert(tf.TensorHandle, request_) + address_ = convert(tf.EagerTensor, address_) + method_ = convert(tf.EagerTensor, method_) + request_ = convert(tf.EagerTensor, request_) tf.add_input(desc, address_) tf.add_input(desc, method_) tf.add_input(desc, request_) @@ -38235,18 +38235,18 @@ begin end function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, 
max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) - max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) - summand_ = convert(tf.TensorHandle, summand_) - min_summand_ = convert(tf.TensorHandle, min_summand_) - max_summand_ = convert(tf.TensorHandle, max_summand_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) + max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) + summand_ = convert(tf.EagerTensor, summand_) + min_summand_ = convert(tf.EagerTensor, min_summand_) + max_summand_ = convert(tf.EagerTensor, max_summand_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -38309,7 +38309,7 @@ begin end function tensor_list_length_eager(input_handle_; name=nothing) desc = tf.EagerOp("TensorListLength") - input_handle_ = convert(tf.TensorHandle, input_handle_) + input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res) @@ -38416,7 +38416,7 @@ begin end function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("StatelessWhile") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if T !== nothing desc["T"] = map(Base.identity, T) @@ -38528,8 +38528,8 @@ begin end function segment_min_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMin") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -38572,9 +38572,9 @@ begin end function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) desc = tf.EagerOp("WriteGraphSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tensor_ = convert(tf.TensorHandle, tensor_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) @@ -38615,8 +38615,8 @@ begin end function cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("CholeskyGrad") - l_ = convert(tf.TensorHandle, l_) - grad_ = convert(tf.TensorHandle, grad_) + l_ = convert(tf.EagerTensor, l_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, l_) tf.add_input(desc, grad_) 
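# [editor's note] The desc["T"] = tf.data_type(...) assignments in these wrappers
# presuppose setindex! overloads on EagerOp that forward to the TFE attribute
# setters. A minimal sketch of the dtype case, written as if inside the
# TensorFlow module where EagerOp, TF_DataType and set_attr_type are in scope;
# this is an assumption about the internals, not the package's actual method
# table, and the other attribute kinds (ints, bools, strings, shapes) would
# need analogous methods:
function Base.setindex!(op::EagerOp, value::TF_DataType, attr_name::AbstractString)
    set_attr_type(op, attr_name, value)  # TFE_OpSetAttrType under the hood
    return value
end
# so that, e.g., op["T"] = data_type(x_) sets the kernel's dtype attribute.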
desc["T"] = tf.data_type(l_) @@ -38678,7 +38678,7 @@ begin end function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LogUniformCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -38740,9 +38740,9 @@ begin end function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeSparse") - sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - sparse_values_ = convert(tf.TensorHandle, sparse_values_) - sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -38791,9 +38791,9 @@ begin end function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) desc = tf.EagerOp("ScatterNdNonAliasingAdd") - input_ = convert(tf.TensorHandle, input_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + input_ = convert(tf.EagerTensor, input_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, input_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -38843,7 +38843,7 @@ begin end function ref_merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefMerge") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== nothing desc["N"] = Base.Int(N) @@ -38894,7 +38894,7 @@ begin end function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorListConcat") - input_handle_ = convert(tf.TensorHandle, input_handle_) + input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) @@ -38966,11 +38966,11 @@ begin end function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNCanonicalToParams") - num_layers_ = convert(tf.TensorHandle, num_layers_) - num_units_ = convert(tf.TensorHandle, num_units_) - input_size_ = convert(tf.TensorHandle, input_size_) - weights_ = convert(tf.TensorHandle, weights_) - biases_ = convert(tf.TensorHandle, biases_) + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) + weights_ = convert(tf.EagerTensor, weights_) + biases_ = convert(tf.EagerTensor, biases_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -39053,14 +39053,14 @@ begin end function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdadelta") - var_ = convert(tf.TensorHandle, var_) - accum_ = 
convert(tf.TensorHandle, accum_) - accum_update_ = convert(tf.TensorHandle, accum_update_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -39114,7 +39114,7 @@ begin end function tensor_array_close_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayClose") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close, [handle_], name=nothing, res) @@ -39153,8 +39153,8 @@ begin end function selu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("SeluGrad") - gradients_ = convert(tf.TensorHandle, gradients_) - outputs_ = convert(tf.TensorHandle, outputs_) + gradients_ = convert(tf.EagerTensor, gradients_) + outputs_ = convert(tf.EagerTensor, outputs_) tf.add_input(desc, gradients_) tf.add_input(desc, outputs_) desc["T"] = tf.data_type(gradients_) @@ -39202,10 +39202,10 @@ begin end function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradImage") - grads_ = convert(tf.TensorHandle, grads_) - boxes_ = convert(tf.TensorHandle, boxes_) - box_ind_ = convert(tf.TensorHandle, box_ind_) - image_size_ = convert(tf.TensorHandle, image_size_) + grads_ = convert(tf.EagerTensor, grads_) + boxes_ = convert(tf.EagerTensor, boxes_) + box_ind_ = convert(tf.EagerTensor, box_ind_) + image_size_ = convert(tf.EagerTensor, image_size_) tf.add_input(desc, grads_) tf.add_input(desc, boxes_) tf.add_input(desc, box_ind_) @@ -39249,8 +39249,8 @@ begin end function rfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT") - input_ = convert(tf.TensorHandle, input_) - fft_length_ = convert(tf.TensorHandle, fft_length_) + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -39297,9 +39297,9 @@ begin end function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSqlDataset") - driver_name_ = convert(tf.TensorHandle, driver_name_) - data_source_name_ = convert(tf.TensorHandle, data_source_name_) - query_ = convert(tf.TensorHandle, query_) + driver_name_ = convert(tf.EagerTensor, driver_name_) + data_source_name_ = convert(tf.EagerTensor, data_source_name_) + query_ = convert(tf.EagerTensor, query_) tf.add_input(desc, driver_name_) tf.add_input(desc, data_source_name_) tf.add_input(desc, query_) @@ -39359,13 +39359,13 @@ begin end function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyPowerSign") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - lr_ = convert(tf.TensorHandle, lr_) - logbase_ = convert(tf.TensorHandle, 
logbase_) - sign_decay_ = convert(tf.TensorHandle, sign_decay_) - beta_ = convert(tf.TensorHandle, beta_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + logbase_ = convert(tf.EagerTensor, logbase_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -39416,7 +39416,7 @@ begin end function matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDeterminant") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -39462,7 +39462,7 @@ begin end function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) desc = tf.EagerOp("StaticRegexReplace") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if pattern !== nothing desc["pattern"] = Base.String(pattern) @@ -39520,7 +39520,7 @@ begin end function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -39576,10 +39576,10 @@ begin end function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseAdd") - sp_indices_ = convert(tf.TensorHandle, sp_indices_) - sp_values_ = convert(tf.TensorHandle, sp_values_) - sp_shape_ = convert(tf.TensorHandle, sp_shape_) - dense_ = convert(tf.TensorHandle, dense_) + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -39623,8 +39623,8 @@ begin end function bias_add_v1_eager(value_, bias_; name=nothing) desc = tf.EagerOp("BiasAddV1") - value_ = convert(tf.TensorHandle, value_) - bias_ = convert(tf.TensorHandle, bias_) + value_ = convert(tf.EagerTensor, value_) + bias_ = convert(tf.EagerTensor, bias_) tf.add_input(desc, value_) tf.add_input(desc, bias_) desc["T"] = tf.data_type(value_) @@ -39664,7 +39664,7 @@ begin end function invert_permutation_eager(x_; name=nothing) desc = tf.EagerOp("InvertPermutation") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -39782,12 +39782,12 @@ begin end function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("SparseApplyMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - momentum_ = convert(tf.TensorHandle, momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = 
convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -39853,7 +39853,7 @@ begin end function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueue") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -39914,10 +39914,10 @@ begin end function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniformInt") - shape_ = convert(tf.TensorHandle, shape_) - seed_ = convert(tf.TensorHandle, seed_) - minval_ = convert(tf.TensorHandle, minval_) - maxval_ = convert(tf.TensorHandle, maxval_) + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) + minval_ = convert(tf.EagerTensor, minval_) + maxval_ = convert(tf.EagerTensor, maxval_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) tf.add_input(desc, minval_) @@ -39981,10 +39981,10 @@ begin end function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - updates_ = convert(tf.TensorHandle, updates_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + updates_ = convert(tf.EagerTensor, updates_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, updates_) @@ -40051,7 +40051,7 @@ begin end function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Send") - tensor_ = convert(tf.TensorHandle, tensor_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tensor_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -40120,8 +40120,8 @@ begin end function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapPeek") - key_ = convert(tf.TensorHandle, key_) - indices_ = convert(tf.TensorHandle, indices_) + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, key_) tf.add_input(desc, indices_) if capacity !== nothing @@ -40180,10 +40180,10 @@ begin end function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) desc = tf.EagerOp("WriteScalarSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tag_ = convert(tf.TensorHandle, tag_) - value_ = convert(tf.TensorHandle, value_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -40243,7 +40243,7 @@ begin end function ordered_map_unstage_no_key_eager(indices_; name=nothing, 
capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstageNoKey") - indices_ = convert(tf.TensorHandle, indices_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, indices_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -40318,16 +40318,16 @@ begin end function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyCenteredRMSProp") - var_ = convert(tf.TensorHandle, var_) - mg_ = convert(tf.TensorHandle, mg_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -40399,10 +40399,10 @@ begin end function tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatterV2") - tensor_ = convert(tf.TensorHandle, tensor_) - indices_ = convert(tf.TensorHandle, indices_) - element_shape_ = convert(tf.TensorHandle, element_shape_) - num_elements_ = convert(tf.TensorHandle, num_elements_) + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + num_elements_ = convert(tf.EagerTensor, num_elements_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -40467,9 +40467,9 @@ begin end function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInputV2") - input_sizes_ = convert(tf.TensorHandle, input_sizes_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_sizes_ = convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_sizes_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -40590,7 +40590,7 @@ begin end function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomShuffle") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -40656,7 +40656,7 @@ begin end function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("UniformCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) 
tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -40717,10 +40717,10 @@ begin end function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplitV2") - handle_ = convert(tf.TensorHandle, handle_) - value_ = convert(tf.TensorHandle, value_) - lengths_ = convert(tf.TensorHandle, lengths_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, lengths_) @@ -40787,8 +40787,8 @@ begin end function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTableV2") - empty_key_ = convert(tf.TensorHandle, empty_key_) - deleted_key_ = convert(tf.TensorHandle, deleted_key_) + empty_key_ = convert(tf.EagerTensor, empty_key_) + deleted_key_ = convert(tf.EagerTensor, deleted_key_) tf.add_input(desc, empty_key_) tf.add_input(desc, deleted_key_) if container !== nothing @@ -40854,8 +40854,8 @@ begin end function draw_bounding_boxes_eager(images_, boxes_; name=nothing) desc = tf.EagerOp("DrawBoundingBoxes") - images_ = convert(tf.TensorHandle, images_) - boxes_ = convert(tf.TensorHandle, boxes_) + images_ = convert(tf.EagerTensor, images_) + boxes_ = convert(tf.EagerTensor, boxes_) tf.add_input(desc, images_) tf.add_input(desc, boxes_) desc["T"] = tf.data_type(images_) @@ -40911,13 +40911,13 @@ begin end function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -40979,9 +40979,9 @@ begin end function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RangeDataset") - start_ = convert(tf.TensorHandle, start_) - stop_ = convert(tf.TensorHandle, stop_) - step_ = convert(tf.TensorHandle, step_) + start_ = convert(tf.EagerTensor, start_) + stop_ = convert(tf.EagerTensor, stop_) + step_ = convert(tf.EagerTensor, step_) tf.add_input(desc, start_) tf.add_input(desc, stop_) tf.add_input(desc, step_) @@ -41027,8 +41027,8 @@ begin end function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreStateV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - state_ = convert(tf.TensorHandle, state_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + state_ = convert(tf.EagerTensor, state_) tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) 
res = tf.execute(desc) @@ -41076,8 +41076,8 @@ begin end function top_kv2_eager(input_, k_; name=nothing, sorted=nothing) desc = tf.EagerOp("TopKV2") - input_ = convert(tf.TensorHandle, input_) - k_ = convert(tf.TensorHandle, k_) + input_ = convert(tf.EagerTensor, input_) + k_ = convert(tf.EagerTensor, k_) tf.add_input(desc, input_) tf.add_input(desc, k_) if sorted !== nothing @@ -41119,7 +41119,7 @@ begin end function atanh_eager(x_; name=nothing) desc = tf.EagerOp("Atanh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -41157,7 +41157,7 @@ begin end function debug_gradient_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientIdentity") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -41206,10 +41206,10 @@ begin end function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) desc = tf.EagerOp("SparseAddGrad") - backprop_val_grad_ = convert(tf.TensorHandle, backprop_val_grad_) - a_indices_ = convert(tf.TensorHandle, a_indices_) - b_indices_ = convert(tf.TensorHandle, b_indices_) - sum_indices_ = convert(tf.TensorHandle, sum_indices_) + backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + sum_indices_ = convert(tf.EagerTensor, sum_indices_) tf.add_input(desc, backprop_val_grad_) tf.add_input(desc, a_indices_) tf.add_input(desc, b_indices_) @@ -41259,9 +41259,9 @@ begin end function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterAdd") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -41305,7 +41305,7 @@ begin end function ceil_eager(x_; name=nothing) desc = tf.EagerOp("Ceil") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -41349,9 +41349,9 @@ begin end function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing) desc = tf.EagerOp("Save") - filename_ = convert(tf.TensorHandle, filename_) - tensor_names_ = convert(tf.TensorHandle, tensor_names_) - data_ = convert(tf.TensorHandle, data_) + filename_ = convert(tf.EagerTensor, filename_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, filename_) tf.add_input(desc, tensor_names_) tf.add_input(desc, data_) @@ -41468,10 +41468,10 @@ begin end function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) desc = tf.EagerOp("QuantizedConcat") - concat_dim_ = convert(tf.TensorHandle, concat_dim_) - values_ = convert(tf.TensorHandle, values_) - input_mins_ = convert(tf.TensorHandle, input_mins_) - input_maxes_ = convert(tf.TensorHandle, input_maxes_) + concat_dim_ = convert(tf.EagerTensor, concat_dim_) + values_ = convert(tf.EagerTensor, values_) + input_mins_ = convert(tf.EagerTensor, input_mins_) + input_maxes_ = 
convert(tf.EagerTensor, input_maxes_) tf.add_input(desc, concat_dim_) tf.add_input(desc, values_) tf.add_input(desc, input_mins_) @@ -41515,7 +41515,7 @@ begin end function zeros_like_eager(x_; name=nothing) desc = tf.EagerOp("ZerosLike") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -41576,7 +41576,7 @@ begin end function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FractionalAvgPool") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) if pooling_ratio !== nothing desc["pooling_ratio"] = map(Base.identity, pooling_ratio) @@ -41645,12 +41645,12 @@ begin end function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) desc = tf.EagerOp("EditDistance") - hypothesis_indices_ = convert(tf.TensorHandle, hypothesis_indices_) - hypothesis_values_ = convert(tf.TensorHandle, hypothesis_values_) - hypothesis_shape_ = convert(tf.TensorHandle, hypothesis_shape_) - truth_indices_ = convert(tf.TensorHandle, truth_indices_) - truth_values_ = convert(tf.TensorHandle, truth_values_) - truth_shape_ = convert(tf.TensorHandle, truth_shape_) + hypothesis_indices_ = convert(tf.EagerTensor, hypothesis_indices_) + hypothesis_values_ = convert(tf.EagerTensor, hypothesis_values_) + hypothesis_shape_ = convert(tf.EagerTensor, hypothesis_shape_) + truth_indices_ = convert(tf.EagerTensor, truth_indices_) + truth_values_ = convert(tf.EagerTensor, truth_values_) + truth_shape_ = convert(tf.EagerTensor, truth_shape_) tf.add_input(desc, hypothesis_indices_) tf.add_input(desc, hypothesis_values_) tf.add_input(desc, hypothesis_shape_) @@ -41708,8 +41708,8 @@ begin end function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueV2") - x_ = convert(tf.TensorHandle, x_) - axis_ = convert(tf.TensorHandle, axis_) + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if out_idx !== nothing @@ -41768,9 +41768,9 @@ begin end function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeAndDequantizeV2") - input_ = convert(tf.TensorHandle, input_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -41839,7 +41839,7 @@ begin end function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) desc = tf.EagerOp("QuantizeAndDequantize") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if signed_input !== nothing desc["signed_input"] = Base.Bool(signed_input) @@ -41901,8 +41901,8 @@ begin end function tensor_list_pop_back_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPopBack") - input_handle_ = 
convert(tf.TensorHandle, input_handle_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -41955,7 +41955,7 @@ begin end function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNanCount") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if device_name !== nothing desc["device_name"] = Base.String(device_name) @@ -42022,14 +42022,14 @@ begin end function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdagradDA") - var_ = convert(tf.TensorHandle, var_) - gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) - grad_ = convert(tf.TensorHandle, grad_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - global_step_ = convert(tf.TensorHandle, global_step_) + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -42097,8 +42097,8 @@ begin end function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNative") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -42149,7 +42149,7 @@ begin end function serialize_iterator_eager(resource_handle_; name=nothing) desc = tf.EagerOp("SerializeIterator") - resource_handle_ = convert(tf.TensorHandle, resource_handle_) + resource_handle_ = convert(tf.EagerTensor, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res) @@ -42185,7 +42185,7 @@ begin end function dataset_to_graph_eager(input_dataset_; name=nothing) desc = tf.EagerOp("DatasetToGraph") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) res = tf.execute(desc) node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res) @@ -42233,7 +42233,7 @@ begin end function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing) desc = tf.EagerOp("TopK") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if k !== nothing desc["k"] = Base.Int(k) @@ -42296,15 +42296,15 @@ begin end function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, 
l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrlV2") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - l2_shrinkage_ = convert(tf.TensorHandle, l2_shrinkage_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -42363,7 +42363,7 @@ begin end function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastRecv") - shape_ = convert(tf.TensorHandle, shape_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, shape_) if num_devices !== nothing desc["num_devices"] = Base.Int(num_devices) @@ -42405,7 +42405,7 @@ begin end function queue_is_closed_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosed") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res) @@ -42456,10 +42456,10 @@ begin end function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) - seed_ = convert(tf.TensorHandle, seed_) - seed2_ = convert(tf.TensorHandle, seed2_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) tf.add_input(desc, input_dataset_) tf.add_input(desc, buffer_size_) tf.add_input(desc, seed_) @@ -42516,7 +42516,7 @@ begin end function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeSparse") - serialized_sparse_ = convert(tf.TensorHandle, serialized_sparse_) + serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) tf.add_input(desc, serialized_sparse_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -42666,7 +42666,7 @@ begin end function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("TruncatedNormal") - shape_ = convert(tf.TensorHandle, shape_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, shape_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -42717,8 +42717,8 @@ begin end function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) desc = tf.EagerOp("TensorForestTreePredict") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) - dense_features_ = convert(tf.TensorHandle, dense_features_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + dense_features_ = convert(tf.EagerTensor, dense_features_) tf.add_input(desc, 
tree_handle_) tf.add_input(desc, dense_features_) if logits_dimension !== nothing @@ -42764,7 +42764,7 @@ begin end function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) desc = tf.EagerOp("StackV2") - max_size_ = convert(tf.TensorHandle, max_size_) + max_size_ = convert(tf.EagerTensor, max_size_) tf.add_input(desc, max_size_) if elem_type !== nothing desc["elem_type"] = Base.identity(elem_type) @@ -42806,7 +42806,7 @@ begin end function accumulator_num_accumulated_eager(handle_; name=nothing) desc = tf.EagerOp("AccumulatorNumAccumulated") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res) @@ -42842,7 +42842,7 @@ begin end function reader_reset_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderResetV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) tf.add_input(desc, reader_handle_) res = tf.execute(desc) node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res) @@ -42894,13 +42894,13 @@ begin end function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAddSign") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - lr_ = convert(tf.TensorHandle, lr_) - alpha_ = convert(tf.TensorHandle, alpha_) - sign_decay_ = convert(tf.TensorHandle, sign_decay_) - beta_ = convert(tf.TensorHandle, beta_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + alpha_ = convert(tf.EagerTensor, alpha_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -43014,7 +43014,7 @@ begin end function rint_eager(x_; name=nothing) desc = tf.EagerOp("Rint") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -43128,9 +43128,9 @@ begin end function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) desc = tf.EagerOp("ExtractGlimpse") - input_ = convert(tf.TensorHandle, input_) - size_ = convert(tf.TensorHandle, size_) - offsets_ = convert(tf.TensorHandle, offsets_) + input_ = convert(tf.EagerTensor, input_) + size_ = convert(tf.EagerTensor, size_) + offsets_ = convert(tf.EagerTensor, offsets_) tf.add_input(desc, input_) tf.add_input(desc, size_) tf.add_input(desc, offsets_) @@ -43186,7 +43186,7 @@ begin end function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing) desc = tf.EagerOp("StringToHashBucketStrong") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -43309,12 +43309,12 @@ begin end function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - 
grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - momentum_ = convert(tf.TensorHandle, momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -43374,10 +43374,10 @@ begin end function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) desc = tf.EagerOp("SaveSlices") - filename_ = convert(tf.TensorHandle, filename_) - tensor_names_ = convert(tf.TensorHandle, tensor_names_) - shapes_and_slices_ = convert(tf.TensorHandle, shapes_and_slices_) - data_ = convert(tf.TensorHandle, data_) + filename_ = convert(tf.EagerTensor, filename_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shapes_and_slices_ = convert(tf.EagerTensor, shapes_and_slices_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, filename_) tf.add_input(desc, tensor_names_) tf.add_input(desc, shapes_and_slices_) @@ -43419,7 +43419,7 @@ begin end function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetCardinality") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) tf.add_input(desc, input_dataset_) res = tf.execute(desc) node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res) @@ -43456,7 +43456,7 @@ begin end function is_finite_eager(x_; name=nothing) desc = tf.EagerOp("IsFinite") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -43516,11 +43516,11 @@ begin end function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - num_parallel_calls_ = convert(tf.TensorHandle, num_parallel_calls_) - drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, batch_size_) @@ -43587,8 +43587,8 @@ begin end function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) desc = tf.EagerOp("AllToAll") - input_ = convert(tf.TensorHandle, input_) - group_assignment_ = convert(tf.TensorHandle, group_assignment_) + input_ = convert(tf.EagerTensor, input_) + group_assignment_ = convert(tf.EagerTensor, group_assignment_) tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) if concat_dimension !== nothing @@ -43649,7 +43649,7 @@ begin end function take_many_sparse_from_tensors_map_eager(sparse_handles_; 
name=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TakeManySparseFromTensorsMap") - sparse_handles_ = convert(tf.TensorHandle, sparse_handles_) + sparse_handles_ = convert(tf.EagerTensor, sparse_handles_) tf.add_input(desc, sparse_handles_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -43695,7 +43695,7 @@ begin end function batch_matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDiagPart") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -43740,11 +43740,11 @@ begin end function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDataset") - filenames_ = convert(tf.TensorHandle, filenames_) - header_bytes_ = convert(tf.TensorHandle, header_bytes_) - record_bytes_ = convert(tf.TensorHandle, record_bytes_) - footer_bytes_ = convert(tf.TensorHandle, footer_bytes_) - buffer_size_ = convert(tf.TensorHandle, buffer_size_) + filenames_ = convert(tf.EagerTensor, filenames_) + header_bytes_ = convert(tf.EagerTensor, header_bytes_) + record_bytes_ = convert(tf.EagerTensor, record_bytes_) + footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) tf.add_input(desc, filenames_) tf.add_input(desc, header_bytes_) tf.add_input(desc, record_bytes_) @@ -43790,8 +43790,8 @@ begin end function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPush") - handle_ = convert(tf.TensorHandle, handle_) - elem_ = convert(tf.TensorHandle, elem_) + handle_ = convert(tf.EagerTensor, handle_) + elem_ = convert(tf.EagerTensor, elem_) tf.add_input(desc, handle_) tf.add_input(desc, elem_) if swap_memory !== nothing @@ -43880,9 +43880,9 @@ begin end function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorInit") - dataset_ = convert(tf.TensorHandle, dataset_) - multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) - max_buffer_size_ = convert(tf.TensorHandle, max_buffer_size_) + dataset_ = convert(tf.EagerTensor, dataset_) + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) + max_buffer_size_ = convert(tf.EagerTensor, max_buffer_size_) tf.add_input(desc, dataset_) tf.add_input(desc, multi_device_iterator_) tf.add_input(desc, max_buffer_size_) @@ -43924,9 +43924,9 @@ begin end function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) desc = tf.EagerOp("GcsConfigureBlockCache") - max_cache_size_ = convert(tf.TensorHandle, max_cache_size_) - block_size_ = convert(tf.TensorHandle, block_size_) - max_staleness_ = convert(tf.TensorHandle, max_staleness_) + max_cache_size_ = convert(tf.EagerTensor, max_cache_size_) + block_size_ = convert(tf.EagerTensor, block_size_) + max_staleness_ = convert(tf.EagerTensor, max_staleness_) tf.add_input(desc, max_cache_size_) tf.add_input(desc, block_size_) tf.add_input(desc, max_staleness_) @@ -43970,7 +43970,7 @@ begin end function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if component_types !== nothing desc["component_types"] = 
map(Base.identity, component_types) @@ -44077,8 +44077,8 @@ begin end function transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("Transpose") - x_ = convert(tf.TensorHandle, x_) - perm_ = convert(tf.TensorHandle, perm_) + x_ = convert(tf.EagerTensor, x_) + perm_ = convert(tf.EagerTensor, perm_) tf.add_input(desc, x_) tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) @@ -44118,7 +44118,7 @@ begin end function ifft_eager(input_; name=nothing) desc = tf.EagerOp("IFFT") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -44165,10 +44165,10 @@ begin end function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSumWithNumSegments") - data_ = convert(tf.TensorHandle, data_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -44210,7 +44210,7 @@ begin end function queue_is_closed_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosedV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res) @@ -44265,11 +44265,11 @@ begin end function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("ParameterizedTruncatedNormal") - shape_ = convert(tf.TensorHandle, shape_) - means_ = convert(tf.TensorHandle, means_) - stdevs_ = convert(tf.TensorHandle, stdevs_) - minvals_ = convert(tf.TensorHandle, minvals_) - maxvals_ = convert(tf.TensorHandle, maxvals_) + shape_ = convert(tf.EagerTensor, shape_) + means_ = convert(tf.EagerTensor, means_) + stdevs_ = convert(tf.EagerTensor, stdevs_) + minvals_ = convert(tf.EagerTensor, minvals_) + maxvals_ = convert(tf.EagerTensor, maxvals_) tf.add_input(desc, shape_) tf.add_input(desc, means_) tf.add_input(desc, stdevs_) @@ -44324,7 +44324,7 @@ begin end function diag_part_eager(input_; name=nothing) desc = tf.EagerOp("DiagPart") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -44367,10 +44367,10 @@ begin end function kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) desc = tf.EagerOp("KmeansPlusPlusInitialization") - points_ = convert(tf.TensorHandle, points_) - num_to_sample_ = convert(tf.TensorHandle, num_to_sample_) - seed_ = convert(tf.TensorHandle, seed_) - num_retries_per_sample_ = convert(tf.TensorHandle, num_retries_per_sample_) + points_ = convert(tf.EagerTensor, points_) + num_to_sample_ = convert(tf.EagerTensor, num_to_sample_) + seed_ = convert(tf.EagerTensor, seed_) + num_retries_per_sample_ = convert(tf.EagerTensor, num_retries_per_sample_) tf.add_input(desc, points_) tf.add_input(desc, num_to_sample_) tf.add_input(desc, seed_) @@ -44416,9 +44416,9 @@ begin end function 
regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) desc = tf.EagerOp("RegexReplace") - input_ = convert(tf.TensorHandle, input_) - pattern_ = convert(tf.TensorHandle, pattern_) - rewrite_ = convert(tf.TensorHandle, rewrite_) + input_ = convert(tf.EagerTensor, input_) + pattern_ = convert(tf.EagerTensor, pattern_) + rewrite_ = convert(tf.EagerTensor, rewrite_) tf.add_input(desc, input_) tf.add_input(desc, pattern_) tf.add_input(desc, rewrite_) @@ -44474,10 +44474,10 @@ begin end function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) desc = tf.EagerOp("SparseTensorDenseMatMul") - a_indices_ = convert(tf.TensorHandle, a_indices_) - a_values_ = convert(tf.TensorHandle, a_values_) - a_shape_ = convert(tf.TensorHandle, a_shape_) - b_ = convert(tf.TensorHandle, b_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_ = convert(tf.EagerTensor, b_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -44542,8 +44542,8 @@ begin end function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) desc = tf.EagerOp("MapDefun") - arguments_ = convert(tf.TensorHandle, arguments_) - captured_inputs_ = convert(tf.TensorHandle, captured_inputs_) + arguments_ = convert(tf.EagerTensor, arguments_) + captured_inputs_ = convert(tf.EagerTensor, captured_inputs_) tf.add_input(desc, arguments_) tf.add_input(desc, captured_inputs_) if Targuments !== nothing @@ -44618,7 +44618,7 @@ begin end function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler") - true_classes_ = convert(tf.TensorHandle, true_classes_) + true_classes_ = convert(tf.EagerTensor, true_classes_) tf.add_input(desc, true_classes_) if num_true !== nothing desc["num_true"] = Base.Int(num_true) @@ -44740,7 +44740,7 @@ begin end function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("ParallelConcat") - values_ = convert(tf.TensorHandle, values_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, values_) if N !== nothing desc["N"] = Base.Int(N) @@ -44789,9 +44789,9 @@ begin end function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFindV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - default_value_ = convert(tf.TensorHandle, default_value_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + default_value_ = convert(tf.EagerTensor, default_value_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, default_value_) @@ -44833,8 +44833,8 @@ begin end function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestTreeDeserialize") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) - tree_config_ = convert(tf.TensorHandle, tree_config_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + tree_config_ = convert(tf.EagerTensor, tree_config_) tf.add_input(desc, tree_handle_) tf.add_input(desc, tree_config_) res = 
tf.execute(desc) @@ -44944,7 +44944,7 @@ begin end function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgs") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if min !== nothing desc["min"] = Base.identity(min) @@ -45000,9 +45000,9 @@ begin end function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyGradientDescent") - var_ = convert(tf.TensorHandle, var_) - alpha_ = convert(tf.TensorHandle, alpha_) - delta_ = convert(tf.TensorHandle, delta_) + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, delta_) @@ -45057,10 +45057,10 @@ begin end function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSlidingWindowDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - window_size_ = convert(tf.TensorHandle, window_size_) - window_shift_ = convert(tf.TensorHandle, window_shift_) - window_stride_ = convert(tf.TensorHandle, window_stride_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + window_size_ = convert(tf.EagerTensor, window_size_) + window_shift_ = convert(tf.EagerTensor, window_shift_) + window_stride_ = convert(tf.EagerTensor, window_stride_) tf.add_input(desc, input_dataset_) tf.add_input(desc, window_size_) tf.add_input(desc, window_shift_) @@ -45111,7 +45111,7 @@ begin end function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing) desc = tf.EagerOp("DecodeRaw") - bytes_ = convert(tf.TensorHandle, bytes_) + bytes_ = convert(tf.EagerTensor, bytes_) tf.add_input(desc, bytes_) if out_type !== nothing desc["out_type"] = Base.identity(out_type) @@ -45170,10 +45170,10 @@ begin end function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") - gradients_ = convert(tf.TensorHandle, gradients_) - inputs_ = convert(tf.TensorHandle, inputs_) - min_ = convert(tf.TensorHandle, min_) - max_ = convert(tf.TensorHandle, max_) + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) tf.add_input(desc, gradients_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) @@ -45230,8 +45230,8 @@ begin end function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCountsV2") - x_ = convert(tf.TensorHandle, x_) - axis_ = convert(tf.TensorHandle, axis_) + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, x_) tf.add_input(desc, axis_) if out_idx !== nothing @@ -45281,8 +45281,8 @@ begin end function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSleepDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - sleep_microseconds_ = convert(tf.TensorHandle, sleep_microseconds_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) 
+ sleep_microseconds_ = convert(tf.EagerTensor, sleep_microseconds_) tf.add_input(desc, input_dataset_) tf.add_input(desc, sleep_microseconds_) if output_types !== nothing @@ -45334,7 +45334,7 @@ begin end function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing) desc = tf.EagerOp("TPUReplicatedOutput") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if num_replicas !== nothing desc["num_replicas"] = Base.Int(num_replicas) @@ -45380,8 +45380,8 @@ begin end function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("LowerBound") - sorted_inputs_ = convert(tf.TensorHandle, sorted_inputs_) - values_ = convert(tf.TensorHandle, values_) + sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, sorted_inputs_) tf.add_input(desc, values_) if out_type !== nothing @@ -45424,7 +45424,7 @@ begin end function tan_eager(x_; name=nothing) desc = tf.EagerOp("Tan") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -45471,7 +45471,7 @@ begin end function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("Enter") - data_ = convert(tf.TensorHandle, data_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, data_) if frame_name !== nothing desc["frame_name"] = Base.String(frame_name) @@ -45529,7 +45529,7 @@ begin end function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueueTuple") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -45577,7 +45577,7 @@ begin end function _set_global_tpu_array_eager(topology_; name=nothing) desc = tf.EagerOp("_SetGlobalTPUArray") - topology_ = convert(tf.TensorHandle, topology_) + topology_ = convert(tf.EagerTensor, topology_) tf.add_input(desc, topology_) res = tf.execute(desc) node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) @@ -45614,7 +45614,7 @@ begin end function square_eager(x_; name=nothing) desc = tf.EagerOp("Square") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -45652,7 +45652,7 @@ begin end function debug_gradient_ref_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientRefIdentity") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -45705,13 +45705,13 @@ begin end function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdadelta") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - accum_update_ = convert(tf.TensorHandle, accum_update_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + 
rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, accum_update_) @@ -45793,10 +45793,10 @@ begin end function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByWindowDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - key_func_other_arguments_ = convert(tf.TensorHandle, key_func_other_arguments_) - reduce_func_other_arguments_ = convert(tf.TensorHandle, reduce_func_other_arguments_) - window_size_func_other_arguments_ = convert(tf.TensorHandle, window_size_func_other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) + window_size_func_other_arguments_ = convert(tf.EagerTensor, window_size_func_other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) tf.add_input(desc, reduce_func_other_arguments_) @@ -45867,8 +45867,8 @@ begin end function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) desc = tf.EagerOp("AudioSummary") - tag_ = convert(tf.TensorHandle, tag_) - tensor_ = convert(tf.TensorHandle, tensor_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, tag_) tf.add_input(desc, tensor_) if sample_rate !== nothing @@ -45914,8 +45914,8 @@ begin end function squared_difference_eager(x_, y_; name=nothing) desc = tf.EagerOp("SquaredDifference") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -45968,8 +45968,8 @@ begin end function experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalTakeWhileDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) if predicate !== nothing @@ -46028,9 +46028,9 @@ begin end function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdUpdate") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -46080,8 +46080,8 @@ begin end function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) desc = tf.EagerOp("DynamicStitch") - indices_ = convert(tf.TensorHandle, 
indices_) - data_ = convert(tf.TensorHandle, data_) + indices_ = convert(tf.EagerTensor, indices_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, indices_) tf.add_input(desc, data_) if N !== nothing @@ -46123,7 +46123,7 @@ begin end function ones_like_eager(x_; name=nothing) desc = tf.EagerOp("OnesLike") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -46172,11 +46172,11 @@ begin end function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalMaxPoolGrad") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) - row_pooling_sequence_ = convert(tf.TensorHandle, row_pooling_sequence_) - col_pooling_sequence_ = convert(tf.TensorHandle, col_pooling_sequence_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, out_backprop_) @@ -46233,8 +46233,8 @@ begin end function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("RemoteCall") - target_ = convert(tf.TensorHandle, target_) - args_ = convert(tf.TensorHandle, args_) + target_ = convert(tf.EagerTensor, target_) + args_ = convert(tf.EagerTensor, args_) tf.add_input(desc, target_) tf.add_input(desc, args_) if Tin !== nothing @@ -46288,8 +46288,8 @@ begin end function gather_eager(params_, indices_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("Gather") - params_ = convert(tf.TensorHandle, params_) - indices_ = convert(tf.TensorHandle, indices_) + params_ = convert(tf.EagerTensor, params_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, params_) tf.add_input(desc, indices_) if validate_indices !== nothing @@ -46354,12 +46354,12 @@ begin end function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("QuantizedMatMul") - a_ = convert(tf.TensorHandle, a_) - b_ = convert(tf.TensorHandle, b_) - min_a_ = convert(tf.TensorHandle, min_a_) - max_a_ = convert(tf.TensorHandle, max_a_) - min_b_ = convert(tf.TensorHandle, min_b_) - max_b_ = convert(tf.TensorHandle, max_b_) + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + min_a_ = convert(tf.EagerTensor, min_a_) + max_a_ = convert(tf.EagerTensor, max_a_) + min_b_ = convert(tf.EagerTensor, min_b_) + max_b_ = convert(tf.EagerTensor, max_b_) tf.add_input(desc, a_) tf.add_input(desc, b_) tf.add_input(desc, min_a_) @@ -46425,7 +46425,7 @@ begin end function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecodeWithOffsets") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if input_encoding !== nothing desc["input_encoding"] = Base.String(input_encoding) @@ -46491,10 +46491,10 @@ begin end function 
enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") - sample_indices_ = convert(tf.TensorHandle, sample_indices_) - embedding_indices_ = convert(tf.TensorHandle, embedding_indices_) - aggregation_weights_ = convert(tf.TensorHandle, aggregation_weights_) - mode_override_ = convert(tf.TensorHandle, mode_override_) + sample_indices_ = convert(tf.EagerTensor, sample_indices_) + embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) + aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) + mode_override_ = convert(tf.EagerTensor, mode_override_) tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) tf.add_input(desc, aggregation_weights_) @@ -46553,9 +46553,9 @@ begin end function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorApplyGradient") - handle_ = convert(tf.TensorHandle, handle_) - local_step_ = convert(tf.TensorHandle, local_step_) - gradient_ = convert(tf.TensorHandle, gradient_) + handle_ = convert(tf.EagerTensor, handle_) + local_step_ = convert(tf.EagerTensor, local_step_) + gradient_ = convert(tf.EagerTensor, gradient_) tf.add_input(desc, handle_) tf.add_input(desc, local_step_) tf.add_input(desc, gradient_) @@ -46606,11 +46606,11 @@ begin end function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) desc = tf.EagerOp("WriteSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tensor_ = convert(tf.TensorHandle, tensor_) - tag_ = convert(tf.TensorHandle, tag_) - summary_metadata_ = convert(tf.TensorHandle, summary_metadata_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tensor_ = convert(tf.EagerTensor, tensor_) + tag_ = convert(tf.EagerTensor, tag_) + summary_metadata_ = convert(tf.EagerTensor, summary_metadata_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tensor_) @@ -46680,12 +46680,12 @@ begin end function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2D") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -46755,11 +46755,11 @@ begin end function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - momentum_ = convert(tf.TensorHandle, 
momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + momentum_ = convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -46809,7 +46809,7 @@ begin end function log1p_eager(x_; name=nothing) desc = tf.EagerOp("Log1p") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -46918,9 +46918,9 @@ begin end function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterUpdate") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -46982,8 +46982,8 @@ begin end function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) desc = tf.EagerOp("BarrierTakeMany") - handle_ = convert(tf.TensorHandle, handle_) - num_elements_ = convert(tf.TensorHandle, num_elements_) + handle_ = convert(tf.EagerTensor, handle_) + num_elements_ = convert(tf.EagerTensor, num_elements_) tf.add_input(desc, handle_) tf.add_input(desc, num_elements_) if component_types !== nothing @@ -47047,11 +47047,11 @@ begin end function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyKerasMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - momentum_ = convert(tf.TensorHandle, momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + momentum_ = convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -47183,7 +47183,7 @@ begin end function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaRecvAtHost") - dynamic_key_ = convert(tf.TensorHandle, dynamic_key_) + dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) tf.add_input(desc, dynamic_key_) if Toutputs !== nothing desc["Toutputs"] = map(Base.identity, Toutputs) @@ -47247,9 +47247,9 @@ begin end function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedAvgPool") - input_ = convert(tf.TensorHandle, input_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) + input_ = convert(tf.EagerTensor, input_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) tf.add_input(desc, input_) tf.add_input(desc, min_input_) tf.add_input(desc, max_input_) @@ -47321,17 +47321,17 @@ begin end function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc 
= tf.EagerOp("ResourceApplyAdamWithAmsgrad") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - v_ = convert(tf.TensorHandle, v_) - vhat_ = convert(tf.TensorHandle, vhat_) - beta1_power_ = convert(tf.TensorHandle, beta1_power_) - beta2_power_ = convert(tf.TensorHandle, beta2_power_) - lr_ = convert(tf.TensorHandle, lr_) - beta1_ = convert(tf.TensorHandle, beta1_) - beta2_ = convert(tf.TensorHandle, beta2_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + vhat_ = convert(tf.EagerTensor, vhat_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -47389,8 +47389,8 @@ begin end function tensor_list_resize_eager(input_handle_, size_; name=nothing) desc = tf.EagerOp("TensorListResize") - input_handle_ = convert(tf.TensorHandle, input_handle_) - size_ = convert(tf.TensorHandle, size_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, input_handle_) tf.add_input(desc, size_) res = tf.execute(desc) @@ -47503,11 +47503,11 @@ begin end function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) desc = tf.EagerOp("BoostedTreesCenterBias") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - mean_gradients_ = convert(tf.TensorHandle, mean_gradients_) - mean_hessians_ = convert(tf.TensorHandle, mean_hessians_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + mean_gradients_ = convert(tf.EagerTensor, mean_gradients_) + mean_hessians_ = convert(tf.EagerTensor, mean_hessians_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, mean_gradients_) tf.add_input(desc, mean_hessians_) @@ -47547,7 +47547,7 @@ begin end function lookup_table_size_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSizeV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) + table_handle_ = convert(tf.EagerTensor, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res) @@ -47585,8 +47585,8 @@ begin end function irfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT") - input_ = convert(tf.TensorHandle, input_) - fft_length_ = convert(tf.TensorHandle, fft_length_) + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -47628,9 +47628,9 @@ begin end function inplace_add_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceAdd") - x_ = convert(tf.TensorHandle, x_) - i_ = convert(tf.TensorHandle, i_) - v_ = convert(tf.TensorHandle, v_) + x_ = convert(tf.EagerTensor, x_) + i_ = convert(tf.EagerTensor, i_) + v_ = convert(tf.EagerTensor, v_) tf.add_input(desc, x_) tf.add_input(desc, i_) tf.add_input(desc, v_) @@ -47676,8 +47676,8 @@ 
begin end function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAdd") - value_ = convert(tf.TensorHandle, value_) - bias_ = convert(tf.TensorHandle, bias_) + value_ = convert(tf.EagerTensor, value_) + bias_ = convert(tf.EagerTensor, bias_) tf.add_input(desc, value_) tf.add_input(desc, bias_) if data_format !== nothing @@ -47737,10 +47737,10 @@ begin end function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - momenta_ = convert(tf.TensorHandle, momenta_) - velocities_ = convert(tf.TensorHandle, velocities_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + velocities_ = convert(tf.EagerTensor, velocities_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, momenta_) tf.add_input(desc, velocities_) @@ -47834,9 +47834,9 @@ begin end function ragged_range_eager(starts_, limits_, deltas_; name=nothing) desc = tf.EagerOp("RaggedRange") - starts_ = convert(tf.TensorHandle, starts_) - limits_ = convert(tf.TensorHandle, limits_) - deltas_ = convert(tf.TensorHandle, deltas_) + starts_ = convert(tf.EagerTensor, starts_) + limits_ = convert(tf.EagerTensor, limits_) + deltas_ = convert(tf.EagerTensor, deltas_) tf.add_input(desc, starts_) tf.add_input(desc, limits_) tf.add_input(desc, deltas_) @@ -47891,11 +47891,11 @@ begin end function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("WindowDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - size_ = convert(tf.TensorHandle, size_) - shift_ = convert(tf.TensorHandle, shift_) - stride_ = convert(tf.TensorHandle, stride_) - drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + size_ = convert(tf.EagerTensor, size_) + shift_ = convert(tf.EagerTensor, shift_) + stride_ = convert(tf.EagerTensor, stride_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, size_) tf.add_input(desc, shift_) @@ -47942,7 +47942,7 @@ begin end function diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("Diag") - diagonal_ = convert(tf.TensorHandle, diagonal_) + diagonal_ = convert(tf.EagerTensor, diagonal_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(diagonal_) res = tf.execute(desc) @@ -48031,8 +48031,8 @@ begin end function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLatencyStatsDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - tag_ = convert(tf.TensorHandle, tag_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + tag_ = convert(tf.EagerTensor, tag_) tf.add_input(desc, input_dataset_) tf.add_input(desc, tag_) if output_types !== nothing @@ -48086,9 +48086,9 @@ begin end function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddSparseToTensorsMap") - 
sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - sparse_values_ = convert(tf.TensorHandle, sparse_values_) - sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -48151,9 +48151,9 @@ begin end function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedGather") - params_nested_splits_ = convert(tf.TensorHandle, params_nested_splits_) - params_dense_values_ = convert(tf.TensorHandle, params_dense_values_) - indices_ = convert(tf.TensorHandle, indices_) + params_nested_splits_ = convert(tf.EagerTensor, params_nested_splits_) + params_dense_values_ = convert(tf.EagerTensor, params_dense_values_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, params_nested_splits_) tf.add_input(desc, params_dense_values_) tf.add_input(desc, indices_) @@ -48200,7 +48200,7 @@ begin end function rgb_to_hsv_eager(images_; name=nothing) desc = tf.EagerOp("RGBToHSV") - images_ = convert(tf.TensorHandle, images_) + images_ = convert(tf.EagerTensor, images_) tf.add_input(desc, images_) desc["T"] = tf.data_type(images_) res = tf.execute(desc) @@ -48237,7 +48237,7 @@ begin end function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") - multi_device_iterator_ = convert(tf.TensorHandle, multi_device_iterator_) + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) tf.add_input(desc, multi_device_iterator_) res = tf.execute(desc) node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res) @@ -48285,10 +48285,10 @@ begin end function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) desc = tf.EagerOp("For") - start_ = convert(tf.TensorHandle, start_) - limit_ = convert(tf.TensorHandle, limit_) - delta_ = convert(tf.TensorHandle, delta_) - input_ = convert(tf.TensorHandle, input_) + start_ = convert(tf.EagerTensor, start_) + limit_ = convert(tf.EagerTensor, limit_) + delta_ = convert(tf.EagerTensor, delta_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, start_) tf.add_input(desc, limit_) tf.add_input(desc, delta_) @@ -48348,10 +48348,10 @@ begin end function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMaxSparse") - input_indices_ = convert(tf.TensorHandle, input_indices_) - input_values_ = convert(tf.TensorHandle, input_values_) - input_shape_ = convert(tf.TensorHandle, input_shape_) - reduction_axes_ = convert(tf.TensorHandle, reduction_axes_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -48404,8 +48404,8 @@ begin end function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatOffset") - concat_dim_ = convert(tf.TensorHandle, concat_dim_) - shape_ = 
convert(tf.TensorHandle, shape_) + concat_dim_ = convert(tf.EagerTensor, concat_dim_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, concat_dim_) tf.add_input(desc, shape_) if N !== nothing @@ -48460,7 +48460,7 @@ begin end function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Stage") - values_ = convert(tf.TensorHandle, values_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, values_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -48519,8 +48519,8 @@ begin end function switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("Switch") - data_ = convert(tf.TensorHandle, data_) - pred_ = convert(tf.TensorHandle, pred_) + data_ = convert(tf.EagerTensor, data_) + pred_ = convert(tf.EagerTensor, pred_) tf.add_input(desc, data_) tf.add_input(desc, pred_) desc["T"] = tf.data_type(data_) @@ -48566,8 +48566,8 @@ begin end function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueManyV2") - handle_ = convert(tf.TensorHandle, handle_) - n_ = convert(tf.TensorHandle, n_) + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) tf.add_input(desc, handle_) tf.add_input(desc, n_) if component_types !== nothing @@ -48615,8 +48615,8 @@ begin end function segment_prod_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentProd") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -48661,8 +48661,8 @@ begin end function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) desc = tf.EagerOp("ApproximateEqual") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if tolerance !== nothing @@ -48725,8 +48725,8 @@ begin end function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2D") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -48786,8 +48786,8 @@ begin end function cross_replica_sum_eager(input_, group_assignment_; name=nothing) desc = tf.EagerOp("CrossReplicaSum") - input_ = convert(tf.TensorHandle, input_) - group_assignment_ = convert(tf.TensorHandle, group_assignment_) + input_ = convert(tf.EagerTensor, input_) + group_assignment_ = convert(tf.EagerTensor, group_assignment_) tf.add_input(desc, input_) tf.add_input(desc, group_assignment_) desc["T"] = tf.data_type(input_) @@ -48841,8 +48841,8 @@ begin end function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) desc = tf.EagerOp("SparseMatMul") - a_ = convert(tf.TensorHandle, a_) - b_ = convert(tf.TensorHandle, b_) + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) tf.add_input(desc, a_) tf.add_input(desc, b_) if transpose_a !== nothing @@ -48913,8 +48913,8 @@ 
begin end function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) desc = tf.EagerOp("_ScopedAllocatorSplit") - concat_ = convert(tf.TensorHandle, concat_) - split_ = convert(tf.TensorHandle, split_) + concat_ = convert(tf.EagerTensor, concat_) + split_ = convert(tf.EagerTensor, split_) tf.add_input(desc, concat_) tf.add_input(desc, split_) if sa_name !== nothing @@ -48968,8 +48968,8 @@ begin end function igammac_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igammac") - a_ = convert(tf.TensorHandle, a_) - x_ = convert(tf.TensorHandle, x_) + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -49017,8 +49017,8 @@ begin end function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) desc = tf.EagerOp("BatchMatMul") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) if adj_x !== nothing @@ -49078,10 +49078,10 @@ begin end function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = convert(tf.TensorHandle, sample_indices_) - embedding_indices_ = convert(tf.TensorHandle, embedding_indices_) - aggregation_weights_ = convert(tf.TensorHandle, aggregation_weights_) - mode_override_ = convert(tf.TensorHandle, mode_override_) + sample_indices_ = convert(tf.EagerTensor, sample_indices_) + embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) + aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) + mode_override_ = convert(tf.EagerTensor, mode_override_) tf.add_input(desc, sample_indices_) tf.add_input(desc, embedding_indices_) tf.add_input(desc, aggregation_weights_) @@ -49132,7 +49132,7 @@ begin end function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueCloseV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) if cancel_pending_enqueues !== nothing desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) @@ -49179,8 +49179,8 @@ begin end function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayPack") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -49225,8 +49225,8 @@ begin end function reader_restore_state_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreState") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - state_ = convert(tf.TensorHandle, state_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + state_ = convert(tf.EagerTensor, state_) tf.add_input(desc, reader_handle_) tf.add_input(desc, state_) res = tf.execute(desc) @@ -49292,9 +49292,9 @@ begin end function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) desc = 
tf.EagerOp("_FusedConv2D") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - args_ = convert(tf.TensorHandle, args_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + args_ = convert(tf.EagerTensor, args_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, args_) @@ -49365,7 +49365,7 @@ begin end function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing) desc = tf.EagerOp("_ReadVariablesOp") - resources_ = convert(tf.TensorHandle, resources_) + resources_ = convert(tf.EagerTensor, resources_) tf.add_input(desc, resources_) if N !== nothing desc["N"] = Base.Int(N) @@ -49475,7 +49475,7 @@ begin end function read_file_eager(filename_; name=nothing) desc = tf.EagerOp("ReadFile") - filename_ = convert(tf.TensorHandle, filename_) + filename_ = convert(tf.EagerTensor, filename_) tf.add_input(desc, filename_) res = tf.execute(desc) node = tf.TapeNode(read_file, [filename_], name=nothing, res) @@ -49529,10 +49529,10 @@ begin end function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - weights_ = convert(tf.TensorHandle, weights_) - benefits_ = convert(tf.TensorHandle, benefits_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + weights_ = convert(tf.EagerTensor, weights_) + benefits_ = convert(tf.EagerTensor, benefits_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, weights_) @@ -49593,10 +49593,10 @@ begin end function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalAvgPoolGrad") - orig_input_tensor_shape_ = convert(tf.TensorHandle, orig_input_tensor_shape_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) - row_pooling_sequence_ = convert(tf.TensorHandle, row_pooling_sequence_) - col_pooling_sequence_ = convert(tf.TensorHandle, col_pooling_sequence_) + orig_input_tensor_shape_ = convert(tf.EagerTensor, orig_input_tensor_shape_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) tf.add_input(desc, orig_input_tensor_shape_) tf.add_input(desc, out_backprop_) tf.add_input(desc, row_pooling_sequence_) @@ -49655,9 +49655,9 @@ begin end function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, 
gradient_accumulators_) @@ -49718,9 +49718,9 @@ begin end function stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) desc = tf.EagerOp("StatefulStandardNormalV2") - resource_ = convert(tf.TensorHandle, resource_) - algorithm_ = convert(tf.TensorHandle, algorithm_) - shape_ = convert(tf.TensorHandle, shape_) + resource_ = convert(tf.EagerTensor, resource_) + algorithm_ = convert(tf.EagerTensor, algorithm_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, resource_) tf.add_input(desc, algorithm_) tf.add_input(desc, shape_) @@ -49770,9 +49770,9 @@ begin end function bincount_eager(arr_, size_, weights_; name=nothing) desc = tf.EagerOp("Bincount") - arr_ = convert(tf.TensorHandle, arr_) - size_ = convert(tf.TensorHandle, size_) - weights_ = convert(tf.TensorHandle, weights_) + arr_ = convert(tf.EagerTensor, arr_) + size_ = convert(tf.EagerTensor, size_) + weights_ = convert(tf.EagerTensor, weights_) tf.add_input(desc, arr_) tf.add_input(desc, size_) tf.add_input(desc, weights_) @@ -49812,7 +49812,7 @@ begin end function inv_eager(x_; name=nothing) desc = tf.EagerOp("Inv") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -49863,12 +49863,12 @@ begin end function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -49926,9 +49926,9 @@ begin end function gather_v2_eager(params_, indices_, axis_; name=nothing) desc = tf.EagerOp("GatherV2") - params_ = convert(tf.TensorHandle, params_) - indices_ = convert(tf.TensorHandle, indices_) - axis_ = convert(tf.TensorHandle, axis_) + params_ = convert(tf.EagerTensor, params_) + indices_ = convert(tf.EagerTensor, indices_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, params_) tf.add_input(desc, indices_) tf.add_input(desc, axis_) @@ -49971,8 +49971,8 @@ begin end function write_file_eager(filename_, contents_; name=nothing) desc = tf.EagerOp("WriteFile") - filename_ = convert(tf.TensorHandle, filename_) - contents_ = convert(tf.TensorHandle, contents_) + filename_ = convert(tf.EagerTensor, filename_) + contents_ = convert(tf.EagerTensor, contents_) tf.add_input(desc, filename_) tf.add_input(desc, contents_) res = tf.execute(desc) @@ -50014,7 +50014,7 @@ begin end function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesGetEnsembleStates") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res) @@ -50060,8 +50060,8 @@ begin end function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, 
dtype=nothing) desc = tf.EagerOp("ResourceGather") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) if validate_indices !== nothing @@ -50117,11 +50117,11 @@ begin end function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalGradientDescent") - var_ = convert(tf.TensorHandle, var_) - alpha_ = convert(tf.TensorHandle, alpha_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - delta_ = convert(tf.TensorHandle, delta_) + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, var_) tf.add_input(desc, alpha_) tf.add_input(desc, l1_) @@ -50171,8 +50171,8 @@ begin end function truncate_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateMod") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -50217,7 +50217,7 @@ begin end function log_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("LogMatrixDeterminant") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -50256,8 +50256,8 @@ begin end function irfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT2D") - input_ = convert(tf.TensorHandle, input_) - fft_length_ = convert(tf.TensorHandle, fft_length_) + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) tf.add_input(desc, input_) tf.add_input(desc, fft_length_) res = tf.execute(desc) @@ -50311,10 +50311,10 @@ begin end function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesTrainingPredict") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - cached_tree_ids_ = convert(tf.TensorHandle, cached_tree_ids_) - cached_node_ids_ = convert(tf.TensorHandle, cached_node_ids_) - bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + cached_tree_ids_ = convert(tf.EagerTensor, cached_tree_ids_) + cached_node_ids_ = convert(tf.EagerTensor, cached_node_ids_) + bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, cached_tree_ids_) tf.add_input(desc, cached_node_ids_) @@ -50368,9 +50368,9 @@ begin end function nearest_neighbors_eager(points_, centers_, k_; name=nothing) desc = tf.EagerOp("NearestNeighbors") - points_ = convert(tf.TensorHandle, points_) - centers_ = convert(tf.TensorHandle, centers_) - k_ = convert(tf.TensorHandle, k_) + points_ = convert(tf.EagerTensor, points_) + centers_ = convert(tf.EagerTensor, centers_) + k_ = convert(tf.EagerTensor, k_) tf.add_input(desc, points_) tf.add_input(desc, centers_) tf.add_input(desc, k_) @@ -50409,7 +50409,7 @@ begin end function floor_eager(x_; 
name=nothing) desc = tf.EagerOp("Floor") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -50462,9 +50462,9 @@ begin end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - gradient_accumulators_ = convert(tf.TensorHandle, gradient_accumulators_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, gradient_accumulators_) @@ -50526,11 +50526,11 @@ begin end function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) desc = tf.EagerOp("WriteImageSummary") - writer_ = convert(tf.TensorHandle, writer_) - step_ = convert(tf.TensorHandle, step_) - tag_ = convert(tf.TensorHandle, tag_) - tensor_ = convert(tf.TensorHandle, tensor_) - bad_color_ = convert(tf.TensorHandle, bad_color_) + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + bad_color_ = convert(tf.EagerTensor, bad_color_) tf.add_input(desc, writer_) tf.add_input(desc, step_) tf.add_input(desc, tag_) @@ -50577,8 +50577,8 @@ begin end function tile_grad_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("TileGrad") - input_ = convert(tf.TensorHandle, input_) - multiples_ = convert(tf.TensorHandle, multiples_) + input_ = convert(tf.EagerTensor, input_) + multiples_ = convert(tf.EagerTensor, multiples_) tf.add_input(desc, input_) tf.add_input(desc, multiples_) desc["T"] = tf.data_type(input_) @@ -50626,8 +50626,8 @@ begin end function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV3") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -50675,8 +50675,8 @@ begin end function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") - batch_ = convert(tf.TensorHandle, batch_) - mode_override_ = convert(tf.TensorHandle, mode_override_) + batch_ = convert(tf.EagerTensor, batch_) + mode_override_ = convert(tf.EagerTensor, mode_override_) tf.add_input(desc, batch_) tf.add_input(desc, mode_override_) if N !== nothing @@ -50742,11 +50742,11 @@ begin end function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNorm") - x_ = convert(tf.TensorHandle, x_) - scale_ = convert(tf.TensorHandle, scale_) - offset_ = convert(tf.TensorHandle, offset_) - mean_ = convert(tf.TensorHandle, mean_) - variance_ = convert(tf.TensorHandle, variance_) + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + 
offset_ = convert(tf.EagerTensor, offset_) + mean_ = convert(tf.EagerTensor, mean_) + variance_ = convert(tf.EagerTensor, variance_) tf.add_input(desc, x_) tf.add_input(desc, scale_) tf.add_input(desc, offset_) @@ -50802,8 +50802,8 @@ begin end function logical_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalAnd") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) res = tf.execute(desc) @@ -50847,9 +50847,9 @@ begin end function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterUpdate") - tensor_ = convert(tf.TensorHandle, tensor_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -50946,7 +50946,7 @@ begin end function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorSliceDataset") - components_ = convert(tf.TensorHandle, components_) + components_ = convert(tf.EagerTensor, components_) tf.add_input(desc, components_) if Toutput_types !== nothing desc["Toutput_types"] = map(Base.identity, Toutput_types) @@ -50995,10 +50995,10 @@ begin end function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatterV3") - handle_ = convert(tf.TensorHandle, handle_) - indices_ = convert(tf.TensorHandle, indices_) - value_ = convert(tf.TensorHandle, value_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, indices_) tf.add_input(desc, value_) @@ -51044,8 +51044,8 @@ begin end function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighborGrad") - grads_ = convert(tf.TensorHandle, grads_) - size_ = convert(tf.TensorHandle, size_) + grads_ = convert(tf.EagerTensor, grads_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, grads_) tf.add_input(desc, size_) if align_corners !== nothing @@ -51102,13 +51102,13 @@ begin end function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyPowerSign") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - lr_ = convert(tf.TensorHandle, lr_) - logbase_ = convert(tf.TensorHandle, logbase_) - sign_decay_ = convert(tf.TensorHandle, sign_decay_) - beta_ = convert(tf.TensorHandle, beta_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + logbase_ = convert(tf.EagerTensor, logbase_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -51168,8 +51168,8 @@ begin end function experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) desc = 
tf.EagerOp("ExperimentalRebatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - num_workers_ = convert(tf.TensorHandle, num_workers_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_workers_ = convert(tf.EagerTensor, num_workers_) tf.add_input(desc, input_dataset_) tf.add_input(desc, num_workers_) if output_types !== nothing @@ -51219,8 +51219,8 @@ begin end function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPad") - input_ = convert(tf.TensorHandle, input_) - paddings_ = convert(tf.TensorHandle, paddings_) + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) if mode !== nothing @@ -51262,7 +51262,7 @@ begin end function logical_not_eager(x_; name=nothing) desc = tf.EagerOp("LogicalNot") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) res = tf.execute(desc) node = tf.TapeNode(logical_not, [x_], name=nothing, res) @@ -51298,7 +51298,7 @@ begin end function batch_ifft_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft, [input_], name=nothing, res) @@ -51347,8 +51347,8 @@ begin end function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV2") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -51399,8 +51399,8 @@ begin end function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Sum") - input_ = convert(tf.TensorHandle, input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -51450,8 +51450,8 @@ begin end function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesPredict") - tree_ensemble_handle_ = convert(tf.TensorHandle, tree_ensemble_handle_) - bucketized_features_ = convert(tf.TensorHandle, bucketized_features_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) tf.add_input(desc, tree_ensemble_handle_) tf.add_input(desc, bucketized_features_) if num_bucketized_features !== nothing @@ -51530,15 +51530,15 @@ begin end function quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DWithBiasAndReluAndRequantize") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = 
convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) - max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) + max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, bias_) @@ -51614,11 +51614,11 @@ begin end function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -51673,8 +51673,8 @@ begin end function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyReluGrad") - gradients_ = convert(tf.TensorHandle, gradients_) - features_ = convert(tf.TensorHandle, features_) + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) if alpha !== nothing @@ -51720,7 +51720,7 @@ begin end function _device_retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_DeviceRetval") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if index !== nothing desc["index"] = Base.Int(index) @@ -51764,8 +51764,8 @@ begin end function pad_eager(input_, paddings_; name=nothing) desc = tf.EagerOp("Pad") - input_ = convert(tf.TensorHandle, input_) - paddings_ = convert(tf.TensorHandle, paddings_) + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) desc["T"] = tf.data_type(input_) @@ -51815,9 +51815,9 @@ begin end function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddManySparseToTensorsMap") - sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - sparse_values_ = convert(tf.TensorHandle, sparse_values_) - sparse_shape_ = convert(tf.TensorHandle, sparse_shape_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) tf.add_input(desc, sparse_indices_) tf.add_input(desc, sparse_values_) tf.add_input(desc, sparse_shape_) @@ -51872,9 +51872,9 @@ begin end function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) desc = tf.EagerOp("SparseReorder") - input_indices_ = convert(tf.TensorHandle, input_indices_) - 
input_values_ = convert(tf.TensorHandle, input_values_) - input_shape_ = convert(tf.TensorHandle, input_shape_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) tf.add_input(desc, input_indices_) tf.add_input(desc, input_values_) tf.add_input(desc, input_shape_) @@ -51916,8 +51916,8 @@ begin end function bitwise_xor_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseXor") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -51959,8 +51959,8 @@ begin end function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixSetDiag") - input_ = convert(tf.TensorHandle, input_) - diagonal_ = convert(tf.TensorHandle, diagonal_) + input_ = convert(tf.EagerTensor, input_) + diagonal_ = convert(tf.EagerTensor, diagonal_) tf.add_input(desc, input_) tf.add_input(desc, diagonal_) desc["T"] = tf.data_type(input_) @@ -52005,9 +52005,9 @@ begin end function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsertV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -52057,9 +52057,9 @@ begin end function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - row_shape_ = convert(tf.TensorHandle, row_shape_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + row_shape_ = convert(tf.EagerTensor, row_shape_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, row_shape_) @@ -52125,15 +52125,15 @@ begin end function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyRMSProp") - var_ = convert(tf.TensorHandle, var_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) tf.add_input(desc, var_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -52195,8 +52195,8 @@ begin end function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) desc = 
tf.EagerOp("RandomCrop") - image_ = convert(tf.TensorHandle, image_) - size_ = convert(tf.TensorHandle, size_) + image_ = convert(tf.EagerTensor, image_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, image_) tf.add_input(desc, size_) if seed !== nothing @@ -52246,9 +52246,9 @@ begin end function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImportV2") - table_handle_ = convert(tf.TensorHandle, table_handle_) - keys_ = convert(tf.TensorHandle, keys_) - values_ = convert(tf.TensorHandle, values_) + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, table_handle_) tf.add_input(desc, keys_) tf.add_input(desc, values_) @@ -52298,9 +52298,9 @@ begin end function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdUpdate") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -52346,7 +52346,7 @@ begin end function static_regex_full_match_eager(input_; name=nothing, pattern=nothing) desc = tf.EagerOp("StaticRegexFullMatch") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if pattern !== nothing desc["pattern"] = Base.String(pattern) @@ -52385,7 +52385,7 @@ begin end function gcs_configure_credentials_eager(json_; name=nothing) desc = tf.EagerOp("GcsConfigureCredentials") - json_ = convert(tf.TensorHandle, json_) + json_ = convert(tf.EagerTensor, json_) tf.add_input(desc, json_) res = tf.execute(desc) node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res) @@ -52423,8 +52423,8 @@ begin end function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV3") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -52471,10 +52471,10 @@ begin end function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") - data_ = convert(tf.TensorHandle, data_) - indices_ = convert(tf.TensorHandle, indices_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, indices_) tf.add_input(desc, segment_ids_) @@ -52554,11 +52554,11 @@ begin end function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, 
Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - key_func_other_arguments_ = convert(tf.TensorHandle, key_func_other_arguments_) - init_func_other_arguments_ = convert(tf.TensorHandle, init_func_other_arguments_) - reduce_func_other_arguments_ = convert(tf.TensorHandle, reduce_func_other_arguments_) - finalize_func_other_arguments_ = convert(tf.TensorHandle, finalize_func_other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) + init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) + finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, key_func_other_arguments_) tf.add_input(desc, init_func_other_arguments_) @@ -52651,9 +52651,9 @@ begin end function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropFilter") - input_ = convert(tf.TensorHandle, input_) - filter_sizes_ = convert(tf.TensorHandle, filter_sizes_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) tf.add_input(desc, out_backprop_) @@ -52728,9 +52728,9 @@ begin end function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGrad") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -52783,7 +52783,7 @@ begin end function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) desc = tf.EagerOp("_InitializeHostForDistributedTPU") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res) @@ -52834,7 +52834,7 @@ begin end function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StagePeek") - index_ = convert(tf.TensorHandle, index_) + index_ = convert(tf.EagerTensor, index_) tf.add_input(desc, index_) if capacity !== nothing desc["capacity"] = Base.Int(capacity) @@ -52891,9 +52891,9 @@ begin end function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) desc = tf.EagerOp("PadV2") - input_ = convert(tf.TensorHandle, input_) - paddings_ = convert(tf.TensorHandle, paddings_) - constant_values_ = convert(tf.TensorHandle, constant_values_) + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + 
constant_values_ = convert(tf.EagerTensor, constant_values_) tf.add_input(desc, input_) tf.add_input(desc, paddings_) tf.add_input(desc, constant_values_) @@ -52981,7 +52981,7 @@ begin end function print_v2_eager(input_; name=nothing, output_stream=nothing) desc = tf.EagerOp("PrintV2") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if output_stream !== nothing desc["output_stream"] = Base.String(output_stream) @@ -53026,7 +53026,7 @@ begin end function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptionalGetValue") - optional_ = convert(tf.TensorHandle, optional_) + optional_ = convert(tf.EagerTensor, optional_) tf.add_input(desc, optional_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -53084,9 +53084,9 @@ begin end function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - accumulators_ = convert(tf.TensorHandle, accumulators_) - linears_ = convert(tf.TensorHandle, linears_) + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + linears_ = convert(tf.EagerTensor, linears_) tf.add_input(desc, parameters_) tf.add_input(desc, accumulators_) tf.add_input(desc, linears_) @@ -53150,11 +53150,11 @@ begin end function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) desc = tf.EagerOp("SparseSlice") - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) - shape_ = convert(tf.TensorHandle, shape_) - start_ = convert(tf.TensorHandle, start_) - size_ = convert(tf.TensorHandle, size_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shape_ = convert(tf.EagerTensor, shape_) + start_ = convert(tf.EagerTensor, start_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shape_) @@ -53207,9 +53207,9 @@ begin end function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") - float_values_ = convert(tf.TensorHandle, float_values_) - example_weights_ = convert(tf.TensorHandle, example_weights_) - epsilon_ = convert(tf.TensorHandle, epsilon_) + float_values_ = convert(tf.EagerTensor, float_values_) + example_weights_ = convert(tf.EagerTensor, example_weights_) + epsilon_ = convert(tf.EagerTensor, epsilon_) tf.add_input(desc, float_values_) tf.add_input(desc, example_weights_) tf.add_input(desc, epsilon_) @@ -53256,8 +53256,8 @@ begin end function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixSolve") - matrix_ = convert(tf.TensorHandle, matrix_) - rhs_ = convert(tf.TensorHandle, rhs_) + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) tf.add_input(desc, matrix_) tf.add_input(desc, rhs_) if adjoint !== nothing @@ -53302,7 +53302,7 @@ begin end function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("_ConfigureDistributedTPU") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if N !== 
nothing desc["N"] = Base.Int(N) @@ -53344,8 +53344,8 @@ begin end function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing) desc = tf.EagerOp("AdjustContrastv2") - images_ = convert(tf.TensorHandle, images_) - contrast_factor_ = convert(tf.TensorHandle, contrast_factor_) + images_ = convert(tf.EagerTensor, images_) + contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) desc["T"] = tf.data_type(images_) @@ -53395,10 +53395,10 @@ begin end function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMaximum") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - mkl_x_ = convert(tf.TensorHandle, mkl_x_) - mkl_y_ = convert(tf.TensorHandle, mkl_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -53464,9 +53464,9 @@ begin end function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsSize") - num_layers_ = convert(tf.TensorHandle, num_layers_) - num_units_ = convert(tf.TensorHandle, num_units_) - input_size_ = convert(tf.TensorHandle, input_size_) + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) tf.add_input(desc, num_layers_) tf.add_input(desc, num_units_) tf.add_input(desc, input_size_) @@ -53530,8 +53530,8 @@ begin end function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") - quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) - summaries_ = convert(tf.TensorHandle, summaries_) + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + summaries_ = convert(tf.EagerTensor, summaries_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, summaries_) if num_features !== nothing @@ -53571,7 +53571,7 @@ begin end function batch_ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT3D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res) @@ -53608,7 +53608,7 @@ begin end function sigmoid_eager(x_; name=nothing) desc = tf.EagerOp("Sigmoid") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -53650,8 +53650,8 @@ begin end function segment_mean_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMean") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -53690,7 +53690,7 @@ begin end function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") - tree_ensemble_handle_ = 
convert(tf.TensorHandle, tree_ensemble_handle_) + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) tf.add_input(desc, tree_ensemble_handle_) res = tf.execute(desc) node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res) @@ -53728,8 +53728,8 @@ begin end function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV2") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) res = tf.execute(desc) @@ -53778,10 +53778,10 @@ begin end function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSub") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - mkl_x_ = convert(tf.TensorHandle, mkl_x_) - mkl_y_ = convert(tf.TensorHandle, mkl_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -53833,8 +53833,8 @@ begin end function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) desc = tf.EagerOp("SendTPUEmbeddingGradients") - inputs_ = convert(tf.TensorHandle, inputs_) - learning_rates_ = convert(tf.TensorHandle, learning_rates_) + inputs_ = convert(tf.EagerTensor, inputs_) + learning_rates_ = convert(tf.EagerTensor, learning_rates_) tf.add_input(desc, inputs_) tf.add_input(desc, learning_rates_) if N !== nothing @@ -53893,7 +53893,7 @@ begin end function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if ksize !== nothing desc["ksize"] = map(Base.identity, ksize) @@ -53950,8 +53950,8 @@ begin end function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Prod") - input_ = convert(tf.TensorHandle, input_) - reduction_indices_ = convert(tf.TensorHandle, reduction_indices_) + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) tf.add_input(desc, input_) tf.add_input(desc, reduction_indices_) if keep_dims !== nothing @@ -53993,7 +53993,7 @@ begin end function experimental_identity_indexed_dataset_eager(size_; name=nothing) desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") - size_ = convert(tf.TensorHandle, size_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, size_) res = tf.execute(desc) node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res) @@ -54035,8 +54035,8 @@ begin end function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBack") - input_handle_ = convert(tf.TensorHandle, input_handle_) - tensor_ = convert(tf.TensorHandle, tensor_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + tensor_ = convert(tf.EagerTensor, tensor_) tf.add_input(desc, input_handle_) tf.add_input(desc, tensor_) if element_dtype !== nothing @@ -54115,8 +54115,8 @@ begin end function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, 
batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) desc = tf.EagerOp("BatchFunction") - in_tensors_ = convert(tf.TensorHandle, in_tensors_) - captured_tensors_ = convert(tf.TensorHandle, captured_tensors_) + in_tensors_ = convert(tf.EagerTensor, in_tensors_) + captured_tensors_ = convert(tf.EagerTensor, captured_tensors_) tf.add_input(desc, in_tensors_) tf.add_input(desc, captured_tensors_) if f !== nothing @@ -54201,10 +54201,10 @@ begin end function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRows") - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) - dense_shape_ = convert(tf.TensorHandle, dense_shape_) - default_value_ = convert(tf.TensorHandle, default_value_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + dense_shape_ = convert(tf.EagerTensor, dense_shape_) + default_value_ = convert(tf.EagerTensor, default_value_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, dense_shape_) @@ -54254,7 +54254,7 @@ begin end function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("SelfAdjointEigV2") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if compute_v !== nothing desc["compute_v"] = Base.Bool(compute_v) @@ -54377,15 +54377,15 @@ begin end function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagradDA") - var_ = convert(tf.TensorHandle, var_) - gradient_accumulator_ = convert(tf.TensorHandle, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.TensorHandle, gradient_squared_accumulator_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - global_step_ = convert(tf.TensorHandle, global_step_) + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) tf.add_input(desc, var_) tf.add_input(desc, gradient_accumulator_) tf.add_input(desc, gradient_squared_accumulator_) @@ -54503,13 +54503,13 @@ begin end function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAddSign") - var_ = convert(tf.TensorHandle, var_) - m_ = convert(tf.TensorHandle, m_) - lr_ = convert(tf.TensorHandle, lr_) - alpha_ = convert(tf.TensorHandle, alpha_) - sign_decay_ = convert(tf.TensorHandle, sign_decay_) - beta_ = convert(tf.TensorHandle, beta_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + alpha_ = convert(tf.EagerTensor, alpha_) + sign_decay_ = 
convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, m_) tf.add_input(desc, lr_) @@ -54566,9 +54566,9 @@ begin end function roll_eager(input_, shift_, axis_; name=nothing) desc = tf.EagerOp("Roll") - input_ = convert(tf.TensorHandle, input_) - shift_ = convert(tf.TensorHandle, shift_) - axis_ = convert(tf.TensorHandle, axis_) + input_ = convert(tf.EagerTensor, input_) + shift_ = convert(tf.EagerTensor, shift_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, input_) tf.add_input(desc, shift_) tf.add_input(desc, axis_) @@ -54612,8 +54612,8 @@ begin end function xdivy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xdivy") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -54669,9 +54669,9 @@ begin end function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGradGrad") - orig_input_ = convert(tf.TensorHandle, orig_input_) - orig_output_ = convert(tf.TensorHandle, orig_output_) - grad_ = convert(tf.TensorHandle, grad_) + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, orig_input_) tf.add_input(desc, orig_output_) tf.add_input(desc, grad_) @@ -54737,10 +54737,10 @@ begin end function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) desc = tf.EagerOp("CropAndResize") - image_ = convert(tf.TensorHandle, image_) - boxes_ = convert(tf.TensorHandle, boxes_) - box_ind_ = convert(tf.TensorHandle, box_ind_) - crop_size_ = convert(tf.TensorHandle, crop_size_) + image_ = convert(tf.EagerTensor, image_) + boxes_ = convert(tf.EagerTensor, boxes_) + box_ind_ = convert(tf.EagerTensor, box_ind_) + crop_size_ = convert(tf.EagerTensor, crop_size_) tf.add_input(desc, image_) tf.add_input(desc, boxes_) tf.add_input(desc, box_ind_) @@ -54806,12 +54806,12 @@ begin end function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedBiasAdd") - input_ = convert(tf.TensorHandle, input_) - bias_ = convert(tf.TensorHandle, bias_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_bias_ = convert(tf.TensorHandle, min_bias_) - max_bias_ = convert(tf.TensorHandle, max_bias_) + input_ = convert(tf.EagerTensor, input_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_bias_ = convert(tf.EagerTensor, min_bias_) + max_bias_ = convert(tf.EagerTensor, max_bias_) tf.add_input(desc, input_) tf.add_input(desc, bias_) tf.add_input(desc, min_input_) @@ -54859,8 +54859,8 @@ begin end function kmc2chain_initialization_eager(distances_, seed_; name=nothing) desc = tf.EagerOp("KMC2ChainInitialization") - distances_ = convert(tf.TensorHandle, distances_) - seed_ = convert(tf.TensorHandle, seed_) + distances_ = convert(tf.EagerTensor, distances_) + seed_ = convert(tf.EagerTensor, seed_) tf.add_input(desc, distances_) tf.add_input(desc, seed_) res = tf.execute(desc) @@ -54917,7 +54917,7 @@ begin end 
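# A minimal sketch of the wrapper shape this generated file repeats for every op,
# assuming the tf.EagerOp / tf.add_input / tf.execute / tf.data_type helpers from
# src/eager.jl. The only change in this patch is the lifting step: inputs are now
# converted with convert(tf.EagerTensor, x) rather than convert(tf.TensorHandle, x).
# The tape-registration and return lines are elided because they fall outside the
# context windows of the surrounding hunks; bitwise_xor_eager earlier in this diff
# is the real generated instance this sketch is based on.
#
#     function bitwise_xor_eager(x_, y_; name=nothing)
#         desc = tf.EagerOp("BitwiseXor")     # bind the op by kernel name
#         x_ = convert(tf.EagerTensor, x_)    # lift plain Julia values to eager handles
#         y_ = convert(tf.EagerTensor, y_)
#         tf.add_input(desc, x_)              # wire inputs in declaration order
#         tf.add_input(desc, y_)
#         desc["T"] = tf.data_type(x_)        # infer the "T" attr from the input dtype
#         res = tf.execute(desc)              # run eagerly; yields the output handle(s)
#         node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res)
#         ...                                 # tape bookkeeping / return (not shown here)
#     end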
 function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
     desc = tf.EagerOp("MapUnstageNoKey")
-    indices_ = convert(tf.TensorHandle, indices_)
+    indices_ = convert(tf.EagerTensor, indices_)
     tf.add_input(desc, indices_)
     if capacity !== nothing
         desc["capacity"] = Base.Int(capacity)
@@ -54978,9 +54978,9 @@ begin
 end
 function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
     desc = tf.EagerOp("ScatterNdSub")
-    ref_ = convert(tf.TensorHandle, ref_)
-    indices_ = convert(tf.TensorHandle, indices_)
-    updates_ = convert(tf.TensorHandle, updates_)
+    ref_ = convert(tf.EagerTensor, ref_)
+    indices_ = convert(tf.EagerTensor, indices_)
+    updates_ = convert(tf.EagerTensor, updates_)
     tf.add_input(desc, ref_)
     tf.add_input(desc, indices_)
     tf.add_input(desc, updates_)
@@ -55030,8 +55030,8 @@ begin
 end
 function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing)
     desc = tf.EagerOp("ResizeBilinear")
-    images_ = convert(tf.TensorHandle, images_)
-    size_ = convert(tf.TensorHandle, size_)
+    images_ = convert(tf.EagerTensor, images_)
+    size_ = convert(tf.EagerTensor, size_)
     tf.add_input(desc, images_)
     tf.add_input(desc, size_)
     if align_corners !== nothing
@@ -55089,8 +55089,8 @@ begin
 end
 function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing)
     desc = tf.EagerOp("OrderedMapPeek")
-    key_ = convert(tf.TensorHandle, key_)
-    indices_ = convert(tf.TensorHandle, indices_)
+    key_ = convert(tf.EagerTensor, key_)
+    indices_ = convert(tf.EagerTensor, indices_)
     tf.add_input(desc, key_)
     tf.add_input(desc, indices_)
     if capacity !== nothing
@@ -55157,7 +55157,7 @@ begin
 end
 function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing)
     desc = tf.EagerOp("TensorArray")
-    size_ = convert(tf.TensorHandle, size_)
+    size_ = convert(tf.EagerTensor, size_)
     tf.add_input(desc, size_)
     if dtype !== nothing
         desc["dtype"] = Base.identity(dtype)
@@ -55213,9 +55213,9 @@ begin
 end
 function inplace_sub_eager(x_, i_, v_; name=nothing)
     desc = tf.EagerOp("InplaceSub")
-    x_ = convert(tf.TensorHandle, x_)
-    i_ = convert(tf.TensorHandle, i_)
-    v_ = convert(tf.TensorHandle, v_)
+    x_ = convert(tf.EagerTensor, x_)
+    i_ = convert(tf.EagerTensor, i_)
+    v_ = convert(tf.EagerTensor, v_)
     tf.add_input(desc, x_)
     tf.add_input(desc, i_)
     tf.add_input(desc, v_)
@@ -55258,8 +55258,8 @@ begin
 end
 function pow_eager(x_, y_; name=nothing)
     desc = tf.EagerOp("Pow")
-    x_ = convert(tf.TensorHandle, x_)
-    y_ = convert(tf.TensorHandle, y_)
+    x_ = convert(tf.EagerTensor, x_)
+    y_ = convert(tf.EagerTensor, y_)
     tf.add_input(desc, x_)
     tf.add_input(desc, y_)
     desc["T"] = tf.data_type(x_)
@@ -55307,8 +55307,8 @@ begin
 end
 function stateful_standard_normal_eager(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing)
     desc = tf.EagerOp("StatefulStandardNormal")
-    resource_ = convert(tf.TensorHandle, resource_)
-    shape_ = convert(tf.TensorHandle, shape_)
+    resource_ = convert(tf.EagerTensor, resource_)
+    shape_ = convert(tf.EagerTensor, shape_)
     tf.add_input(desc, resource_)
     tf.add_input(desc, shape_)
     if dtype !== nothing
@@ -55353,7 +55353,7 @@ begin
 end
 function ref_next_iteration_eager(data_; name=nothing)
     desc = tf.EagerOp("RefNextIteration")
-    data_ = convert(tf.TensorHandle, data_)
+    data_ = convert(tf.EagerTensor, data_)
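# Two kinds of attributes are set by these wrappers. Static attrs are coerced from
# keyword arguments and guarded so unset options are simply omitted, e.g.
#
#     if capacity !== nothing
#         desc["capacity"] = Base.Int(capacity)
#     end
#
# while dtype attrs such as "T" are inferred at call time from the converted input,
# as in the desc["T"] = tf.data_type(data_) line that follows. That inference is
# presumably why the convert target matters in this rename: data_type is defined on
# the eager handle type, so inputs must be lifted to tf.EagerTensor before the lookup.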
tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -55393,8 +55393,8 @@ begin end function scalar_summary_eager(tags_, values_; name=nothing) desc = tf.EagerOp("ScalarSummary") - tags_ = convert(tf.TensorHandle, tags_) - values_ = convert(tf.TensorHandle, values_) + tags_ = convert(tf.EagerTensor, tags_) + values_ = convert(tf.EagerTensor, values_) tf.add_input(desc, tags_) tf.add_input(desc, values_) desc["T"] = tf.data_type(values_) @@ -55442,8 +55442,8 @@ begin end function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) desc = tf.EagerOp("StringSplitV2") - input_ = convert(tf.TensorHandle, input_) - sep_ = convert(tf.TensorHandle, sep_) + input_ = convert(tf.EagerTensor, input_) + sep_ = convert(tf.EagerTensor, sep_) tf.add_input(desc, input_) tf.add_input(desc, sep_) if maxsplit !== nothing @@ -55484,7 +55484,7 @@ begin end function bessel_i0e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI0e") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -55530,7 +55530,7 @@ begin end function unique_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("Unique") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) if out_idx !== nothing desc["out_idx"] = Base.identity(out_idx) @@ -55586,9 +55586,9 @@ begin end function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(tf.TensorHandle, parameters_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) tf.add_input(desc, parameters_) tf.add_input(desc, ms_) tf.add_input(desc, mom_) @@ -55691,7 +55691,7 @@ begin end function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("EagerPyFunc") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -55737,7 +55737,7 @@ begin end function next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("NextIteration") - data_ = convert(tf.TensorHandle, data_) + data_ = convert(tf.EagerTensor, data_) tf.add_input(desc, data_) desc["T"] = tf.data_type(data_) res = tf.execute(desc) @@ -55788,8 +55788,8 @@ begin end function case_eager(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) desc = tf.EagerOp("Case") - branch_index_ = convert(tf.TensorHandle, branch_index_) - input_ = convert(tf.TensorHandle, input_) + branch_index_ = convert(tf.EagerTensor, branch_index_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, branch_index_) tf.add_input(desc, input_) if Tin !== nothing @@ -55845,9 +55845,9 @@ begin end function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterSub") - tensor_ = convert(tf.TensorHandle, tensor_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, tensor_) tf.add_input(desc, 
indices_) tf.add_input(desc, updates_) @@ -55898,9 +55898,9 @@ begin end function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMax") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -55945,7 +55945,7 @@ begin end function sqrt_eager(x_; name=nothing) desc = tf.EagerOp("Sqrt") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -55987,8 +55987,8 @@ begin end function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorTakeGradient") - handle_ = convert(tf.TensorHandle, handle_) - num_required_ = convert(tf.TensorHandle, num_required_) + handle_ = convert(tf.EagerTensor, handle_) + num_required_ = convert(tf.EagerTensor, num_required_) tf.add_input(desc, handle_) tf.add_input(desc, num_required_) if dtype !== nothing @@ -56040,10 +56040,10 @@ begin end function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklAdd") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - mkl_x_ = convert(tf.TensorHandle, mkl_x_) - mkl_y_ = convert(tf.TensorHandle, mkl_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -56085,7 +56085,7 @@ begin end function reciprocal_eager(x_; name=nothing) desc = tf.EagerOp("Reciprocal") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -56125,7 +56125,7 @@ begin end function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) desc = tf.EagerOp("OutfeedEnqueueTuple") - inputs_ = convert(tf.TensorHandle, inputs_) + inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) if dtypes !== nothing desc["dtypes"] = map(Base.identity, dtypes) @@ -56164,7 +56164,7 @@ begin end function string_strip_eager(input_; name=nothing) desc = tf.EagerOp("StringStrip") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(string_strip, [input_], name=nothing, res) @@ -56210,9 +56210,9 @@ begin end function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") - inputs_ = convert(tf.TensorHandle, inputs_) - min_ = convert(tf.TensorHandle, min_) - max_ = convert(tf.TensorHandle, max_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) tf.add_input(desc, inputs_) tf.add_input(desc, min_) tf.add_input(desc, max_) @@ -56256,7 +56256,7 @@ begin end function barrier_ready_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierReadySize") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_ready_size, [handle_], 
name=nothing, res) @@ -56295,7 +56295,7 @@ begin end function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucket") - string_tensor_ = convert(tf.TensorHandle, string_tensor_) + string_tensor_ = convert(tf.EagerTensor, string_tensor_) tf.add_input(desc, string_tensor_) if num_buckets !== nothing desc["num_buckets"] = Base.Int(num_buckets) @@ -56347,8 +56347,8 @@ begin end function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcat") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if dtype !== nothing @@ -56395,9 +56395,9 @@ begin end function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilename") - basename_ = convert(tf.TensorHandle, basename_) - shard_ = convert(tf.TensorHandle, shard_) - num_shards_ = convert(tf.TensorHandle, num_shards_) + basename_ = convert(tf.EagerTensor, basename_) + shard_ = convert(tf.EagerTensor, shard_) + num_shards_ = convert(tf.EagerTensor, num_shards_) tf.add_input(desc, basename_) tf.add_input(desc, shard_) tf.add_input(desc, num_shards_) @@ -56444,7 +56444,7 @@ begin end function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFunc") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if token !== nothing desc["token"] = Base.String(token) @@ -56497,9 +56497,9 @@ begin end function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentProd") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -56544,7 +56544,7 @@ begin end function count_up_to_eager(ref_; name=nothing, limit=nothing) desc = tf.EagerOp("CountUpTo") - ref_ = convert(tf.TensorHandle, ref_) + ref_ = convert(tf.EagerTensor, ref_) tf.add_input(desc, ref_) if limit !== nothing desc["limit"] = Base.Int(limit) @@ -56597,8 +56597,8 @@ begin end function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) desc = tf.EagerOp("RandomGamma") - shape_ = convert(tf.TensorHandle, shape_) - alpha_ = convert(tf.TensorHandle, alpha_) + shape_ = convert(tf.EagerTensor, shape_) + alpha_ = convert(tf.EagerTensor, alpha_) tf.add_input(desc, shape_) tf.add_input(desc, alpha_) if seed !== nothing @@ -56651,8 +56651,8 @@ begin end function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGrad") - handle_ = convert(tf.TensorHandle, handle_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, flow_in_) if source !== nothing @@ -56704,8 +56704,8 @@ begin end function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = 
tf.EagerOp("Dilation2D") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) if strides !== nothing @@ -56767,9 +56767,9 @@ begin end function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unbatch") - batched_tensor_ = convert(tf.TensorHandle, batched_tensor_) - batch_index_ = convert(tf.TensorHandle, batch_index_) - id_ = convert(tf.TensorHandle, id_) + batched_tensor_ = convert(tf.EagerTensor, batched_tensor_) + batch_index_ = convert(tf.EagerTensor, batch_index_) + id_ = convert(tf.EagerTensor, id_) tf.add_input(desc, batched_tensor_) tf.add_input(desc, batch_index_) tf.add_input(desc, id_) @@ -56818,7 +56818,7 @@ begin end function get_session_handle_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandle") - value_ = convert(tf.TensorHandle, value_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, value_) desc["T"] = tf.data_type(value_) res = tf.execute(desc) @@ -57006,15 +57006,15 @@ begin end function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrl") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - linear_ = convert(tf.TensorHandle, linear_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - lr_ = convert(tf.TensorHandle, lr_) - l1_ = convert(tf.TensorHandle, l1_) - l2_ = convert(tf.TensorHandle, l2_) - lr_power_ = convert(tf.TensorHandle, lr_power_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, linear_) @@ -57080,9 +57080,9 @@ begin end function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDatasetV2") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - batch_size_ = convert(tf.TensorHandle, batch_size_) - drop_remainder_ = convert(tf.TensorHandle, drop_remainder_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) tf.add_input(desc, input_dataset_) tf.add_input(desc, batch_size_) tf.add_input(desc, drop_remainder_) @@ -57142,12 +57142,12 @@ begin end function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMinimum") - a_indices_ = convert(tf.TensorHandle, a_indices_) - a_values_ = convert(tf.TensorHandle, a_values_) - a_shape_ = convert(tf.TensorHandle, a_shape_) - b_indices_ = convert(tf.TensorHandle, b_indices_) - b_values_ = convert(tf.TensorHandle, b_values_) - b_shape_ = convert(tf.TensorHandle, b_shape_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_indices_ = 
convert(tf.EagerTensor, b_indices_) + b_values_ = convert(tf.EagerTensor, b_values_) + b_shape_ = convert(tf.EagerTensor, b_shape_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -57195,8 +57195,8 @@ begin end function reverse_v2_eager(tensor_, axis_; name=nothing) desc = tf.EagerOp("ReverseV2") - tensor_ = convert(tf.TensorHandle, tensor_) - axis_ = convert(tf.TensorHandle, axis_) + tensor_ = convert(tf.EagerTensor, tensor_) + axis_ = convert(tf.EagerTensor, axis_) tf.add_input(desc, tensor_) tf.add_input(desc, axis_) desc["T"] = tf.data_type(tensor_) @@ -57279,10 +57279,10 @@ begin end function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSlice") - input_ = convert(tf.TensorHandle, input_) - begin_ = convert(tf.TensorHandle, begin_) - end_ = convert(tf.TensorHandle, end_) - strides_ = convert(tf.TensorHandle, strides_) + input_ = convert(tf.EagerTensor, input_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) tf.add_input(desc, input_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -57358,7 +57358,7 @@ begin end function matching_files_eager(pattern_; name=nothing) desc = tf.EagerOp("MatchingFiles") - pattern_ = convert(tf.TensorHandle, pattern_) + pattern_ = convert(tf.EagerTensor, pattern_) tf.add_input(desc, pattern_) res = tf.execute(desc) node = tf.TapeNode(matching_files, [pattern_], name=nothing, res) @@ -57397,7 +57397,7 @@ begin end function encode_base64_eager(input_; name=nothing, pad=nothing) desc = tf.EagerOp("EncodeBase64") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if pad !== nothing desc["pad"] = Base.Bool(pad) @@ -57442,7 +57442,7 @@ begin end function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextAsOptional") - iterator_ = convert(tf.TensorHandle, iterator_) + iterator_ = convert(tf.EagerTensor, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -57546,7 +57546,7 @@ begin end function iterator_to_string_handle_eager(resource_handle_; name=nothing) desc = tf.EagerOp("IteratorToStringHandle") - resource_handle_ = convert(tf.TensorHandle, resource_handle_) + resource_handle_ = convert(tf.EagerTensor, resource_handle_) tf.add_input(desc, resource_handle_) res = tf.execute(desc) node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res) @@ -57597,9 +57597,9 @@ begin end function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradGradWithArgmax") - input_ = convert(tf.TensorHandle, input_) - grad_ = convert(tf.TensorHandle, grad_) - argmax_ = convert(tf.TensorHandle, argmax_) + input_ = convert(tf.EagerTensor, input_) + grad_ = convert(tf.EagerTensor, grad_) + argmax_ = convert(tf.EagerTensor, argmax_) tf.add_input(desc, input_) tf.add_input(desc, grad_) tf.add_input(desc, argmax_) @@ -57656,9 +57656,9 @@ begin end function tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGather") - input_handle_ = convert(tf.TensorHandle, 
input_handle_) - indices_ = convert(tf.TensorHandle, indices_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + indices_ = convert(tf.EagerTensor, indices_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -57711,8 +57711,8 @@ begin end function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) desc = tf.EagerOp("Multinomial") - logits_ = convert(tf.TensorHandle, logits_) - num_samples_ = convert(tf.TensorHandle, num_samples_) + logits_ = convert(tf.EagerTensor, logits_) + num_samples_ = convert(tf.EagerTensor, num_samples_) tf.add_input(desc, logits_) tf.add_input(desc, num_samples_) if seed !== nothing @@ -57766,9 +57766,9 @@ begin end function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayRead") - handle_ = convert(tf.TensorHandle, handle_) - index_ = convert(tf.TensorHandle, index_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, index_) tf.add_input(desc, flow_in_) @@ -57817,8 +57817,8 @@ begin end function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetGet") - materialized_ = convert(tf.TensorHandle, materialized_) - index_ = convert(tf.TensorHandle, index_) + materialized_ = convert(tf.EagerTensor, materialized_) + index_ = convert(tf.EagerTensor, index_) tf.add_input(desc, materialized_) tf.add_input(desc, index_) if output_types !== nothing @@ -57872,8 +57872,8 @@ begin end function tpu_partitioned_call_eager(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("TPUPartitionedCall") - args_ = convert(tf.TensorHandle, args_) - device_ordinal_ = convert(tf.TensorHandle, device_ordinal_) + args_ = convert(tf.EagerTensor, args_) + device_ordinal_ = convert(tf.EagerTensor, device_ordinal_) tf.add_input(desc, args_) tf.add_input(desc, device_ordinal_) if Tin !== nothing @@ -57952,14 +57952,14 @@ begin end function quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndReluAndRequantize") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) - min_freezed_output_ = convert(tf.TensorHandle, min_freezed_output_) - max_freezed_output_ = convert(tf.TensorHandle, max_freezed_output_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) + max_freezed_output_ = convert(tf.EagerTensor, 
max_freezed_output_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -58022,7 +58022,7 @@ begin end function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandleV2") - string_handle_ = convert(tf.TensorHandle, string_handle_) + string_handle_ = convert(tf.EagerTensor, string_handle_) tf.add_input(desc, string_handle_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -58067,8 +58067,8 @@ begin end function bitwise_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseOr") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -58115,9 +58115,9 @@ begin end function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMax") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) - num_segments_ = convert(tf.TensorHandle, num_segments_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) tf.add_input(desc, num_segments_) @@ -58170,10 +58170,10 @@ begin end function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSquaredDifference") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) - mkl_x_ = convert(tf.TensorHandle, mkl_x_) - mkl_y_ = convert(tf.TensorHandle, mkl_y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) tf.add_input(desc, x_) tf.add_input(desc, y_) tf.add_input(desc, mkl_x_) @@ -58228,9 +58228,9 @@ begin end function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilter") - input_ = convert(tf.TensorHandle, input_) - filter_ = convert(tf.TensorHandle, filter_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, out_backprop_) @@ -58298,8 +58298,8 @@ begin end function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) desc = tf.EagerOp("If") - cond_ = convert(tf.TensorHandle, cond_) - input_ = convert(tf.TensorHandle, input_) + cond_ = convert(tf.EagerTensor, cond_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, cond_) tf.add_input(desc, input_) if Tin !== nothing @@ -58366,8 +58366,8 @@ begin end function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FlatMapDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) tf.add_input(desc, input_dataset_) tf.add_input(desc, 
other_arguments_) if f !== nothing @@ -58428,9 +58428,9 @@ begin end function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatter") - tensor_ = convert(tf.TensorHandle, tensor_) - indices_ = convert(tf.TensorHandle, indices_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, tensor_) tf.add_input(desc, indices_) tf.add_input(desc, element_shape_) @@ -58479,8 +58479,8 @@ begin end function softsign_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftsignGrad") - gradients_ = convert(tf.TensorHandle, gradients_) - features_ = convert(tf.TensorHandle, features_) + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -58526,7 +58526,7 @@ begin end function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("CopyHost") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if tensor_name !== nothing desc["tensor_name"] = Base.String(tensor_name) @@ -58576,9 +58576,9 @@ begin end function lin_space_eager(start_, stop_, num_; name=nothing) desc = tf.EagerOp("LinSpace") - start_ = convert(tf.TensorHandle, start_) - stop_ = convert(tf.TensorHandle, stop_) - num_ = convert(tf.TensorHandle, num_) + start_ = convert(tf.EagerTensor, start_) + stop_ = convert(tf.EagerTensor, stop_) + num_ = convert(tf.EagerTensor, num_) tf.add_input(desc, start_) tf.add_input(desc, stop_) tf.add_input(desc, num_) @@ -58625,8 +58625,8 @@ begin end function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) desc = tf.EagerOp("_ParallelConcatUpdate") - value_ = convert(tf.TensorHandle, value_) - update_ = convert(tf.TensorHandle, update_) + value_ = convert(tf.EagerTensor, value_) + update_ = convert(tf.EagerTensor, update_) tf.add_input(desc, value_) tf.add_input(desc, update_) if loc !== nothing @@ -58718,8 +58718,8 @@ begin end function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPushV2") - handle_ = convert(tf.TensorHandle, handle_) - elem_ = convert(tf.TensorHandle, elem_) + handle_ = convert(tf.EagerTensor, handle_) + elem_ = convert(tf.EagerTensor, elem_) tf.add_input(desc, handle_) tf.add_input(desc, elem_) if swap_memory !== nothing @@ -58766,8 +58766,8 @@ begin end function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignVariableOp") - resource_ = convert(tf.TensorHandle, resource_) - value_ = convert(tf.TensorHandle, value_) + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, resource_) tf.add_input(desc, value_) if dtype !== nothing @@ -58824,10 +58824,10 @@ begin end function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) desc = tf.EagerOp("SparseSplit") - split_dim_ = convert(tf.TensorHandle, split_dim_) - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) - shape_ = convert(tf.TensorHandle, shape_) + split_dim_ = convert(tf.EagerTensor, split_dim_) + indices_ = convert(tf.EagerTensor, indices_) + 
values_ = convert(tf.EagerTensor, values_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, split_dim_) tf.add_input(desc, indices_) tf.add_input(desc, values_) @@ -58875,9 +58875,9 @@ begin end function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayUnpack") - handle_ = convert(tf.TensorHandle, handle_) - value_ = convert(tf.TensorHandle, value_) - flow_in_ = convert(tf.TensorHandle, flow_in_) + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) tf.add_input(desc, handle_) tf.add_input(desc, value_) tf.add_input(desc, flow_in_) @@ -58924,8 +58924,8 @@ begin end function tensor_list_stack_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) desc = tf.EagerOp("TensorListStack") - input_handle_ = convert(tf.TensorHandle, input_handle_) - element_shape_ = convert(tf.TensorHandle, element_shape_) + input_handle_ = convert(tf.EagerTensor, input_handle_) + element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, element_shape_) if element_dtype !== nothing @@ -58968,7 +58968,7 @@ begin end function barrier_incomplete_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierIncompleteSize") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res) @@ -59012,8 +59012,8 @@ begin end function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("Restore") - file_pattern_ = convert(tf.TensorHandle, file_pattern_) - tensor_name_ = convert(tf.TensorHandle, tensor_name_) + file_pattern_ = convert(tf.EagerTensor, file_pattern_) + tensor_name_ = convert(tf.EagerTensor, tensor_name_) tf.add_input(desc, file_pattern_) tf.add_input(desc, tensor_name_) if dt !== nothing @@ -59079,7 +59079,7 @@ begin end function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV3") - size_ = convert(tf.TensorHandle, size_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, size_) if dtype !== nothing desc["dtype"] = Base.identity(dtype) @@ -59141,8 +59141,8 @@ begin end function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalAssertNextDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - transformations_ = convert(tf.TensorHandle, transformations_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + transformations_ = convert(tf.EagerTensor, transformations_) tf.add_input(desc, input_dataset_) tf.add_input(desc, transformations_) if output_types !== nothing @@ -59191,8 +59191,8 @@ begin end function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) desc = tf.EagerOp("InTopK") - predictions_ = convert(tf.TensorHandle, predictions_) - targets_ = convert(tf.TensorHandle, targets_) + predictions_ = convert(tf.EagerTensor, predictions_) + targets_ = convert(tf.EagerTensor, targets_) tf.add_input(desc, predictions_) tf.add_input(desc, targets_) if k !== nothing @@ -59243,9 +59243,9 @@ begin end function scatter_sub_eager(ref_, indices_, updates_; 
name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterSub") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -59290,7 +59290,7 @@ begin end function acosh_eager(x_; name=nothing) desc = tf.EagerOp("Acosh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -59344,9 +59344,9 @@ begin end function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") - input_ = convert(tf.TensorHandle, input_) - filter_sizes_ = convert(tf.TensorHandle, filter_sizes_) - out_backprop_ = convert(tf.TensorHandle, out_backprop_) + input_ = convert(tf.EagerTensor, input_) + filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) tf.add_input(desc, input_) tf.add_input(desc, filter_sizes_) tf.add_input(desc, out_backprop_) @@ -59408,7 +59408,7 @@ begin end function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) desc = tf.EagerOp("Cast") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) if SrcT !== nothing desc["SrcT"] = Base.identity(SrcT) @@ -59469,9 +59469,9 @@ begin end function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeV2") - input_ = convert(tf.TensorHandle, input_) - min_range_ = convert(tf.TensorHandle, min_range_) - max_range_ = convert(tf.TensorHandle, max_range_) + input_ = convert(tf.EagerTensor, input_) + min_range_ = convert(tf.EagerTensor, min_range_) + max_range_ = convert(tf.EagerTensor, max_range_) tf.add_input(desc, input_) tf.add_input(desc, min_range_) tf.add_input(desc, max_range_) @@ -59543,9 +59543,9 @@ begin end function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("GeneratorDataset") - init_func_other_args_ = convert(tf.TensorHandle, init_func_other_args_) - next_func_other_args_ = convert(tf.TensorHandle, next_func_other_args_) - finalize_func_other_args_ = convert(tf.TensorHandle, finalize_func_other_args_) + init_func_other_args_ = convert(tf.EagerTensor, init_func_other_args_) + next_func_other_args_ = convert(tf.EagerTensor, next_func_other_args_) + finalize_func_other_args_ = convert(tf.EagerTensor, finalize_func_other_args_) tf.add_input(desc, init_func_other_args_) tf.add_input(desc, next_func_other_args_) tf.add_input(desc, finalize_func_other_args_) @@ -59607,7 +59607,7 @@ begin end function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSerialize") - tree_handle_ = convert(tf.TensorHandle, tree_handle_) + tree_handle_ = convert(tf.EagerTensor, tree_handle_) tf.add_input(desc, tree_handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], 
name=nothing, res) @@ -59646,8 +59646,8 @@ begin end function next_after_eager(x1_, x2_; name=nothing) desc = tf.EagerOp("NextAfter") - x1_ = convert(tf.TensorHandle, x1_) - x2_ = convert(tf.TensorHandle, x2_) + x1_ = convert(tf.EagerTensor, x1_) + x2_ = convert(tf.EagerTensor, x2_) tf.add_input(desc, x1_) tf.add_input(desc, x2_) desc["T"] = tf.data_type(x1_) @@ -59686,7 +59686,7 @@ begin end function tensor_array_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV2") - handle_ = convert(tf.TensorHandle, handle_) + handle_ = convert(tf.EagerTensor, handle_) tf.add_input(desc, handle_) res = tf.execute(desc) node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res) @@ -59809,8 +59809,8 @@ begin end function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderReadV2") - reader_handle_ = convert(tf.TensorHandle, reader_handle_) - queue_handle_ = convert(tf.TensorHandle, queue_handle_) + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) tf.add_input(desc, reader_handle_) tf.add_input(desc, queue_handle_) res = tf.execute(desc) @@ -59850,8 +59850,8 @@ begin end function mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mod") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -59893,8 +59893,8 @@ begin end function add_v2_eager(x_, y_; name=nothing) desc = tf.EagerOp("AddV2") - x_ = convert(tf.TensorHandle, x_) - y_ = convert(tf.TensorHandle, y_) + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) tf.add_input(desc, x_) tf.add_input(desc, y_) desc["T"] = tf.data_type(x_) @@ -59940,8 +59940,8 @@ begin end function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomNormal") - shape_ = convert(tf.TensorHandle, shape_) - seed_ = convert(tf.TensorHandle, seed_) + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) tf.add_input(desc, shape_) tf.add_input(desc, seed_) if dtype !== nothing @@ -60029,11 +60029,11 @@ begin end function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceAssign") - ref_ = convert(tf.TensorHandle, ref_) - begin_ = convert(tf.TensorHandle, begin_) - end_ = convert(tf.TensorHandle, end_) - strides_ = convert(tf.TensorHandle, strides_) - value_ = convert(tf.TensorHandle, value_) + ref_ = convert(tf.EagerTensor, ref_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, ref_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -60121,9 +60121,9 @@ begin end function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMin") - ref_ = convert(tf.TensorHandle, ref_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, ref_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -60213,11 +60213,11 @@ begin 
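# A sketch, not part of the patch: every regenerated *_eager wrapper in the hunks above
# and below follows one recipe. Inputs are converted to tf.EagerTensor (so plain Julia
# arrays work too), wired into a tf.EagerOp, the "T" attribute is filled from the runtime
# dtype, the op is executed, and a TapeNode is recorded for gradients. Distilled by hand
# for a hypothetical binary op, using only helpers this patch series defines:
function binary_op_eager(op_name, x_, y_; name=nothing)
    desc = tf.EagerOp(op_name)
    x_ = convert(tf.EagerTensor, x_)  # accepts arrays, scalars, or existing handles
    y_ = convert(tf.EagerTensor, y_)
    tf.add_input(desc, x_)
    tf.add_input(desc, y_)
    desc["T"] = tf.data_type(x_)      # dtype attr resolved at call time, not codegen time
    res = tf.execute(desc)            # vector of result handles
    node = tf.TapeNode(binary_op_eager, [x_, y_], res)
    tf.add_node(res[1], node)         # registers the node with the active tape, if any
    return res[1]
end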
end function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("ResourceStridedSliceAssign") - ref_ = convert(tf.TensorHandle, ref_) - begin_ = convert(tf.TensorHandle, begin_) - end_ = convert(tf.TensorHandle, end_) - strides_ = convert(tf.TensorHandle, strides_) - value_ = convert(tf.TensorHandle, value_) + ref_ = convert(tf.EagerTensor, ref_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + value_ = convert(tf.EagerTensor, value_) tf.add_input(desc, ref_) tf.add_input(desc, begin_) tf.add_input(desc, end_) @@ -60297,8 +60297,8 @@ begin end function random_gamma_grad_eager(alpha_, sample_; name=nothing) desc = tf.EagerOp("RandomGammaGrad") - alpha_ = convert(tf.TensorHandle, alpha_) - sample_ = convert(tf.TensorHandle, sample_) + alpha_ = convert(tf.EagerTensor, alpha_) + sample_ = convert(tf.EagerTensor, sample_) tf.add_input(desc, alpha_) tf.add_input(desc, sample_) desc["T"] = tf.data_type(alpha_) @@ -60356,12 +60356,12 @@ begin end function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - grad_ = convert(tf.TensorHandle, grad_) - indices_ = convert(tf.TensorHandle, indices_) - momentum_ = convert(tf.TensorHandle, momentum_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = convert(tf.EagerTensor, momentum_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -60419,9 +60419,9 @@ begin end function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") - quantile_stream_resource_handle_ = convert(tf.TensorHandle, quantile_stream_resource_handle_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - num_streams_ = convert(tf.TensorHandle, num_streams_) + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + num_streams_ = convert(tf.EagerTensor, num_streams_) tf.add_input(desc, quantile_stream_resource_handle_) tf.add_input(desc, epsilon_) tf.add_input(desc, num_streams_) @@ -60475,9 +60475,9 @@ begin end function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu6") - features_ = convert(tf.TensorHandle, features_) - min_features_ = convert(tf.TensorHandle, min_features_) - max_features_ = convert(tf.TensorHandle, max_features_) + features_ = convert(tf.EagerTensor, features_) + min_features_ = convert(tf.EagerTensor, min_features_) + max_features_ = convert(tf.EagerTensor, max_features_) tf.add_input(desc, features_) tf.add_input(desc, min_features_) tf.add_input(desc, max_features_) @@ -60535,12 +60535,12 @@ begin end function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = 
tf.EagerOp("SparseSparseMaximum") - a_indices_ = convert(tf.TensorHandle, a_indices_) - a_values_ = convert(tf.TensorHandle, a_values_) - a_shape_ = convert(tf.TensorHandle, a_shape_) - b_indices_ = convert(tf.TensorHandle, b_indices_) - b_values_ = convert(tf.TensorHandle, b_values_) - b_shape_ = convert(tf.TensorHandle, b_shape_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + b_values_ = convert(tf.EagerTensor, b_values_) + b_shape_ = convert(tf.EagerTensor, b_shape_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -60598,11 +60598,11 @@ begin end function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalization") - t_ = convert(tf.TensorHandle, t_) - m_ = convert(tf.TensorHandle, m_) - v_ = convert(tf.TensorHandle, v_) - beta_ = convert(tf.TensorHandle, beta_) - gamma_ = convert(tf.TensorHandle, gamma_) + t_ = convert(tf.EagerTensor, t_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta_ = convert(tf.EagerTensor, beta_) + gamma_ = convert(tf.EagerTensor, gamma_) tf.add_input(desc, t_) tf.add_input(desc, m_) tf.add_input(desc, v_) @@ -60658,9 +60658,9 @@ begin end function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) desc = tf.EagerOp("InTopKV2") - predictions_ = convert(tf.TensorHandle, predictions_) - targets_ = convert(tf.TensorHandle, targets_) - k_ = convert(tf.TensorHandle, k_) + predictions_ = convert(tf.EagerTensor, predictions_) + targets_ = convert(tf.EagerTensor, targets_) + k_ = convert(tf.EagerTensor, k_) tf.add_input(desc, predictions_) tf.add_input(desc, targets_) tf.add_input(desc, k_) @@ -60701,7 +60701,7 @@ begin end function cholesky_eager(input_; name=nothing) desc = tf.EagerOp("Cholesky") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -60758,15 +60758,15 @@ begin end function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyCenteredRMSProp") - var_ = convert(tf.TensorHandle, var_) - mg_ = convert(tf.TensorHandle, mg_) - ms_ = convert(tf.TensorHandle, ms_) - mom_ = convert(tf.TensorHandle, mom_) - lr_ = convert(tf.TensorHandle, lr_) - rho_ = convert(tf.TensorHandle, rho_) - momentum_ = convert(tf.TensorHandle, momentum_) - epsilon_ = convert(tf.TensorHandle, epsilon_) - grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, mg_) tf.add_input(desc, ms_) @@ -60831,10 +60831,10 @@ begin end function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceApplyAdagrad") - var_ = convert(tf.TensorHandle, var_) - accum_ = convert(tf.TensorHandle, accum_) - lr_ = convert(tf.TensorHandle, lr_) - 
grad_ = convert(tf.TensorHandle, grad_) + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) tf.add_input(desc, var_) tf.add_input(desc, accum_) tf.add_input(desc, lr_) @@ -60905,13 +60905,13 @@ begin end function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") - input_dataset_ = convert(tf.TensorHandle, input_dataset_) - other_arguments_ = convert(tf.TensorHandle, other_arguments_) - cycle_length_ = convert(tf.TensorHandle, cycle_length_) - block_length_ = convert(tf.TensorHandle, block_length_) - sloppy_ = convert(tf.TensorHandle, sloppy_) - buffer_output_elements_ = convert(tf.TensorHandle, buffer_output_elements_) - prefetch_input_elements_ = convert(tf.TensorHandle, prefetch_input_elements_) + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) + sloppy_ = convert(tf.EagerTensor, sloppy_) + buffer_output_elements_ = convert(tf.EagerTensor, buffer_output_elements_) + prefetch_input_elements_ = convert(tf.EagerTensor, prefetch_input_elements_) tf.add_input(desc, input_dataset_) tf.add_input(desc, other_arguments_) tf.add_input(desc, cycle_length_) @@ -60971,8 +60971,8 @@ begin end function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubicGrad") - grads_ = convert(tf.TensorHandle, grads_) - original_image_ = convert(tf.TensorHandle, original_image_) + grads_ = convert(tf.EagerTensor, grads_) + original_image_ = convert(tf.EagerTensor, original_image_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) if align_corners !== nothing @@ -61014,7 +61014,7 @@ begin end function batch_self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("BatchSelfAdjointEig") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["T"] = tf.data_type(input_) res = tf.execute(desc) @@ -61056,9 +61056,9 @@ begin end function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) desc = tf.EagerOp("SparseSoftmax") - sp_indices_ = convert(tf.TensorHandle, sp_indices_) - sp_values_ = convert(tf.TensorHandle, sp_values_) - sp_shape_ = convert(tf.TensorHandle, sp_shape_) + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) tf.add_input(desc, sp_indices_) tf.add_input(desc, sp_values_) tf.add_input(desc, sp_shape_) @@ -61098,7 +61098,7 @@ begin end function asinh_eager(x_; name=nothing) desc = tf.EagerOp("Asinh") - x_ = convert(tf.TensorHandle, x_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, x_) desc["T"] = tf.data_type(x_) res = tf.execute(desc) @@ -61164,12 +61164,12 @@ begin end function quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("QuantizedConv2DAndRelu") - input_ = convert(tf.TensorHandle, input_) - filter_ = 
convert(tf.TensorHandle, filter_) - min_input_ = convert(tf.TensorHandle, min_input_) - max_input_ = convert(tf.TensorHandle, max_input_) - min_filter_ = convert(tf.TensorHandle, min_filter_) - max_filter_ = convert(tf.TensorHandle, max_filter_) + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) tf.add_input(desc, input_) tf.add_input(desc, filter_) tf.add_input(desc, min_input_) @@ -61228,7 +61228,7 @@ begin end function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixInverse") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if adjoint !== nothing desc["adjoint"] = Base.Bool(adjoint) @@ -61273,8 +61273,8 @@ begin end function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcatLists") - input_a_ = convert(tf.TensorHandle, input_a_) - input_b_ = convert(tf.TensorHandle, input_b_) + input_a_ = convert(tf.EagerTensor, input_a_) + input_b_ = convert(tf.EagerTensor, input_b_) tf.add_input(desc, input_a_) tf.add_input(desc, input_b_) if element_dtype !== nothing @@ -61331,11 +61331,11 @@ begin end function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("Requantize") - input_ = convert(tf.TensorHandle, input_) - input_min_ = convert(tf.TensorHandle, input_min_) - input_max_ = convert(tf.TensorHandle, input_max_) - requested_output_min_ = convert(tf.TensorHandle, requested_output_min_) - requested_output_max_ = convert(tf.TensorHandle, requested_output_max_) + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + requested_output_min_ = convert(tf.EagerTensor, requested_output_min_) + requested_output_max_ = convert(tf.EagerTensor, requested_output_max_) tf.add_input(desc, input_) tf.add_input(desc, input_min_) tf.add_input(desc, input_max_) @@ -61380,7 +61380,7 @@ begin end function fft_eager(input_; name=nothing) desc = tf.EagerOp("FFT") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) desc["Tcomplex"] = tf.data_type(input_) res = tf.execute(desc) @@ -61421,8 +61421,8 @@ begin end function conjugate_transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("ConjugateTranspose") - x_ = convert(tf.TensorHandle, x_) - perm_ = convert(tf.TensorHandle, perm_) + x_ = convert(tf.EagerTensor, x_) + perm_ = convert(tf.EagerTensor, perm_) tf.add_input(desc, x_) tf.add_input(desc, perm_) desc["T"] = tf.data_type(x_) @@ -61526,8 +61526,8 @@ begin end function relu6grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("Relu6Grad") - gradients_ = convert(tf.TensorHandle, gradients_) - features_ = convert(tf.TensorHandle, features_) + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) tf.add_input(desc, gradients_) tf.add_input(desc, features_) desc["T"] = tf.data_type(gradients_) @@ -61576,10 +61576,10 @@ begin end function scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) desc = tf.EagerOp("ScaleAndTranslateGrad") - grads_ = 
convert(tf.TensorHandle, grads_) - original_image_ = convert(tf.TensorHandle, original_image_) - scale_ = convert(tf.TensorHandle, scale_) - translation_ = convert(tf.TensorHandle, translation_) + grads_ = convert(tf.EagerTensor, grads_) + original_image_ = convert(tf.EagerTensor, original_image_) + scale_ = convert(tf.EagerTensor, scale_) + translation_ = convert(tf.EagerTensor, translation_) tf.add_input(desc, grads_) tf.add_input(desc, original_image_) tf.add_input(desc, scale_) @@ -61630,7 +61630,7 @@ begin end function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) desc = tf.EagerOp("_ArrayToList") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if N !== nothing desc["N"] = Base.Int(N) @@ -61708,11 +61708,11 @@ begin end function cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV3") - input_ = convert(tf.TensorHandle, input_) - input_h_ = convert(tf.TensorHandle, input_h_) - input_c_ = convert(tf.TensorHandle, input_c_) - params_ = convert(tf.TensorHandle, params_) - sequence_lengths_ = convert(tf.TensorHandle, sequence_lengths_) + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + sequence_lengths_ = convert(tf.EagerTensor, sequence_lengths_) tf.add_input(desc, input_) tf.add_input(desc, input_h_) tf.add_input(desc, input_c_) @@ -61782,8 +61782,8 @@ begin end function expand_dims_eager(input_, dim_; name=nothing) desc = tf.EagerOp("ExpandDims") - input_ = convert(tf.TensorHandle, input_) - dim_ = convert(tf.TensorHandle, dim_) + input_ = convert(tf.EagerTensor, input_) + dim_ = convert(tf.EagerTensor, dim_) tf.add_input(desc, input_) tf.add_input(desc, dim_) desc["T"] = tf.data_type(input_) @@ -61825,8 +61825,8 @@ begin end function inv_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("InvGrad") - y_ = convert(tf.TensorHandle, y_) - dy_ = convert(tf.TensorHandle, dy_) + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) tf.add_input(desc, y_) tf.add_input(desc, dy_) desc["T"] = tf.data_type(y_) @@ -61872,9 +61872,9 @@ begin end function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) desc = tf.EagerOp("NonMaxSuppression") - boxes_ = convert(tf.TensorHandle, boxes_) - scores_ = convert(tf.TensorHandle, scores_) - max_output_size_ = convert(tf.TensorHandle, max_output_size_) + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) tf.add_input(desc, boxes_) tf.add_input(desc, scores_) tf.add_input(desc, max_output_size_) @@ -61916,7 +61916,7 @@ begin end function l2loss_eager(t_; name=nothing) desc = tf.EagerOp("L2Loss") - t_ = convert(tf.TensorHandle, t_) + t_ = convert(tf.EagerTensor, t_) tf.add_input(desc, t_) desc["T"] = tf.data_type(t_) res = tf.execute(desc) @@ -61959,8 +61959,8 @@ begin end function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeArea") - images_ = convert(tf.TensorHandle, images_) - size_ = convert(tf.TensorHandle, size_) + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) tf.add_input(desc, images_) 
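# A sketch, not part of the patch: optional attributes ride alongside the inputs. Each
# keyword defaults to `nothing`, and only an explicitly supplied value is written into
# the op description, coerced through Base.Bool / Base.Int / etc., exactly as in the
# resize_area_eager hunk here. A hand-written illustration with a hypothetical op name:
function resize_like_eager(images_; name=nothing, align_corners=nothing)
    desc = tf.EagerOp("ResizeLike")   # hypothetical kernel, for illustration only
    images_ = convert(tf.EagerTensor, images_)
    tf.add_input(desc, images_)
    if align_corners !== nothing      # `nothing` means "keep the kernel's default"
        desc["align_corners"] = Base.Bool(align_corners)
    end
    return tf.execute(desc)[1]
end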
tf.add_input(desc, size_) if align_corners !== nothing @@ -62036,10 +62036,10 @@ begin end function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) desc = tf.EagerOp("SparseCross") - indices_ = convert(tf.TensorHandle, indices_) - values_ = convert(tf.TensorHandle, values_) - shapes_ = convert(tf.TensorHandle, shapes_) - dense_inputs_ = convert(tf.TensorHandle, dense_inputs_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shapes_ = convert(tf.EagerTensor, shapes_) + dense_inputs_ = convert(tf.EagerTensor, dense_inputs_) tf.add_input(desc, indices_) tf.add_input(desc, values_) tf.add_input(desc, shapes_) @@ -62102,7 +62102,7 @@ begin end function batch_fft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT3D") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) res = tf.execute(desc) node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res) @@ -62148,7 +62148,7 @@ begin end function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomStandardNormal") - shape_ = convert(tf.TensorHandle, shape_) + shape_ = convert(tf.EagerTensor, shape_) tf.add_input(desc, shape_) if seed !== nothing desc["seed"] = Base.Int(seed) @@ -62204,9 +62204,9 @@ begin end function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMul") - resource_ = convert(tf.TensorHandle, resource_) - indices_ = convert(tf.TensorHandle, indices_) - updates_ = convert(tf.TensorHandle, updates_) + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) tf.add_input(desc, resource_) tf.add_input(desc, indices_) tf.add_input(desc, updates_) @@ -62299,16 +62299,16 @@ begin end function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizer") - sparse_example_indices_ = convert(tf.TensorHandle, sparse_example_indices_) - sparse_feature_indices_ = convert(tf.TensorHandle, sparse_feature_indices_) - sparse_feature_values_ = convert(tf.TensorHandle, sparse_feature_values_) - dense_features_ = convert(tf.TensorHandle, dense_features_) - example_weights_ = convert(tf.TensorHandle, example_weights_) - example_labels_ = convert(tf.TensorHandle, example_labels_) - sparse_indices_ = convert(tf.TensorHandle, sparse_indices_) - sparse_weights_ = convert(tf.TensorHandle, sparse_weights_) - dense_weights_ = convert(tf.TensorHandle, dense_weights_) - example_state_data_ = convert(tf.TensorHandle, example_state_data_) + sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) + sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_) + sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_) + dense_features_ = convert(tf.EagerTensor, dense_features_) + 
example_weights_ = convert(tf.EagerTensor, example_weights_) + example_labels_ = convert(tf.EagerTensor, example_labels_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_weights_ = convert(tf.EagerTensor, sparse_weights_) + dense_weights_ = convert(tf.EagerTensor, dense_weights_) + example_state_data_ = convert(tf.EagerTensor, example_state_data_) tf.add_input(desc, sparse_example_indices_) tf.add_input(desc, sparse_feature_indices_) tf.add_input(desc, sparse_feature_values_) @@ -62383,8 +62383,8 @@ begin end function zeta_eager(x_, q_; name=nothing) desc = tf.EagerOp("Zeta") - x_ = convert(tf.TensorHandle, x_) - q_ = convert(tf.TensorHandle, q_) + x_ = convert(tf.EagerTensor, x_) + q_ = convert(tf.EagerTensor, q_) tf.add_input(desc, x_) tf.add_input(desc, q_) desc["T"] = tf.data_type(x_) @@ -62452,8 +62452,8 @@ begin end function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBox") - image_size_ = convert(tf.TensorHandle, image_size_) - bounding_boxes_ = convert(tf.TensorHandle, bounding_boxes_) + image_size_ = convert(tf.EagerTensor, image_size_) + bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_) tf.add_input(desc, image_size_) tf.add_input(desc, bounding_boxes_) if seed !== nothing @@ -62515,8 +62515,8 @@ begin end function igamma_grad_a_eager(a_, x_; name=nothing) desc = tf.EagerOp("IgammaGradA") - a_ = convert(tf.TensorHandle, a_) - x_ = convert(tf.TensorHandle, x_) + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) tf.add_input(desc, a_) tf.add_input(desc, x_) desc["T"] = tf.data_type(a_) @@ -62560,8 +62560,8 @@ begin end function segment_max_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMax") - data_ = convert(tf.TensorHandle, data_) - segment_ids_ = convert(tf.TensorHandle, segment_ids_) + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) tf.add_input(desc, data_) tf.add_input(desc, segment_ids_) desc["T"] = tf.data_type(data_) @@ -62605,9 +62605,9 @@ begin end function range_eager(start_, limit_, delta_; name=nothing) desc = tf.EagerOp("Range") - start_ = convert(tf.TensorHandle, start_) - limit_ = convert(tf.TensorHandle, limit_) - delta_ = convert(tf.TensorHandle, delta_) + start_ = convert(tf.EagerTensor, start_) + limit_ = convert(tf.EagerTensor, limit_) + delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, start_) tf.add_input(desc, limit_) tf.add_input(desc, delta_) @@ -62709,7 +62709,7 @@ begin end function flush_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("FlushSummaryWriter") - writer_ = convert(tf.TensorHandle, writer_) + writer_ = convert(tf.EagerTensor, writer_) tf.add_input(desc, writer_) res = tf.execute(desc) node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res) @@ -62753,9 +62753,9 @@ begin end function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) desc = tf.EagerOp("Dequantize") - input_ = convert(tf.TensorHandle, input_) - min_range_ = convert(tf.TensorHandle, min_range_) - max_range_ = convert(tf.TensorHandle, max_range_) + input_ = convert(tf.EagerTensor, input_) + min_range_ = convert(tf.EagerTensor, min_range_) + max_range_ = convert(tf.EagerTensor, max_range_) tf.add_input(desc, input_) tf.add_input(desc, min_range_) 
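# A note, not part of the patch: ops with several outputs, such as
# SampleDistortedBoundingBox above, skip the `res[1]` shortcut; tf.execute returns a
# Vector of EagerTensor handles and the wrapper hands the whole vector back. At the
# call site that destructures naturally (inputs assumed prepared by the caller):
begin_, size_, bboxes = Ops.sample_distorted_bounding_box(image_size, bounding_boxes)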
tf.add_input(desc, max_range_) @@ -62805,8 +62805,8 @@ begin end function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRowsGrad") - reverse_index_map_ = convert(tf.TensorHandle, reverse_index_map_) - grad_values_ = convert(tf.TensorHandle, grad_values_) + reverse_index_map_ = convert(tf.EagerTensor, reverse_index_map_) + grad_values_ = convert(tf.EagerTensor, grad_values_) tf.add_input(desc, reverse_index_map_) tf.add_input(desc, grad_values_) desc["T"] = tf.data_type(grad_values_) @@ -62850,7 +62850,7 @@ begin end function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNext") - iterator_ = convert(tf.TensorHandle, iterator_) + iterator_ = convert(tf.EagerTensor, iterator_) tf.add_input(desc, iterator_) if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) @@ -62902,10 +62902,10 @@ begin end function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) desc = tf.EagerOp("SparseTensorDenseAdd") - a_indices_ = convert(tf.TensorHandle, a_indices_) - a_values_ = convert(tf.TensorHandle, a_values_) - a_shape_ = convert(tf.TensorHandle, a_shape_) - b_ = convert(tf.TensorHandle, b_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_ = convert(tf.EagerTensor, b_) tf.add_input(desc, a_indices_) tf.add_input(desc, a_values_) tf.add_input(desc, a_shape_) @@ -62952,7 +62952,7 @@ begin end function prevent_gradient_eager(input_; name=nothing, message=nothing) desc = tf.EagerOp("PreventGradient") - input_ = convert(tf.TensorHandle, input_) + input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) if message !== nothing desc["message"] = Base.String(message) @@ -62997,7 +62997,7 @@ begin end function lookup_table_export_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExport") - table_handle_ = convert(tf.TensorHandle, table_handle_) + table_handle_ = convert(tf.EagerTensor, table_handle_) tf.add_input(desc, table_handle_) res = tf.execute(desc) node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res) diff --git a/src/show.jl b/src/show.jl index e01122cc..f631df65 100644 --- a/src/show.jl +++ b/src/show.jl @@ -40,7 +40,7 @@ function Base.show(io::IO, t::RawTensor) end end -function Base.show(io::IO, t::TensorHandle) +function Base.show(io::IO, t::EagerTensor) jl_array = convert(Array, t) ptr = pointer_from_objref(t) print(io, "EagerTensor<$ptr>($(jl_array))") diff --git a/src/tape.jl b/src/tape.jl index b9ea6b01..523e7ab8 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -11,11 +11,11 @@ end TapeNode(op, args, results; kwargs...) = TapeNode(op, args, results, kwargs) mutable struct Tape - nodes::Dict{TensorHandle, TapeNode} + nodes::Dict{EagerTensor, TapeNode} attrs::Dict end -Tape(;kwargs...) = Tape(Dict{TensorHandle, TapeNode}(), Dict(kwargs...)) +Tape(;kwargs...) 
= Tape(Dict{EagerTensor, TapeNode}(), Dict(kwargs...)) struct TapeContext <: Context tape::Union{Tape, Nothing} From 3f6e44e147d1fca6aeeaed9cadf80a8ebad96ca8 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 16:56:18 -0400 Subject: [PATCH 37/49] Add 'reverse' with dims kw argument --- src/ops/sequences.jl | 4 ++++ src/ops/transformations.jl | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/ops/sequences.jl b/src/ops/sequences.jl index eb1fd9a7..003b3cfe 100644 --- a/src/ops/sequences.jl +++ b/src/ops/sequences.jl @@ -109,6 +109,10 @@ end Ops.reverse_v2(x, indices; kwargs...) end +@op function Base.reverse(x::AbstractTensor; dims=0, kwargs...) + reverse(x, [dims]; kwargs...) +end + @op function Base.fill(n::AbstractTensor, dims::Tuple{Vararg{Int64,N}} where N; kwargs...) invoke(fill, Tuple{AbstractTensor,Any}, n, dims; kwargs...) end diff --git a/src/ops/transformations.jl b/src/ops/transformations.jl index b2f457b1..a1c5a846 100644 --- a/src/ops/transformations.jl +++ b/src/ops/transformations.jl @@ -381,7 +381,7 @@ Returns: with_op_name(name, "Transpose") do if perm === nothing r = range(constant(0), LinearAlgebra.rank(n)-1) - perm = reverse(r, [0]) + perm = reverse(r) end result = Ops.transpose(n, perm) end From edac1b6f1644829822f389bc60f077158ab51770 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 17:04:04 -0400 Subject: [PATCH 38/49] call_args style --- src/generate_ops.jl | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 9c1c5230..7e334aab 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -214,16 +214,7 @@ function to_function(op::tensorflow.OpDef) out end end - eager_output_block = if scalar_output - quote - return res[1] - end - else - quote - #tf.execute(desc) - return res - end - end + eager_output_block = scalar_output ? 
:(return res[1]) : :(return res) graph_name = Symbol("$(jl_name)_graph") eager_name = Symbol("$(jl_name)_eager") expr = quote @@ -266,10 +257,7 @@ function to_function(op::tensorflow.OpDef) for arg in inputs[1].args push!(call_kw_params.args, Expr(:kw, arg.args[1], arg.args[1])) end - call_args = Any[call_kw_params] - for input in inputs[2:end] - push!(call_args, input) - end + call_args = [call_kw_params; inputs[2:end]] dispatch_expr = quote function $jl_name($(inputs...)) if tf.in_eager_mode() From 81dea7d2e7a59cbc0f4ad0b1a50828380a9626e0 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 17:13:48 -0400 Subject: [PATCH 39/49] Better fields in Sequential --- src/keras.jl | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/src/keras.jl b/src/keras.jl index 030f8d49..8263a14c 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -30,10 +30,13 @@ macro callable(f) end @callable mutable struct Sequential <: Model - attrs::Dict + layers::Vector{Layer} + loss # TODO constrain these fields more + optimizer + trainable::Set end -@callable mutable struct Dense <: Layer +@callable struct Dense <: Layer weights::EagerTensor bias::EagerTensor end @@ -56,28 +59,20 @@ end SGD(;lr=1e-3)= SGD(convert(EagerTensor, lr)) -function Sequential() - d = Dict() - d["trainable"] = Set() - d["layers"] = [] - Sequential(d) -end - +Sequential() = Sequential([], nothing, nothing, Set()) function add(m::Sequential, d::Dense) set_trainable(m, d.weights) set_trainable(m, d.bias) - push!(m.attrs["layers"], d) + push!(m.layers, d) end -function add(m::Sequential, layer) - push!(m.attrs["layers"], layer) -end +add(m::Sequential, layer) = push!(m.layers, layer) forward(d::Dense, x) = Ops.bias_add(x*d.weights, d.bias) function forward(m::Sequential, x) - for layer in m.attrs["layers"] + for layer in m.layers x = forward(layer, x) end return x @@ -86,27 +81,27 @@ end mse(y, y_target) = mean((y .- y_target) .^ 2) function set_trainable(m::Sequential, tensor) - push!(m.attrs["trainable"], tensor) + push!(m.trainable, tensor) end function compile(m::Sequential; optimizer=nothing, loss=nothing) - m.attrs["optimizer"] = optimizer - m.attrs["loss"] = loss + m.optimizer = optimizer + m.loss = loss end optimizier_step(g::SGD, value, grads) = inplace_sub(value, g.lr .* grads) function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) - optimizer = m.attrs["optimizer"] + optimizer = m.optimizer for epoch in 1:n_epochs tape = set_tape() y_predicted = x - for layer in m.attrs["layers"] + for layer in m.layers y_predicted = forward(layer, y_predicted) end - loss = m.attrs["loss"](y, y_predicted) + loss = m.loss(y, y_predicted) @info "" epoch loss=item(loss) - values = collect(m.attrs["trainable"]) + values = collect(m.trainable) grads = grad(tape, loss, values) for (value, g) in zip(values, grads) if g === nothing From d4fae80c483b6fff3690efbf3eb146a0017d8cf4 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 17:15:53 -0400 Subject: [PATCH 40/49] eliminate some stubs --- src/eager.jl | 14 -------------- src/meta.jl | 4 ---- 2 files changed, 18 deletions(-) diff --git a/src/eager.jl b/src/eager.jl index 8df643ae..84f3e5f0 100644 --- a/src/eager.jl +++ b/src/eager.jl @@ -158,20 +158,6 @@ function execute(op::EagerOp) return handles end -function test_eager() - ctx = EagerContext() - h1 = EagerTensor(RawTensor([1,2])) - h2 = EagerTensor(RawTensor([3,4])) - op = EagerOp(ctx, "Add") - add_input(op, h1) - add_input(op, h2) - dtype = data_type(h1) - op["T"] = 
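# A usage sketch, not part of the patch: with `attrs::Dict` replaced by concrete fields,
# driving a Sequential model is plain struct manipulation. The shapes, the random weight
# initialization, and the x_train/y_train data are illustrative assumptions; the API
# (Sequential, Dense, add, compile, fit, SGD, mse) is the one defined in keras.jl above:
m = Sequential()
add(m, Dense(convert(EagerTensor, randn(2, 1)),   # weights, assumed 2-in/1-out
             convert(EagerTensor, zeros(1))))     # bias
compile(m, optimizer=SGD(lr=1e-2), loss=mse)
fit(m, x_train, y_train, n_epochs=10)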
dtype - res = execute(op) - return res[1] - # return resolve(res[1]) -end - function setindex!(op::EagerOp, tensor::RawTensor, attr_name) status = Status() @tfcall(:TFE_OpSetAttrTensor, Cvoid, (Ptr{Cvoid}, Cstring, Ptr{Cvoid}, Ptr{Cvoid}), op, attr_name, tensor, status) diff --git a/src/meta.jl b/src/meta.jl index 59bc5561..66d63b92 100644 --- a/src/meta.jl +++ b/src/meta.jl @@ -170,7 +170,3 @@ macro tf(ex) end end |> esc end - -macro scalar_summary(f) - @capture(f, funcname(args__; kwargs__)) -end From 492d3bf861e004ad5ab690da9da275432f0eb820 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 17:20:57 -0400 Subject: [PATCH 41/49] remove accidental scratch --- src/ops/module_test.jl | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 src/ops/module_test.jl diff --git a/src/ops/module_test.jl b/src/ops/module_test.jl deleted file mode 100644 index 81e88392..00000000 --- a/src/ops/module_test.jl +++ /dev/null @@ -1,10 +0,0 @@ -module M - export x - x=1 - module Y - using ..M - function f(y) - return y+x - end - end -end From 5fda54766318e64614a3239bccdfeab590a1eed2 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 17:21:45 -0400 Subject: [PATCH 42/49] remove neural ode example until we get it robustly functional --- examples/neural_ode.jl | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 examples/neural_ode.jl diff --git a/examples/neural_ode.jl b/examples/neural_ode.jl deleted file mode 100644 index f80e95f4..00000000 --- a/examples/neural_ode.jl +++ /dev/null @@ -1,8 +0,0 @@ -using TensorFlow -using DifferentialEquations - -model = tf.Sequential([tf.Dense(2, 1)]) -f(u, p, t) = model(u) -problem = ODEProblem(f, u0=[0.5, 0.5], tspan=(0.0, 1.0)) -tf.compile(model, optimizer=tf.Adam(), loss=tf.diffeq_loss(problem, t=[0.0, 0.5, 1.0])) -tf.fit(m, [1.0, 2.0, 5.0], n_epochs=100) From 8cd906faf8d9be3b178424a5b5d237554f11d7e0 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 19:04:00 -0400 Subject: [PATCH 43/49] Export symbols and change tape. --- src/TensorFlow.jl | 9 ++++++++- src/context.jl | 2 +- src/keras.jl | 2 +- src/summary_writer.jl | 1 + src/tape.jl | 12 ++++++++---- 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/src/TensorFlow.jl b/src/TensorFlow.jl index 1a935b0c..27b62e23 100644 --- a/src/TensorFlow.jl +++ b/src/TensorFlow.jl @@ -124,7 +124,14 @@ Ops, slice, import_op, @tfimport, -tf_versioninfo +tf_versioninfo, +copy_to_device, +enable_eager_execution, +EagerTensor, +summary, +create_tape, +set_tape, +with_tape using Distributed diff --git a/src/context.jl b/src/context.jl index f70d71ab..b1405da9 100644 --- a/src/context.jl +++ b/src/context.jl @@ -36,9 +36,9 @@ end function with_context(block, ctx) push!(global_context, ctx) res = block() + # This assumes the block doesn't adjust the context stack. We should explicitly pop the context we pushed, rather than whatever happens to be on top.
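# An aside, not part of the patch: a sketch of the hazard the comment above describes.
# If `block` pushes a context of its own and fails to pop it, the bare pop! below
# removes the block's leftover context instead of `ctx`. A defensive variant
# (hypothetical, not what this patch does) would verify what it pops:
function with_context_checked(block, ctx)
    push!(global_context, ctx)
    try
        return block()
    finally
        popped = pop!(global_context)
        popped === ctx || error("with_context: context stack left unbalanced")
    end
end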
pop!(global_context) return res end - const global_context = ContextStack() diff --git a/src/keras.jl b/src/keras.jl index 8263a14c..cd1cfd92 100644 --- a/src/keras.jl +++ b/src/keras.jl @@ -94,7 +94,7 @@ optimizier_step(g::SGD, value, grads) = inplace_sub(value, g.lr .* grads) function fit(m::Sequential, x, y; n_epochs=1, batch_size=nothing) optimizer = m.optimizer for epoch in 1:n_epochs - tape = set_tape() + tape = create_tape() y_predicted = x for layer in m.layers y_predicted = forward(layer, y_predicted) diff --git a/src/summary_writer.jl b/src/summary_writer.jl index 7ca2bae4..57ef4d21 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -3,6 +3,7 @@ using CRC32c import ..TensorFlow const tf = TensorFlow import ..TensorFlow: tensorflow, Graph, get_def_graph, @py_proc +export FileWriter struct FileWriter <: tf.Context file_handle diff --git a/src/tape.jl b/src/tape.jl index 523e7ab8..3afa4f64 100644 --- a/src/tape.jl +++ b/src/tape.jl @@ -21,14 +21,18 @@ struct TapeContext <: Context tape::Union{Tape, Nothing} end -function set_tape(new_tape=nothing) - if new_tape === nothing - new_tape = Tape() - end +create_tape() = set_tape(Tape()) + +function set_tape(new_tape) push!(global_context, TapeContext(new_tape)) return new_tape end +function with_tape(block, tape=Tape()) + ctx = TapeContext(tape) + with_context(block, ctx) +end + function get_tape() tape_context = context_value(TapeContext) if tape_context === nothing From 8c39cf5aeba9acbe6b1a97b6c09f5d398da19e2d Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 19:14:08 -0400 Subject: [PATCH 44/49] Bump tf version --- deps/build.jl | 4 +- src/ops/imported_ops.jl | 9398 ++++++++++++++------------------------- 2 files changed, 3319 insertions(+), 6083 deletions(-) diff --git a/deps/build.jl b/deps/build.jl index caabe43d..50978a3a 100644 --- a/deps/build.jl +++ b/deps/build.jl @@ -1,8 +1,8 @@ using PyCall using Conda -const cur_version = "1.12.0" -const cur_py_version = "1.12.0" +const cur_version = "1.13.1" +const cur_py_version = "1.13.1" ############################ diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 0d141eb2..d0be97f8 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-03-04T16:35:07.066 +# Autogenerated on 2019-03-15T19:13:57.806 module Ops import TensorFlow @@ -10,7 +10,7 @@ import TensorFlow: Tensor """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) local desc tf.with_op_name(name, "ReduceJoin") do desc = tf.NodeDescription("ReduceJoin") @@ -62,7 +62,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) local desc tf.with_op_name(name, 
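# A usage sketch, not part of the patch: the reworked tape API in the tape.jl hunk above
# separates construction from scoping. create_tape() installs a fresh global tape, while
# with_tape scopes one to a block through the context stack. Assumes EagerTensor
# broadcasting and the grad/mean helpers that keras.jl relies on:
t = Tape()
w = convert(EagerTensor, [1.0, 2.0])
loss = with_tape(t) do
    mean(w .* w)             # eager ops record TapeNodes while `t` is active
end
grads = grad(t, loss, [w])   # gradients of `loss` with respect to `w`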
"ReduceDataset") do desc = tf.NodeDescription("ReduceDataset") @@ -142,7 +142,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListFromTensor") do desc = tf.NodeDescription("TensorListFromTensor") @@ -198,7 +198,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ExtractJpegShape") do desc = tf.NodeDescription("ExtractJpegShape") @@ -240,7 +240,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Svd") do desc = tf.NodeDescription("Svd") @@ -295,7 +295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextSync") do desc = tf.NodeDescription("IteratorGetNextSync") @@ -343,7 +343,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "RefEnter") do desc = tf.NodeDescription("RefEnter") @@ -399,7 +399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erf_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function erf_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erf") do desc = tf.NodeDescription("Erf") @@ -437,7 +437,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExportV2") do desc = tf.NodeDescription("LookupTableExportV2") @@ -478,7 +478,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function round_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function round_graph(x_; name=nothing) 
local desc tf.with_op_name(name, "Round") do desc = tf.NodeDescription("Round") @@ -513,10 +513,10 @@ end """ outfeed_dequeue(; device_ordinal=-1) - +Retrieves a single tensor from the computation outfeed. This operation will """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeue") do desc = tf.NodeDescription("OutfeedDequeue") @@ -566,7 +566,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") @@ -602,7 +602,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Merge") do desc = tf.NodeDescription("Merge") @@ -651,7 +651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "HistogramFixedWidth") do desc = tf.NodeDescription("HistogramFixedWidth") @@ -704,7 +704,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asin_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function asin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asin") do desc = tf.NodeDescription("Asin") @@ -742,7 +742,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Any") do desc = tf.NodeDescription("Any") @@ -791,7 +791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "RsqrtGrad") do desc = tf.NodeDescription("RsqrtGrad") @@ -834,7 +834,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; 
name=nothing) local desc tf.with_op_name(name, "TensorArrayScatter") do desc = tf.NodeDescription("TensorArrayScatter") @@ -884,7 +884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) local desc tf.with_op_name(name, "DynamicPartition") do desc = tf.NodeDescription("DynamicPartition") @@ -937,7 +937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") @@ -989,7 +989,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeState") do desc = tf.NodeDescription("ReaderSerializeState") @@ -1025,7 +1025,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function right_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function right_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RightShift") do desc = tf.NodeDescription("RightShift") @@ -1068,7 +1068,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3D") do desc = tf.NodeDescription("AvgPool3D") @@ -1130,7 +1130,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_png_graph(image_; name=nothing, compression=nothing) local desc tf.with_op_name(name, "EncodePng") do desc = tf.NodeDescription("EncodePng") @@ -1174,7 +1174,7 @@ end Debug Identity Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugIdentity") do desc = tf.NodeDescription("DebugIdentity") @@ -1236,7 +1236,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function imag_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function imag_graph(input_; name=nothing) local desc tf.with_op_name(name, "Imag") do desc = tf.NodeDescription("Imag") @@ -1274,7 +1274,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") @@ -1362,7 +1362,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageClear") do desc = tf.NodeDescription("StageClear") @@ -1424,7 +1424,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sign_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sign_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sign") do desc = tf.NodeDescription("Sign") @@ -1462,7 +1462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function population_count_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function population_count_graph(x_; name=nothing) local desc tf.with_op_name(name, "PopulationCount") do desc = tf.NodeDescription("PopulationCount") @@ -1500,7 +1500,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function neg_graph(x_; name=nothing) local desc tf.with_op_name(name, "Neg") do desc = tf.NodeDescription("Neg") @@ -1538,7 +1538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) local desc 
tf.with_op_name(name, "AnonymousIterator") do desc = tf.NodeDescription("AnonymousIterator") @@ -1582,7 +1582,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSum") do desc = tf.NodeDescription("SparseReduceSum") @@ -1632,13 +1632,55 @@ begin end +""" + string_length(input; unit=BYTE) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "StringLength") do + desc = tf.NodeDescription("StringLength") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + tf.Tensor(tf.Operation(desc)) + end + function string_length_eager(input_; name=nothing, unit=nothing) + desc = tf.EagerOp("StringLength") + input_ = convert(tf.EagerTensor, input_) + tf.add_input(desc, input_) + if unit !== nothing + desc["unit"] = Base.String(unit) + end + res = tf.execute(desc) + node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function string_length(input_; name=nothing, unit=nothing) + if tf.in_eager_mode() + string_length_eager(input_; name=name, unit=unit) + else + string_length_graph(input_; name=name, unit=unit) + end + end +end + + """ filter_dataset(input_dataset, other_arguments) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterDataset") do desc = tf.NodeDescription("FilterDataset") @@ -1696,55 +1738,13 @@ begin end -""" - string_length(input; unit=BYTE) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) - local desc - tf.with_op_name(name, "StringLength") do - desc = tf.NodeDescription("StringLength") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end - end - tf.Tensor(tf.Operation(desc)) - end - function string_length_eager(input_; name=nothing, unit=nothing) - desc = tf.EagerOp("StringLength") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end - res = tf.execute(desc) - node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function string_length(input_; name=nothing, unit=nothing) - if tf.in_eager_mode() - string_length_eager(input_; name=name, unit=unit) - else - string_length_graph(input_; name=name, unit=unit) - 
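The StringLength block added above is the template every op in this file now follows: a `*_graph` builder that records a NodeDescription, a `*_eager` twin that executes immediately and registers a TapeNode, and a bare-named dispatcher that chooses between them at call time. A self-contained toy of just the dispatch layer; `in_eager_mode` and the `my_op*` names are stand-ins, not real generated symbols:

    in_eager_mode() = false                              # stand-in for tf.in_eager_mode()
    my_op_graph(x; name=nothing) = "graph node for $x"   # stand-in for a *_graph builder
    my_op_eager(x; name=nothing) = "eager value for $x"  # stand-in for a *_eager executor

    function my_op(x; name=nothing)
        if in_eager_mode()
            my_op_eager(x; name=name)    # run immediately through the eager C API
        else
            my_op_graph(x; name=name)    # record a node in the default graph
        end
    end

    my_op(1.0)   # => "graph node for 1.0"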
end - end -end - - """ conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3D") do desc = tf.NodeDescription("Conv3D") @@ -1808,10 +1808,10 @@ end """ retrieve_tpu_embedding_adagrad_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") @@ -1872,7 +1872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_has_value_graph(optional_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_has_value_graph(optional_; name=nothing) local desc tf.with_op_name(name, "OptionalHasValue") do desc = tf.NodeDescription("OptionalHasValue") @@ -1908,7 +1908,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyAdam") do desc = tf.NodeDescription("ApplyAdam") @@ -2003,7 +2003,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsToCanonical") do desc = tf.NodeDescription("CudnnRNNParamsToCanonical") @@ -2100,7 +2100,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT3D") do desc = tf.NodeDescription("IRFFT3D") @@ -2140,7 +2140,7 
@@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function angle_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function angle_graph(input_; name=nothing) local desc tf.with_op_name(name, "Angle") do desc = tf.NodeDescription("Angle") @@ -2178,7 +2178,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") @@ -2222,7 +2222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LearnedUnigramCandidateSampler") do desc = tf.NodeDescription("LearnedUnigramCandidateSampler") @@ -2299,7 +2299,7 @@ end A graph node which represents an argument to a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Arg") do desc = tf.NodeDescription("_Arg") @@ -2337,7 +2337,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_square_root_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_square_root_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixSquareRoot") do desc = tf.NodeDescription("MatrixSquareRoot") @@ -2375,7 +2375,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseMul") do desc = tf.NodeDescription("SparseDenseCwiseMul") @@ -2426,7 +2426,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV3") do desc = tf.NodeDescription("TensorArrayConcatV3") @@ -2483,7 +2483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
unicode_script_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_script_graph(input_; name=nothing) local desc tf.with_op_name(name, "UnicodeScript") do desc = tf.NodeDescription("UnicodeScript") @@ -2519,7 +2519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "BatchCholeskyGrad") do desc = tf.NodeDescription("BatchCholeskyGrad") @@ -2562,7 +2562,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Mean") do desc = tf.NodeDescription("Mean") @@ -2613,7 +2613,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT") do desc = tf.NodeDescription("BatchFFT") @@ -2649,7 +2649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sin_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sin_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sin") do desc = tf.NodeDescription("Sin") @@ -2687,7 +2687,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") @@ -2731,7 +2731,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedMaxPool") do desc = tf.NodeDescription("QuantizedMaxPool") @@ -2800,7 +2800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, 
"OrderedMapStage") do desc = tf.NodeDescription("OrderedMapStage") @@ -2880,7 +2880,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "PartitionedCall") do desc = tf.NodeDescription("PartitionedCall") @@ -2952,7 +2952,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "SparseApplyAdagrad") do desc = tf.NodeDescription("SparseApplyAdagrad") @@ -3024,7 +3024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) local desc tf.with_op_name(name, "DecodeProtoV2") do desc = tf.NodeDescription("DecodeProtoV2") @@ -3101,7 +3101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) local desc tf.with_op_name(name, "Betainc") do desc = tf.NodeDescription("Betainc") @@ -3149,7 +3149,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function guarantee_const_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function guarantee_const_graph(input_; name=nothing) local desc tf.with_op_name(name, "GuaranteeConst") do desc = tf.NodeDescription("GuaranteeConst") @@ -3187,7 +3187,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) local desc tf.with_op_name(name, "DecodeBmp") do desc = tf.NodeDescription("DecodeBmp") @@ -3229,7 +3229,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesBucketize") do desc = 
tf.NodeDescription("BoostedTreesBucketize") @@ -3277,10 +3277,10 @@ end """ shutdown_distributed_tpu() - +An op that shuts down a running distributed TPU system. The Op returns """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "ShutdownDistributedTPU") do desc @@ -3313,7 +3313,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) local desc tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") @@ -3349,7 +3349,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function timestamp_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function timestamp_graph(; name=nothing) local desc tf.with_op_name(name, "Timestamp") do desc @@ -3382,7 +3382,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_exponential_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_exponential_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixExponential") do desc = tf.NodeDescription("MatrixExponential") @@ -3420,7 +3420,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Size") do desc = tf.NodeDescription("Size") @@ -3464,7 +3464,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "AddN") do desc = tf.NodeDescription("AddN") @@ -3508,7 +3508,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSum") do desc = tf.NodeDescription("SparseSegmentSum") @@ -3557,7 +3557,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDataset") do desc = tf.NodeDescription("BatchDataset") @@ -3609,7 +3609,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) local desc tf.with_op_name(name, "RecordInput") do desc = tf.NodeDescription("RecordInput") @@ -3683,7 +3683,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpToV2") do desc = tf.NodeDescription("QueueDequeueUpToV2") @@ -3732,10 +3732,10 @@ end """ retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") @@ -3793,10 +3793,10 @@ end """ load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters, ms, mom, gradient_accumulators; table_id=-1, table_name=) - +Load embedding parameters for a single table. 
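Ops like RecordInput carry a long tail of optional attributes, and every generated body normalizes each kind before storing it on the description: `Base.String` for string attrs, `Base.Int` for integer attrs, and `map(Base.identity, ...)` for list attrs (all three coercions appear verbatim in the StringLength and CudnnRNNBackpropV3 blocks in this section). A toy mirror of that step; `coerce_attr` is an illustrative name, not package API:

    coerce_attr(v::AbstractString) = String(v)         # Base.String(...) in the generated code
    coerce_attr(v::Integer)        = Int(v)            # Base.Int(...)
    coerce_attr(v::AbstractVector) = map(identity, v)  # map(Base.identity, ...) for list attrs

    coerce_attr("NHWC"), coerce_attr(Int32(2)), coerce_attr([1, 1, 1, 1])
    # => ("NHWC", 2, [1, 1, 1, 1])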
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -3868,7 +3868,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) local desc tf.with_op_name(name, "SerializeTensor") do desc = tf.NodeDescription("SerializeTensor") @@ -3906,7 +3906,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mul_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mul_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mul") do desc = tf.NodeDescription("Mul") @@ -3949,7 +3949,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") @@ -3997,7 +3997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterDiv") do desc = tf.NodeDescription("ResourceScatterDiv") @@ -4052,7 +4052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDatasetV2") do desc = tf.NodeDescription("FixedLengthRecordDatasetV2") @@ -4108,7 +4108,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "SkipDataset") do 
desc = tf.NodeDescription("SkipDataset") @@ -4160,7 +4160,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cosh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cosh") do desc = tf.NodeDescription("Cosh") @@ -4198,7 +4198,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormV2") do desc = tf.NodeDescription("FusedBatchNormV2") @@ -4286,7 +4286,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplit") do desc = tf.NodeDescription("TensorArraySplit") @@ -4336,7 +4336,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) local desc tf.with_op_name(name, "CTCLoss") do desc = tf.NodeDescription("CTCLoss") @@ -4407,7 +4407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "QuantizedReshape") do desc = tf.NodeDescription("QuantizedReshape") @@ -4464,7 +4464,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorDiv") do desc = tf.NodeDescription("FloorDiv") @@ -4507,7 +4507,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, 
"TensorArrayV2") do desc = tf.NodeDescription("TensorArrayV2") @@ -4573,7 +4573,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "BarrierClose") do desc = tf.NodeDescription("BarrierClose") @@ -4615,7 +4615,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ReadVariableOp") do desc = tf.NodeDescription("ReadVariableOp") @@ -4657,7 +4657,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedMul") do desc = tf.NodeDescription("QuantizedMul") @@ -4722,7 +4722,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function selu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Selu") do desc = tf.NodeDescription("Selu") @@ -4754,150 +4754,13 @@ begin end -""" - cudnn_rnn_backprop_v3(input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNBackpropV3") do - desc = tf.NodeDescription("CudnnRNNBackpropV3") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - sequence_lengths_ = convert(Tensor{Int32}, sequence_lengths_) - output_ = convert(Tensor{Any}, output_) - output_h_ = convert(Tensor{Any}, output_h_) - output_c_ = convert(Tensor{Any}, output_c_) - output_backprop_ = convert(Tensor{Any}, output_backprop_) - output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) - output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) - reserve_space_ = convert(Tensor{Any}, reserve_space_) - host_reserved_ = convert(Tensor{Any}, host_reserved_) - (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, 
output_h_backprop_, output_c_backprop_, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, sequence_lengths_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - tf.add_input(desc, host_reserved_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNBackpropV3") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - sequence_lengths_ = convert(tf.EagerTensor, sequence_lengths_) - output_ = convert(tf.EagerTensor, output_) - output_h_ = convert(tf.EagerTensor, output_h_) - output_c_ = convert(tf.EagerTensor, output_c_) - output_backprop_ = convert(tf.EagerTensor, output_backprop_) - output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) - output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) - reserve_space_ = convert(tf.EagerTensor, reserve_space_) - host_reserved_ = convert(tf.EagerTensor, host_reserved_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, sequence_lengths_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - tf.add_input(desc, host_reserved_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - desc["T"] = tf.data_type(output_) - desc["T"] = tf.data_type(output_h_) - desc["T"] = tf.data_type(output_c_) - desc["T"] = tf.data_type(output_backprop_) - desc["T"] = tf.data_type(output_h_backprop_) - desc["T"] = tf.data_type(output_c_backprop_) - desc["T"] = tf.data_type(reserve_space_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_backprop_v3, [input_, input_h_, 
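The CudnnRNNBackpropV3 block being removed here also shows why the graph builders funnel their inputs through `tf.tf_promote` before anything else: the eager twin then assigns `desc["T"] = tf.data_type(...)` once per input, and that last-write-wins sequence is only safe because promotion already gave every input the same element type. A toy version of the promotion step, with `tf_promote` as a local stand-in for the package's function:

    # Promote all inputs to one common element type so a single shared "T"
    # attribute can describe them all.
    tf_promote(xs...) = map(x -> convert.(promote_type(map(eltype, xs)...), x), xs)

    a, b = tf_promote([1, 2], [0.5, 1.5])
    eltype(a), eltype(b)   # => (Float64, Float64)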
input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function cudnn_rnn_backprop_v3(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_backprop_v3_eager(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_backprop_v3_graph(input_, input_h_, input_c_, params_, sequence_lengths_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - end - end -end - - """ lookup_table_insert(table_handle, keys, values) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsert") do desc = tf.NodeDescription("LookupTableInsert") @@ -4945,7 +4808,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_abs_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function complex_abs_graph(x_; name=nothing) local desc tf.with_op_name(name, "ComplexAbs") do desc = tf.NodeDescription("ComplexAbs") @@ -4978,43 +4841,38 @@ end """ - tridiagonal_solve(diagonals, rhs) + abs(x) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tridiagonal_solve_graph(diagonals_, rhs_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function abs_graph(x_; name=nothing) local desc - tf.with_op_name(name, "TridiagonalSolve") do - desc = tf.NodeDescription("TridiagonalSolve") - diagonals_ = convert(Tensor{Any}, diagonals_) - rhs_ = convert(Tensor{Any}, rhs_) - (diagonals_, rhs_) = tf.tf_promote(diagonals_, rhs_) - tf.add_input(desc, diagonals_) - tf.add_input(desc, rhs_) + tf.with_op_name(name, "Abs") do + desc = tf.NodeDescription("Abs") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end tf.Tensor(tf.Operation(desc)) end - function tridiagonal_solve_eager(diagonals_, rhs_; name=nothing) - desc = tf.EagerOp("TridiagonalSolve") - diagonals_ = convert(tf.EagerTensor, diagonals_) - rhs_ = convert(tf.EagerTensor, rhs_) - tf.add_input(desc, diagonals_) - tf.add_input(desc, rhs_) - desc["T"] = tf.data_type(diagonals_) - desc["T"] = tf.data_type(rhs_) + function abs_eager(x_; name=nothing) + desc = tf.EagerOp("Abs") + x_ = convert(tf.EagerTensor, x_) + tf.add_input(desc, x_) + desc["T"] = 
tf.data_type(x_) res = tf.execute(desc) - node = tf.TapeNode(tridiagonal_solve, [diagonals_, rhs_], name=nothing, res) + node = tf.TapeNode(abs, [x_], name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tridiagonal_solve(diagonals_, rhs_; name=nothing) + function abs(x_; name=nothing) if tf.in_eager_mode() - tridiagonal_solve_eager(diagonals_, rhs_; name=name) + abs_eager(x_; name=name) else - tridiagonal_solve_graph(diagonals_, rhs_; name=name) + abs_graph(x_; name=name) end end end @@ -5026,7 +4884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImport") do desc = tf.NodeDescription("LookupTableImport") @@ -5068,51 +4926,13 @@ begin end -""" - abs(x) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abs_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Abs") do - desc = tf.NodeDescription("Abs") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) - end - function abs_eager(x_; name=nothing) - desc = tf.EagerOp("Abs") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(abs, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function abs(x_; name=nothing) - if tf.in_eager_mode() - abs_eager(x_; name=name) - else - abs_graph(x_; name=name) - end - end -end - - """ resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad; use_locking=false, use_nesterov=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyAdam") do desc = tf.NodeDescription("ResourceApplyAdam") @@ -5204,7 +5024,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) local desc tf.with_op_name(name, "WriteHistogramSummary") do desc = tf.NodeDescription("WriteHistogramSummary") @@ -5248,53 +5068,13 @@ begin end -""" - experimental_indexed_dataset_materialize(dataset, materialized) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do - desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(Tensor{Any}, dataset_) - materialized_ = 
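The Abs block that replaces TridiagonalSolve above is the smallest complete example of the eager half of the pattern: after `tf.execute`, the result is wrapped in a `tf.TapeNode` recording the op and its inputs, and `tf.add_node` attaches it to the first output so a later backward pass can rediscover how that value was produced. A self-contained toy of the bookkeeping; `TapeNode` and `add_node` here are local stand-ins, not the package's types:

    struct TapeNode
        op::Function           # the op that produced the output
        inputs::Vector{Any}    # the inputs it consumed
    end

    const TAPE = IdDict{Any,TapeNode}()    # output value => provenance
    add_node(output, node::TapeNode) = (TAPE[output] = node; output)

    y = abs.([-1.0, 2.0])                         # pretend eager Abs result
    add_node(y, TapeNode(abs, Any[[-1.0, 2.0]]))
    TAPE[y].op                                    # => abs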
convert(Tensor{Any}, materialized_) - tf.add_input(desc, dataset_) - tf.add_input(desc, materialized_) - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) - desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(tf.EagerTensor, dataset_) - materialized_ = convert(tf.EagerTensor, materialized_) - tf.add_input(desc, dataset_) - tf.add_input(desc, materialized_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) - if tf.in_eager_mode() - experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) - else - experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) - end - end -end - - """ _host_send(tensor; client_terminated=false) Sends the named tensor from send_device to recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostSend") do desc = tf.NodeDescription("_HostSend") @@ -5356,13 +5136,53 @@ begin end +""" + experimental_indexed_dataset_materialize(dataset, materialized) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(Tensor{Any}, dataset_) + materialized_ = convert(Tensor{Any}, materialized_) + tf.add_input(desc, dataset_) + tf.add_input(desc, materialized_) + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) + desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(tf.EagerTensor, dataset_) + materialized_ = convert(tf.EagerTensor, materialized_) + tf.add_input(desc, dataset_) + tf.add_input(desc, materialized_) + res = tf.execute(desc) + node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) + if tf.in_eager_mode() + experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) + else + experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) + end + end +end + + """ greater(x, y) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function greater_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Greater") do desc = 
tf.NodeDescription("Greater") @@ -5405,7 +5225,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "NcclBroadcast") do desc = tf.NodeDescription("NcclBroadcast") @@ -5449,7 +5269,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBackBatch") do desc = tf.NodeDescription("TensorListPushBackBatch") @@ -5497,7 +5317,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMin") do desc = tf.NodeDescription("ResourceScatterMin") @@ -5552,7 +5372,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) local desc tf.with_op_name(name, "Slice") do desc = tf.NodeDescription("Slice") @@ -5608,7 +5428,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecode") do desc = tf.NodeDescription("UnicodeDecode") @@ -5673,7 +5493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TakeDataset") do desc = tf.NodeDescription("TakeDataset") @@ -5725,7 +5545,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) local desc 
tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") @@ -5791,7 +5611,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "AllCandidateSampler") do desc = tf.NodeDescription("AllCandidateSampler") @@ -5857,12 +5677,12 @@ end """ - conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropInput") do desc = tf.NodeDescription("Conv2DBackpropInput") @@ -5882,9 +5702,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) - end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -5894,7 +5711,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropInput") input_sizes_ = convert(tf.EagerTensor, input_sizes_) filter_ = convert(tf.EagerTensor, filter_) @@ -5911,9 +5728,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) - end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -5923,17 +5737,17 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) + node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function conv2d_backprop_input(input_sizes_, 
filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() - conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) else - conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) end end end @@ -5945,7 +5759,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "DatasetToSingleElement") do desc = tf.NodeDescription("DatasetToSingleElement") @@ -5993,7 +5807,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "CacheDataset") do desc = tf.NodeDescription("CacheDataset") @@ -6045,7 +5859,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") @@ -6110,7 +5924,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedResizeAndPadConv2D") do desc = tf.NodeDescription("FusedResizeAndPadConv2D") @@ -6185,7 +5999,7 @@ 
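Conv2DBackpropInput loses its explicit_paddings attribute in this hunk, plausibly because the file was regenerated against an older op registry that predates the attribute; since every optional attribute lives behind an `if ... !== nothing` guard, dropping one simply deletes its guard from all three functions and shortens the shared keyword list. A toy of that guard style; `set_optional_attr!` is an illustrative helper, not package API:

    # Write an attribute only when the caller supplied it, mirroring the
    # `if padding !== nothing ... end` guards in the generated bodies.
    function set_optional_attr!(desc::Dict{String,Any}, key::String, value)
        value === nothing && return desc
        desc[key] = value
        return desc
    end

    desc = Dict{String,Any}()
    set_optional_attr!(desc, "padding", "SAME")
    set_optional_attr!(desc, "dilations", nothing)   # omitted, key never created
    desc                                             # Dict("padding" => "SAME")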
end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) local desc tf.with_op_name(name, "Batch") do desc = tf.NodeDescription("Batch") @@ -6286,7 +6100,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastRecv") do desc = tf.NodeDescription("CollectiveBcastRecv") @@ -6342,7 +6156,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) local desc tf.with_op_name(name, "BatchToSpaceND") do desc = tf.NodeDescription("BatchToSpaceND") @@ -6392,7 +6206,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function loop_cond_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function loop_cond_graph(input_; name=nothing) local desc tf.with_op_name(name, "LoopCond") do desc = tf.NodeDescription("LoopCond") @@ -6428,7 +6242,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "DepthToSpace") do desc = tf.NodeDescription("DepthToSpace") @@ -6478,7 +6292,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) local desc tf.with_op_name(name, "DestroyTemporaryVariable") do desc = tf.NodeDescription("DestroyTemporaryVariable") @@ -6522,7 +6336,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNN") do desc = tf.NodeDescription("CudnnRNN") @@ -6622,7 +6436,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "RefIdentity") do desc = tf.NodeDescription("RefIdentity") @@ -6660,7 +6474,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGrad") do desc = tf.NodeDescription("MaxPool3DGrad") @@ -6730,10 +6544,10 @@ end """ load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters, momenta, gradient_accumulators; table_id=-1, table_name=) - +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") @@ -6795,75 +6609,13 @@ begin end -""" - padding_fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PaddingFIFOQueueV2") do - desc = tf.NodeDescription("PaddingFIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("PaddingFIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] 
= Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end -end - - """ conv3d_backprop_input(input, filter, out_backprop; dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInput") do desc = tf.NodeDescription("Conv3DBackpropInput") @@ -6923,13 +6675,75 @@ begin end +""" + padding_fifo_queue_v2(; shapes=Int64[], capacity=-1, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueueV2") do + desc = tf.NodeDescription("PaddingFIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PaddingFIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + res = tf.execute(desc) + node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, 
shared_name=nothing) + if tf.in_eager_mode() + padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end + end +end + + """ ref_exit(data) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_exit_graph(data_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefExit") do desc = tf.NodeDescription("RefExit") @@ -6967,7 +6781,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapClear") do desc = tf.NodeDescription("MapClear") @@ -7029,7 +6843,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) local desc tf.with_op_name(name, "EncodeWav") do desc = tf.NodeDescription("EncodeWav") @@ -7069,7 +6883,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) local desc tf.with_op_name(name, "TensorSummaryV2") do desc = tf.NodeDescription("TensorSummaryV2") @@ -7115,7 +6929,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueUpTo") do desc = tf.NodeDescription("QueueDequeueUpTo") @@ -7167,7 +6981,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "MatrixBandPart") do desc = tf.NodeDescription("MatrixBandPart") @@ -7216,7 +7030,7 @@ end Copy Op. 
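Every public wrapper in this generated file compiles down to the same two-way dispatch, and `copy` is a convenient place to spell it out. A minimal sketch, assuming `x` is an existing tensor, using the generated `copy_graph`/`copy_eager` pair (the eager method is elided from this hunk but follows the same template as the other ops in this file; `tensor_name` is an optional attribute of the underlying Copy kernel):

```julia
# The public `copy` wrapper is equivalent to this explicit dispatch:
y = if tf.in_eager_mode()
        copy_eager(x; tensor_name="copy_demo")  # executes now, records a TapeNode
    else
        copy_graph(x; tensor_name="copy_demo")  # returns a symbolic graph Tensor
    end
```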
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "Copy") do desc = tf.NodeDescription("Copy") @@ -7266,7 +7080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) local desc tf.with_op_name(name, "ShapeN") do desc = tf.NodeDescription("ShapeN") @@ -7321,7 +7135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ExperimentalParseExampleDataset") do desc = tf.NodeDescription("ExperimentalParseExampleDataset") @@ -7413,7 +7227,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) local desc tf.with_op_name(name, "Concat") do desc = tf.NodeDescription("Concat") @@ -7461,7 +7275,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatDimMap") do desc = tf.NodeDescription("DataFormatDimMap") @@ -7511,7 +7325,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReader") do desc = tf.NodeDescription("IdentityReader") @@ -7555,7 +7369,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softplus_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softplus") do desc = tf.NodeDescription("Softplus") @@ -7593,7 +7407,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") @@ -7667,7 +7481,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleSequenceExample") do desc = tf.NodeDescription("ParseSingleSequenceExample") @@ -7796,7 +7610,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixDiag") do desc = tf.NodeDescription("MatrixDiag") @@ -7834,7 +7648,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fact_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fact_graph(; name=nothing) local desc tf.with_op_name(name, "Fact") do desc @@ -7861,69 +7675,13 @@ begin end -""" - shard_dataset(input_dataset, num_shards, index) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shard_dataset_graph(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ShardDataset") do - desc = tf.NodeDescription("ShardDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_shards_ = convert(Tensor{Int64}, num_shards_) - index_ = convert(Tensor{Int64}, index_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_shards_) - tf.add_input(desc, index_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - 
function shard_dataset_eager(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ShardDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - num_shards_ = convert(tf.EagerTensor, num_shards_) - index_ = convert(tf.EagerTensor, index_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_shards_) - tf.add_input(desc, index_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(shard_dataset, [input_dataset_, num_shards_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function shard_dataset(input_dataset_, num_shards_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - shard_dataset_eager(input_dataset_, num_shards_, index_; name=name, output_types=output_types, output_shapes=output_shapes) - else - shard_dataset_graph(input_dataset_, num_shards_, index_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ max_pool_grad_grad(orig_input, orig_output, grad; data_format=NHWC) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGrad") do desc = tf.NodeDescription("MaxPoolGradGrad") @@ -7995,7 +7753,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinearGrad") do desc = tf.NodeDescription("ResizeBilinearGrad") @@ -8043,7 +7801,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "BatchToSpace") do desc = tf.NodeDescription("BatchToSpace") @@ -8094,7 +7852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) local desc tf.with_op_name(name, "OptionalFromValue") do desc = tf.NodeDescription("OptionalFromValue") @@ -8136,7 +7894,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xlogy_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# 
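# Xlogy computes x .* log.(y) elementwise, with the convention that the
# result is 0 wherever x == 0 (avoiding the 0 * log(0) = NaN case); the
# wrapper below is the standard generated template applied to "Xlogy".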
tf.@op function xlogy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xlogy") do desc = tf.NodeDescription("Xlogy") @@ -8179,7 +7937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_graph(a_, b_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cross_graph(a_, b_; name=nothing) local desc tf.with_op_name(name, "Cross") do desc = tf.NodeDescription("Cross") @@ -8222,7 +7980,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseAnd") do desc = tf.NodeDescription("BitwiseAnd") @@ -8265,7 +8023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) local desc tf.with_op_name(name, "BroadcastTo") do desc = tf.NodeDescription("BroadcastTo") @@ -8309,7 +8067,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "EluGrad") do desc = tf.NodeDescription("EluGrad") @@ -8352,7 +8110,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackprop") do desc = tf.NodeDescription("CudnnRNNBackprop") @@ -8481,7 +8239,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucketFast") do desc = tf.NodeDescription("StringToHashBucketFast") @@ -8523,7 +8281,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc 
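# Generated graph-mode builder template, instantiated for "MutableHashTable":
# open a name scope, allocate the NodeDescription, and (in the attribute
# block elided from this hunk) copy each supplied keyword attribute
# (container, shared_name, use_node_name_sharing, key_dtype, value_dtype)
# into the description before wrapping the finished Operation in a Tensor.
# MutableHashTable takes no tensor inputs, so there are no add_input calls.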
tf.with_op_name(name, "MutableHashTable") do desc = tf.NodeDescription("MutableHashTable") @@ -8585,7 +8343,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu") do desc = tf.NodeDescription("Relu") @@ -8623,7 +8381,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) local desc tf.with_op_name(name, "NthElement") do desc = tf.NodeDescription("NthElement") @@ -8671,7 +8429,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softsign_graph(features_; name=nothing) local desc tf.with_op_name(name, "Softsign") do desc = tf.NodeDescription("Softsign") @@ -8709,7 +8467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTable") do desc = tf.NodeDescription("MutableDenseHashTable") @@ -8795,7 +8553,7 @@ end An op that shuts down a running distributed TPU system. 
The Op returns """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) local desc tf.with_op_name(name, "_ShutdownDistributedTPU") do desc @@ -8828,7 +8586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function polygamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function polygamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Polygamma") do desc = tf.NodeDescription("Polygamma") @@ -8871,7 +8629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) local desc tf.with_op_name(name, "NcclReduce") do desc = tf.NodeDescription("NcclReduce") @@ -8921,7 +8679,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMax") do desc = tf.NodeDescription("ArgMax") @@ -8972,7 +8730,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "MatrixSetDiag") do desc = tf.NodeDescription("MatrixSetDiag") @@ -9015,7 +8773,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) local desc tf.with_op_name(name, "SpaceToBatchND") do desc = tf.NodeDescription("SpaceToBatchND") @@ -9065,7 +8823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReshape") do desc = tf.NodeDescription("SparseReshape") @@ -9114,7 +8872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "OptimizeDataset") do desc = tf.NodeDescription("OptimizeDataset") @@ -9166,7 +8924,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatV2") do desc = tf.NodeDescription("ConcatV2") @@ -9217,7 +8975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdadelta") do desc = tf.NodeDescription("ResourceSparseApplyAdadelta") @@ -9295,7 +9053,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tile_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "Tile") do desc = tf.NodeDescription("Tile") @@ -9339,7 +9097,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MutexV2") do desc = tf.NodeDescription("MutexV2") @@ -9383,7 +9141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeManySparse") do desc = tf.NodeDescription("SerializeManySparse") @@ -9432,10 +9190,10 @@ end """ tpu_embedding_activations(embedding_variable, sliced_activations) - +An op enabling differentiation of TPU Embeddings. 
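A hypothetical call sketch (the tensor arguments and attribute values are placeholders, not defaults; `table_id` names the embedding table in the layer configuration and `lookup_id` identifies the lookup that produced the activations):

```julia
# embedding_variable and sliced_activations come from the surrounding
# TPU embedding setup; 0/0 are placeholder attribute values.
acts = tpu_embedding_activations(embedding_variable, sliced_activations;
                                 table_id=0, lookup_id=0)
```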
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) local desc tf.with_op_name(name, "TPUEmbeddingActivations") do desc = tf.NodeDescription("TPUEmbeddingActivations") @@ -9487,7 +9245,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "BatchMatrixSolveLs") do desc = tf.NodeDescription("BatchMatrixSolveLs") @@ -9540,7 +9298,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function not_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function not_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "NotEqual") do desc = tf.NodeDescription("NotEqual") @@ -9583,7 +9341,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lgamma_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lgamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Lgamma") do desc = tf.NodeDescription("Lgamma") @@ -9616,12 +9374,12 @@ end """ - tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) + tpu_replicate_metadata(; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], computation_shape=Int64[], host_compute_core=Int64[]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) local desc tf.with_op_name(name, "TPUReplicateMetadata") do desc = tf.NodeDescription("TPUReplicateMetadata") @@ -9646,16 +9404,10 @@ begin if host_compute_core !== nothing desc["host_compute_core"] = map(Base.identity, host_compute_core) end - if padding_map !== nothing - desc["padding_map"] = map(Base.identity, padding_map) - end - if step_marker_location !== nothing - desc["step_marker_location"] = Base.String(step_marker_location) - end end tf.Tensor(tf.Operation(desc)) end - function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, 
padding_map=nothing, step_marker_location=nothing) + function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) desc = tf.EagerOp("TPUReplicateMetadata") if num_replicas !== nothing desc["num_replicas"] = Base.Int(num_replicas) @@ -9678,24 +9430,18 @@ begin if host_compute_core !== nothing desc["host_compute_core"] = map(Base.identity, host_compute_core) end - if padding_map !== nothing - desc["padding_map"] = map(Base.identity, padding_map) - end - if step_marker_location !== nothing - desc["step_marker_location"] = Base.String(step_marker_location) - end res = tf.execute(desc) - node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing, res) + node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, padding_map=nothing, step_marker_location=nothing) + function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) if tf.in_eager_mode() - tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core, padding_map=padding_map, step_marker_location=step_marker_location) + tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) else - tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core, padding_map=padding_map, step_marker_location=step_marker_location) + tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) end end end @@ -9707,7 +9453,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, 
display_name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolHandle") do desc = tf.NodeDescription("ExperimentalThreadPoolHandle") @@ -9769,7 +9515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "SelfAdjointEig") do desc = tf.NodeDescription("SelfAdjointEig") @@ -9807,7 +9553,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") @@ -9854,7 +9600,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseDiv") do desc = tf.NodeDescription("SparseDenseCwiseDiv") @@ -9905,7 +9651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acos_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function acos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acos") do desc = tf.NodeDescription("Acos") @@ -9943,7 +9689,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "All") do desc = tf.NodeDescription("All") @@ -9992,7 +9738,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) local desc tf.with_op_name(name, "CompareAndBitpack") do desc = tf.NodeDescription("CompareAndBitpack") @@ -10035,7 +9781,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "VarHandleOp") do desc = tf.NodeDescription("VarHandleOp") @@ -10091,7 +9837,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUniqueDataset") do desc = tf.NodeDescription("ExperimentalUniqueDataset") @@ -10133,110 +9879,13 @@ begin end -""" - quantized_conv2d_with_bias_sum_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter, summand; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndRelu") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndRelu") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Float32}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - summand_ = convert(Tensor{Float32}, summand_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndRelu") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - summand_ = convert(tf.EagerTensor, summand_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - 
desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_sum_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_sum_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_sum_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ list_diff(x, y; out_idx=Int32) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "ListDiff") do desc = tf.NodeDescription("ListDiff") @@ -10290,7 +9939,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryFileWriter") do desc = tf.NodeDescription("CreateSummaryFileWriter") @@ -10342,7 +9991,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) local desc tf.with_op_name(name, "GenerateVocabRemapping") do desc = tf.NodeDescription("GenerateVocabRemapping") @@ -10405,7 +10054,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixInverse") do desc = tf.NodeDescription("BatchMatrixInverse") @@ -10449,7 +10098,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function control_trigger_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function control_trigger_graph(; name=nothing) local desc tf.with_op_name(name, "ControlTrigger") do desc @@ -10476,46 +10125,13 @@ begin end -""" - 
tpu_ordinal_selector() - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_ordinal_selector_graph(; name=nothing) - local desc - tf.with_op_name(name, "TPUOrdinalSelector") do - desc - tf.NodeDescription("TPUOrdinalSelector") - end - tf.Tensor(tf.Operation(desc)) - end - function tpu_ordinal_selector_eager(; name=nothing) - desc = tf.EagerOp("TPUOrdinalSelector") - res = tf.execute(desc) - node = tf.TapeNode(tpu_ordinal_selector, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function tpu_ordinal_selector(; name=nothing) - if tf.in_eager_mode() - tpu_ordinal_selector_eager(; name=name) - else - tpu_ordinal_selector_graph(; name=name) - end - end -end - - """ stop_gradient(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stop_gradient_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stop_gradient_graph(input_; name=nothing) local desc tf.with_op_name(name, "StopGradient") do desc = tf.NodeDescription("StopGradient") @@ -10553,7 +10169,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "Split") do desc = tf.NodeDescription("Split") @@ -10607,7 +10223,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) local desc tf.with_op_name(name, "Unpack") do desc = tf.NodeDescription("Unpack") @@ -10668,7 +10284,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMax") do desc = tf.NodeDescription("ResourceScatterMax") @@ -10723,7 +10339,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWrite") do desc = tf.NodeDescription("TensorArrayWrite") @@ -10773,7 +10389,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) local desc tf.with_op_name(name, "Fill") do desc = tf.NodeDescription("Fill") @@ -10817,116 +10433,13 @@ begin end -""" - quantized_conv2d_with_bias_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, 
max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - desc["Tbias"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_and_requantize, [input_, filter_, bias_, min_input_, 
max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ softmax(logits) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softmax_graph(logits_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "Softmax") do desc = tf.NodeDescription("Softmax") @@ -10964,7 +10477,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubic") do desc = tf.NodeDescription("ResizeBicubic") @@ -11009,10 +10522,10 @@ end """ infeed_dequeue_tuple() - +A placeholder op for multiple values that will be fed into the computation """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "InfeedDequeueTuple") do desc = tf.NodeDescription("InfeedDequeueTuple") @@ -11056,7 +10569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIterator") do desc = tf.NodeDescription("MultiDeviceIterator") @@ -11118,7 +10631,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, 
use_quote_delim=nothing, na_value=nothing, select_cols=nothing) local desc tf.with_op_name(name, "DecodeCSV") do desc = tf.NodeDescription("DecodeCSV") @@ -11188,7 +10701,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFind") do desc = tf.NodeDescription("LookupTableFind") @@ -11236,7 +10749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleAndRepeatDataset") do desc = tf.NodeDescription("ShuffleAndRepeatDataset") @@ -11294,70 +10807,13 @@ begin end -""" - requantization_range_per_channel(input, input_min, input_max) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_per_channel_graph(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) - local desc - tf.with_op_name(name, "RequantizationRangePerChannel") do - desc = tf.NodeDescription("RequantizationRangePerChannel") - input_ = convert(Tensor{Float32}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if clip_value_max !== nothing - desc["clip_value_max"] = Base.identity(clip_value_max) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function requantization_range_per_channel_eager(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) - desc = tf.EagerOp("RequantizationRangePerChannel") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if clip_value_max !== nothing - desc["clip_value_max"] = Base.identity(clip_value_max) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(requantization_range_per_channel, [input_, input_min_, input_max_], name=nothing, clip_value_max=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function requantization_range_per_channel(input_, input_min_, input_max_; name=nothing, clip_value_max=nothing) - if tf.in_eager_mode() - requantization_range_per_channel_eager(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) - else - requantization_range_per_channel_graph(input_, input_min_, input_max_; name=name, clip_value_max=clip_value_max) - end - end -end - - """ experimental_unbatch_dataset(input_dataset) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; 
name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalUnbatchDataset") do desc = tf.NodeDescription("ExperimentalUnbatchDataset") @@ -11405,7 +10861,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool3DGrad") do desc = tf.NodeDescription("AvgPool3DGrad") @@ -11471,7 +10927,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderWithDefault") do desc = tf.NodeDescription("PlaceholderWithDefault") @@ -11521,7 +10977,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTableV2") do desc = tf.NodeDescription("InitializeTableV2") @@ -11569,7 +11025,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SetSize") do desc = tf.NodeDescription("SetSize") @@ -11621,7 +11077,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) local desc tf.with_op_name(name, "Assert") do desc = tf.NodeDescription("Assert") @@ -11673,7 +11129,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV4") do desc = tf.NodeDescription("NonMaxSuppressionV4") @@ -11739,7 +11195,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") @@ -11826,7 +11282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, "InitializeTableFromTextFile") do desc = tf.NodeDescription("InitializeTableFromTextFile") @@ -11890,7 +11346,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSize") do desc = tf.NodeDescription("LookupTableSize") @@ -11926,7 +11382,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdagradDA") do desc = tf.NodeDescription("SparseApplyAdagradDA") @@ -12011,7 +11467,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastGradientArgs") do desc = tf.NodeDescription("BroadcastGradientArgs") @@ -12059,7 +11515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) local desc tf.with_op_name(name, "SummaryWriter") do desc = tf.NodeDescription("SummaryWriter") @@ -12097,62 +11553,13 @@ begin end -""" - recv_tpu_embedding_activations() - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function recv_tpu_embedding_activations_graph(; 
name=nothing, num_outputs=nothing, config=nothing) - local desc - tf.with_op_name(name, "RecvTPUEmbeddingActivations") do - desc = tf.NodeDescription("RecvTPUEmbeddingActivations") - if num_outputs !== nothing - desc["num_outputs"] = Base.Int(num_outputs) - end - if config !== nothing - desc["config"] = Base.String(config) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_outputs - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing) - desc = tf.EagerOp("RecvTPUEmbeddingActivations") - if num_outputs !== nothing - desc["num_outputs"] = Base.Int(num_outputs) - end - if config !== nothing - desc["config"] = Base.String(config) - end - res = tf.execute(desc) - node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) - if tf.in_eager_mode() - recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) - else - recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) - end - end -end - - """ _while(input) output = input; While (Cond(output)) { output = Body(output) } """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "_While") do desc = tf.NodeDescription("_While") @@ -12200,13 +11607,62 @@ begin end +""" + recv_tpu_embedding_activations() + +An op that receives embedding activations on the TPU. 
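+
+Example sketch (hypothetical names; assumes a configured TPU embedding system
+and a serialized `TPUEmbeddingConfiguration` string `config_proto`):
+
+    # Dispatches to the eager variant when tf.in_eager_mode() is true,
+    # otherwise builds graph nodes; returns `num_outputs` activation tensors.
+    activations = recv_tpu_embedding_activations(num_outputs=2, config=config_proto)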
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) + local desc + tf.with_op_name(name, "RecvTPUEmbeddingActivations") do + desc = tf.NodeDescription("RecvTPUEmbeddingActivations") + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + if config !== nothing + desc["config"] = Base.String(config) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_outputs + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing) + desc = tf.EagerOp("RecvTPUEmbeddingActivations") + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + if config !== nothing + desc["config"] = Base.String(config) + end + res = tf.execute(desc) + node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + if tf.in_eager_mode() + recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) + else + recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) + end + end +end + + """ initialize_table(table_handle, keys, values) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "InitializeTable") do desc = tf.NodeDescription("InitializeTable") @@ -12254,7 +11710,7 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNumericSummary") do desc = tf.NodeDescription("DebugNumericSummary") @@ -12331,10 +11787,10 @@ end """ retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") @@ -12395,7 +11851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tanh") do desc = tf.NodeDescription("Tanh") @@ -12433,7 +11889,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "SymbolicGradient") do desc = tf.NodeDescription("SymbolicGradient") @@ -12487,7 +11943,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") @@ -12567,7 +12023,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ApplyMomentum") do desc = tf.NodeDescription("ApplyMomentum") @@ -12637,7 +12093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderRead") do desc = tf.NodeDescription("ReaderRead") @@ -12682,7 +12138,7 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) local desc tf.with_op_name(name, "_WaitForDistributedTPU") do desc = tf.NodeDescription("_WaitForDistributedTPU") @@ -12730,7 +12186,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) local desc tf.with_op_name(name, "MutexLock") do desc = tf.NodeDescription("MutexLock") @@ -12766,7 +12222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) local desc tf.with_op_name(name, "AccumulatorSetGlobalStep") do desc = tf.NodeDescription("AccumulatorSetGlobalStep") @@ -12806,7 +12262,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) local desc tf.with_op_name(name, "QuantizedAdd") do desc = tf.NodeDescription("QuantizedAdd") @@ -12871,7 +12327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) local desc tf.with_op_name(name, "Squeeze") do desc = tf.NodeDescription("Squeeze") @@ -12915,7 +12371,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) local desc tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") @@ -12951,7 +12407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") @@ -12989,73 +12445,13 @@ begin end -""" - load_tpu_embedding_stochastic_gradient_descent_parameters(parameters; table_id=-1, table_name=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do - desc = 
tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - tf.add_input(desc, parameters_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - tf.add_input(desc, parameters_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end -end - - """ no_op() """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function no_op_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function no_op_graph(; name=nothing) local desc tf.with_op_name(name, "NoOp") do desc @@ -13088,7 +12484,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ZipDataset") do desc = tf.NodeDescription("ZipDataset") @@ -13136,13 +12532,73 @@ begin end +""" + load_tpu_embedding_stochastic_gradient_descent_parameters(parameters; table_id=-1, table_name=) + +Load embedding parameters for a single table. 
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + tf.add_input(desc, parameters_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + tf.add_input(desc, parameters_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end +end + + """ identity_reader_v2(; container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "IdentityReaderV2") do desc = tf.NodeDescription("IdentityReaderV2") @@ -13186,7 +12642,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "LMDBReader") do desc = tf.NodeDescription("LMDBReader") @@ -13230,7 +12686,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, 
shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "NcclAllReduce") do desc = tf.NodeDescription("NcclAllReduce") @@ -13286,7 +12742,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TextLineDataset") do desc = tf.NodeDescription("TextLineDataset") @@ -13330,7 +12786,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) local desc tf.with_op_name(name, "SdcaShrinkL1") do desc = tf.NodeDescription("SdcaShrinkL1") @@ -13384,7 +12840,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReaderV2") do desc = tf.NodeDescription("TFRecordReaderV2") @@ -13428,61 +12884,13 @@ begin end -""" - multi_device_iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do - desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") - string_handle_ = convert(tf.EagerTensor, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - 
multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ padded_batch_dataset_v2(input_dataset, batch_size, padded_shapes, padding_values, drop_remainder) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDatasetV2") do desc = tf.NodeDescription("PaddedBatchDatasetV2") @@ -13547,12 +12955,60 @@ end """ - load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) + multi_device_iterator_from_string_handle(string_handle; output_types=Int64[], output_shapes=Int64[]) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(tf.EagerTensor, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end +end +""" + load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) + +Load embedding parameters for a single table. 
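+
+Example sketch (hypothetical names; `parameters` and `accumulators` must have
+the shape configured for the table):
+
+    parameters = rand(Float32, 1024, 64)
+    accumulators = zeros(Float32, 1024, 64)
+    load_tpu_embedding_proximal_adagrad_parameters(parameters, accumulators;
+        table_name="emb0", num_shards=1, shard_id=0)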
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") @@ -13616,7 +13072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySize") do desc = tf.NodeDescription("TensorArraySize") @@ -13656,7 +13112,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapSize") do desc = tf.NodeDescription("OrderedMapSize") @@ -13718,7 +13174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniform") do desc = tf.NodeDescription("StatelessRandomUniform") @@ -13768,7 +13224,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToSparseSetOperation") do desc = tf.NodeDescription("SparseToSparseSetOperation") @@ -13844,7 +13300,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) local desc tf.with_op_name(name, "TensorSummary") do desc = tf.NodeDescription("TensorSummary") @@ -13900,7 +13356,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, 
Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) local desc tf.with_op_name(name, "RemoteFusedGraphExecute") do desc = tf.NodeDescription("RemoteFusedGraphExecute") @@ -13954,7 +13410,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) local desc tf.with_op_name(name, "SparseSliceGrad") do desc = tf.NodeDescription("SparseSliceGrad") @@ -14004,7 +13460,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumsum") do desc = tf.NodeDescription("Cumsum") @@ -14061,7 +13517,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") @@ -14136,7 +13592,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPoolGrad") do desc = tf.NodeDescription("AvgPoolGrad") @@ -14202,7 +13658,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "RestoreV2") do desc = tf.NodeDescription("RestoreV2") @@ -14252,7 +13708,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu6_graph(features_; name=nothing) local desc tf.with_op_name(name, "Relu6") do desc = tf.NodeDescription("Relu6") @@ -14290,7 +13746,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyRMSProp") do desc = tf.NodeDescription("SparseApplyRMSProp") @@ -14376,7 +13832,7 @@ end Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_Recv") do desc = tf.NodeDescription("_Recv") @@ -14444,7 +13900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool") do desc = tf.NodeDescription("MaxPool") @@ -14506,7 +13962,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function invert_graph(x_; name=nothing) local desc tf.with_op_name(name, "Invert") do desc = tf.NodeDescription("Invert") @@ -14544,7 +14000,7 @@ end *NOTE*: Do not invoke this operator directly in Python. 
Graph rewrite pass is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) local desc tf.with_op_name(name, "_UnaryOpsComposition") do desc = tf.NodeDescription("_UnaryOpsComposition") @@ -14588,7 +14044,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapDataset") do desc = tf.NodeDescription("ExperimentalMapDataset") @@ -14661,10 +14117,10 @@ end """ load_tpu_embedding_adam_parameters(parameters, momenta, velocities; table_id=-1, table_name=) - +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") @@ -14732,7 +14188,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "ParseTensor") do desc = tf.NodeDescription("ParseTensor") @@ -14774,7 +14230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") @@ -14830,7 +14286,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") @@ -14886,7 +14342,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomUniformInt") do desc = tf.NodeDescription("RandomUniformInt") @@ -14947,7 +14403,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") @@ -14996,7 +14452,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV2") do desc = tf.NodeDescription("TensorArrayReadV2") @@ -15046,7 +14502,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpTo") do desc = tf.NodeDescription("ReaderReadUpTo") @@ -15095,7 +14551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) local desc tf.with_op_name(name, "EncodeProto") do desc = tf.NodeDescription("EncodeProto") @@ -15159,7 +14615,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceGrad") do desc = 
tf.NodeDescription("StridedSliceGrad") @@ -15287,7 +14743,7 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceSend") do desc = tf.NodeDescription("_NcclReduceSend") @@ -15343,7 +14799,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "PaddedBatchDataset") do desc = tf.NodeDescription("PaddedBatchDataset") @@ -15409,7 +14865,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) local desc tf.with_op_name(name, "DataFormatVecPermute") do desc = tf.NodeDescription("DataFormatVecPermute") @@ -15459,7 +14915,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) local desc tf.with_op_name(name, "StringFormat") do desc = tf.NodeDescription("StringFormat") @@ -15519,7 +14975,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) local desc tf.with_op_name(name, "AsString") do desc = tf.NodeDescription("AsString") @@ -15587,7 +15043,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueMany") do desc = tf.NodeDescription("QueueEnqueueMany") @@ -15639,7 +15095,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "FakeParam") do desc = tf.NodeDescription("FakeParam") @@ -15683,7 +15139,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ApplyAdagrad") do desc = tf.NodeDescription("ApplyAdagrad") @@ -15748,7 +15204,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIteratorGetDevice") do desc = tf.NodeDescription("ExperimentalIteratorGetDevice") @@ -15784,7 +15240,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) local desc tf.with_op_name(name, "AdjustContrast") do desc = tf.NodeDescription("AdjustContrast") @@ -15828,13 +15284,46 @@ begin end +""" + optional_none() + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_none_graph(; name=nothing) + local desc + tf.with_op_name(name, "OptionalNone") do + desc + tf.NodeDescription("OptionalNone") + end + tf.Tensor(tf.Operation(desc)) + end + function optional_none_eager(; name=nothing) + desc = tf.EagerOp("OptionalNone") + res = tf.execute(desc) + node = tf.TapeNode(optional_none, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function optional_none(; name=nothing) + if tf.in_eager_mode() + optional_none_eager(; name=name) + else + optional_none_graph(; name=name) + end + end +end + + """ extract_image_patches(images) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractImagePatches") do desc = tf.NodeDescription("ExtractImagePatches") @@ -15890,102 +15379,13 @@ begin end -""" - scale_and_translate(images, size, scale, translation; kernel_type=lanczos3) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_graph(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) - local desc - tf.with_op_name(name, "ScaleAndTranslate") do - desc = tf.NodeDescription("ScaleAndTranslate") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - scale_ = 
convert(Tensor{Float32}, scale_) - translation_ = convert(Tensor{Float32}, translation_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - tf.add_input(desc, scale_) - tf.add_input(desc, translation_) - if kernel_type !== nothing - desc["kernel_type"] = Base.String(kernel_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function scale_and_translate_eager(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) - desc = tf.EagerOp("ScaleAndTranslate") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - scale_ = convert(tf.EagerTensor, scale_) - translation_ = convert(tf.EagerTensor, translation_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - tf.add_input(desc, scale_) - tf.add_input(desc, translation_) - if kernel_type !== nothing - desc["kernel_type"] = Base.String(kernel_type) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(scale_and_translate, [images_, size_, scale_, translation_], name=nothing, kernel_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function scale_and_translate(images_, size_, scale_, translation_; name=nothing, kernel_type=nothing) - if tf.in_eager_mode() - scale_and_translate_eager(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) - else - scale_and_translate_graph(images_, size_, scale_, translation_; name=name, kernel_type=kernel_type) - end - end -end - - -""" - optional_none() - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_none_graph(; name=nothing) - local desc - tf.with_op_name(name, "OptionalNone") do - desc - tf.NodeDescription("OptionalNone") - end - tf.Tensor(tf.Operation(desc)) - end - function optional_none_eager(; name=nothing) - desc = tf.EagerOp("OptionalNone") - res = tf.execute(desc) - node = tf.TapeNode(optional_none, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function optional_none(; name=nothing) - if tf.in_eager_mode() - optional_none_eager(; name=name) - else - optional_none_graph(; name=name) - end - end -end - - """ variable_v2(; container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "VariableV2") do desc = tf.NodeDescription("VariableV2") @@ -16041,7 +15441,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function elu_graph(features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function elu_graph(features_; name=nothing) local desc tf.with_op_name(name, "Elu") do desc = tf.NodeDescription("Elu") @@ -16079,7 +15479,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, 
"ScatterUpdate") do desc = tf.NodeDescription("ScatterUpdate") @@ -16135,7 +15535,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "FloorMod") do desc = tf.NodeDescription("FloorMod") @@ -16178,7 +15578,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") @@ -16226,7 +15626,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") @@ -16286,7 +15686,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ComputeAccidentalHits") do desc = tf.NodeDescription("ComputeAccidentalHits") @@ -16349,7 +15749,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "StringToNumber") do desc = tf.NodeDescription("StringToNumber") @@ -16391,7 +15791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function snapshot_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function snapshot_graph(input_; name=nothing) local desc tf.with_op_name(name, "Snapshot") do desc = tf.NodeDescription("Snapshot") @@ -16429,7 +15829,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) local desc tf.with_op_name(name, "DeserializeIterator") do desc = 
tf.NodeDescription("DeserializeIterator") @@ -16469,7 +15869,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atan") do desc = tf.NodeDescription("Atan") @@ -16507,7 +15907,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "MatMul") do desc = tf.NodeDescription("MatMul") @@ -16562,7 +15962,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function erfc_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function erfc_graph(x_; name=nothing) local desc tf.with_op_name(name, "Erfc") do desc = tf.NodeDescription("Erfc") @@ -16600,7 +16000,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SigmoidGrad") do desc = tf.NodeDescription("SigmoidGrad") @@ -16643,7 +16043,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReaderV2") do desc = tf.NodeDescription("FixedLengthRecordReaderV2") @@ -16717,7 +16117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV3") do desc = tf.NodeDescription("NonMaxSuppressionV3") @@ -16772,7 +16172,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropInput") do desc = tf.NodeDescription("Dilation2DBackpropInput") @@ -16832,13 +16232,84 @@ begin end +""" + resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; 
use_locking=false) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdadelta") do + desc = tf.NodeDescription("ResourceApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end + function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdadelta") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + desc["T"] = tf.data_type(lr_) + desc["T"] = tf.data_type(rho_) + desc["T"] = tf.data_type(epsilon_) + desc["T"] = tf.data_type(grad_) + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + end + end +end + + """ logical_or(x, y) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalOr") do desc = tf.NodeDescription("LogicalOr") @@ -16872,84 +16343,13 @@ begin end -""" - resource_apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad; use_locking=false) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdadelta") do - desc = tf.NodeDescription("ResourceApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) 
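# A minimal, self-contained sketch of the three-method pattern the generator
# emits for every op in this file: `*_graph` builds a graph node, `*_eager`
# executes immediately, and the plain-named front end dispatches on
# tf.in_eager_mode(). All names below are illustrative stand-ins, not
# TensorFlow.jl API; the real eager methods additionally record a tf.TapeNode
# via tf.add_node so gradients can be traced.
module OpDispatchSketch

const EAGER = Ref(false)              # stand-in for the eager-context stack
in_eager_mode() = EAGER[]

add_demo_graph(x, y) = "Add($x, $y)"  # would build a tf.NodeDescription
add_demo_eager(x, y) = x + y          # would build and execute a tf.EagerOp

function add_demo(x, y)
    if in_eager_mode()
        add_demo_eager(x, y)
    else
        add_demo_graph(x, y)
    end
end

end # module

# Usage: OpDispatchSketch.EAGER[] = true; OpDispatchSketch.add_demo(1, 2) == 3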
- accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAdadelta") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - accum_update_ = convert(tf.EagerTensor, accum_update_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) - end - end -end - - """ dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape; validate_indices=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToSparseSetOperation") do desc = tf.NodeDescription("DenseToSparseSetOperation") @@ -17017,7 +16417,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProduced") do desc = tf.NodeDescription("ReaderNumRecordsProduced") @@ -17053,13 +16453,12 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) local desc tf.with_op_name(name, "AdjustHue") do desc = tf.NodeDescription("AdjustHue") images_ = convert(Tensor{Float32}, images_) delta_ = convert(Tensor{Float32}, delta_) - (images_,) = tf.tf_promote(images_) tf.add_input(desc, images_) tf.add_input(desc, delta_) end @@ -17071,7 +16470,6 @@ begin delta_ = convert(tf.EagerTensor, delta_) tf.add_input(desc, images_) tf.add_input(desc, delta_) - desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res) if length(res) >= 1 @@ -17095,7 +16493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") @@ -17141,7 +16539,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") @@ -17223,7 +16621,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function real_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "RealDiv") do desc = tf.NodeDescription("RealDiv") @@ -17266,7 +16664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "RestoreSlice") do desc = tf.NodeDescription("RestoreSlice") @@ -17322,7 +16720,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPopV2") do desc = tf.NodeDescription("StackPopV2") @@ -17364,7 +16762,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) local desc tf.with_op_name(name, "Reverse") do desc = tf.NodeDescription("Reverse") @@ -17406,7 +16804,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) local desc tf.with_op_name(name, "DecodePng") do desc = tf.NodeDescription("DecodePng") @@ -17454,7 +16852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionV2") do desc = tf.NodeDescription("NonMaxSuppressionV2") @@ -17505,7 +16903,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_graph(a_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igamma_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igamma") do desc = tf.NodeDescription("Igamma") @@ -17548,7 +16946,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function digamma_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function digamma_graph(x_; name=nothing) local desc tf.with_op_name(name, "Digamma") do desc = tf.NodeDescription("Digamma") @@ -17586,7 +16984,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdaMax") do desc = tf.NodeDescription("ResourceApplyAdaMax") @@ -17667,7 +17065,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) local desc tf.with_op_name(name, "SpaceToDepth") do desc = tf.NodeDescription("SpaceToDepth") @@ -17717,7 +17115,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "SqrtGrad") do desc = tf.NodeDescription("SqrtGrad") @@ -17760,7 +17158,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstage") do desc = tf.NodeDescription("MapUnstage") @@ -17830,7 +17228,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "Qr") do desc = tf.NodeDescription("Qr") @@ -17879,7 +17277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") @@ -17952,7 +17350,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "UnbatchGrad") do desc = tf.NodeDescription("UnbatchGrad") @@ -18015,7 +17413,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_softmax_graph(logits_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_softmax_graph(logits_; name=nothing) local desc tf.with_op_name(name, "LogSoftmax") do desc = tf.NodeDescription("LogSoftmax") @@ -18053,7 +17451,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "ResourceCountUpTo") do desc = tf.NodeDescription("ResourceCountUpTo") @@ -18095,7 +17493,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "AccumulateNV2") do desc = tf.NodeDescription("AccumulateNV2") @@ -18145,7 +17543,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ParallelMapDataset") do desc = tf.NodeDescription("ParallelMapDataset") @@ -18231,7 +17629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomUniform") do desc = tf.NodeDescription("RandomUniform") @@ -18287,7 +17685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeTranscode") do desc = tf.NodeDescription("UnicodeTranscode") @@ -18353,7 +17751,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReset") do desc = tf.NodeDescription("ReaderReset") @@ -18389,7 +17787,7 @@ end Replacement node for NcclBroadcast. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastSend") do desc = tf.NodeDescription("_NcclBroadcastSend") @@ -18439,7 +17837,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDeterminant") do desc = tf.NodeDescription("BatchMatrixDeterminant") @@ -18477,7 +17875,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function less_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LessEqual") do desc = tf.NodeDescription("LessEqual") @@ -18520,7 +17918,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyGradientDescent") do desc = tf.NodeDescription("ApplyGradientDescent") @@ -18574,7 +17972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtN") do desc = tf.NodeDescription("SparseSegmentSqrtN") @@ -18623,7 +18021,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixLogarithm") do desc = tf.NodeDescription("MatrixLogarithm") @@ -18661,7 +18059,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMul") do desc = tf.NodeDescription("ScatterMul") @@ -18717,7 +18115,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, 
channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeJpeg") do desc = tf.NodeDescription("DecodeJpeg") @@ -18789,7 +18187,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueueV2") do desc = tf.NodeDescription("RandomShuffleQueueV2") @@ -18869,7 +18267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueManyV2") do desc = tf.NodeDescription("QueueEnqueueManyV2") @@ -18921,7 +18319,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") @@ -19008,7 +18406,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "InterleaveDataset") do desc = tf.NodeDescription("InterleaveDataset") @@ -19080,7 +18478,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) local desc tf.with_op_name(name, "StackPop") do desc = tf.NodeDescription("StackPop") @@ -19116,13 +18514,57 @@ begin end +""" + boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, 
tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + end + tf.Tensor(tf.Operation(desc)) + end + function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + stamp_token_ = convert(tf.EagerTensor, stamp_token_) + tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.in_eager_mode() + boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end + end +end + + """ max_pool_v2(input, ksize, strides; data_format=NHWC) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolV2") do desc = tf.NodeDescription("MaxPoolV2") @@ -19174,57 +18616,13 @@ begin end -""" - boosted_trees_deserialize_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do - desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - stamp_token_ = convert(Tensor{Int64}, stamp_token_) - tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - stamp_token_ = convert(tf.EagerTensor, stamp_token_) - tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) - tf.add_input(desc, 
tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.in_eager_mode() - boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - else - boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - end - end -end - - """ load_and_remap_matrix(ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values; max_rows_in_memory=-1) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) local desc tf.with_op_name(name, "LoadAndRemapMatrix") do desc = tf.NodeDescription("LoadAndRemapMatrix") @@ -19294,7 +18692,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalGradientDescent") do desc = tf.NodeDescription("SparseApplyProximalGradientDescent") @@ -19365,7 +18763,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFuncStateless") do desc = tf.NodeDescription("PyFuncStateless") @@ -19419,7 +18817,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function where_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function where_graph(input_; name=nothing) local desc tf.with_op_name(name, "Where") do desc = tf.NodeDescription("Where") @@ -19457,7 +18855,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, 
dct_coefficient_count=nothing) local desc tf.with_op_name(name, "Mfcc") do desc = tf.NodeDescription("Mfcc") @@ -19521,7 +18919,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) local desc tf.with_op_name(name, "CheckNumerics") do desc = tf.NodeDescription("CheckNumerics") @@ -19565,7 +18963,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_compilation_result_graph(; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_compilation_result_graph(; name=nothing) local desc tf.with_op_name(name, "TPUCompilationResult") do desc @@ -19595,10 +18993,10 @@ end """ retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") @@ -19654,7 +19052,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanGrad") do desc = tf.NodeDescription("SparseSegmentMeanGrad") @@ -19707,7 +19105,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "TryRpc") do desc = tf.NodeDescription("TryRpc") @@ -19774,7 +19172,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixTriangularSolve") do desc = tf.NodeDescription("BatchMatrixTriangularSolve") @@ -19829,7 +19227,7 @@ end A graph node which represents a return value of a function. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_Retval") do desc = tf.NodeDescription("_Retval") @@ -19873,7 +19271,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCounts") do desc = tf.NodeDescription("UniqueWithCounts") @@ -19922,7 +19320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Add") do desc = tf.NodeDescription("Add") @@ -19965,7 +19363,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalScanDataset") do desc = tf.NodeDescription("ExperimentalScanDataset") @@ -20045,7 +19443,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignAddVariableOp") do desc = tf.NodeDescription("AssignAddVariableOp") @@ -20093,7 +19491,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SplitV") do desc = tf.NodeDescription("SplitV") @@ -20153,7 +19551,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) local desc tf.with_op_name(name, "Assign") do desc = tf.NodeDescription("Assign") @@ -20208,7 +19606,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolWithArgmax") do desc = tf.NodeDescription("MaxPoolWithArgmax") @@ -20269,7 +19667,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedReluX") do desc = tf.NodeDescription("QuantizedReluX") @@ -20330,7 +19728,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "RandomShuffleQueue") do desc = tf.NodeDescription("RandomShuffleQueue") @@ -20410,7 +19808,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft2d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT2D") do desc = tf.NodeDescription("FFT2D") @@ -20448,7 +19846,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalThreadPoolDataset") do desc = tf.NodeDescription("ExperimentalThreadPoolDataset") @@ -20494,13 +19892,83 @@ begin end +""" + ordered_map_unstage(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstage") do + desc = tf.NodeDescription("OrderedMapUnstage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) 
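+        # Graph mode: the do-block above filled in the "OrderedMapUnstage"
+        # NodeDescription (inputs plus any explicitly passed attrs);
+        # tf.Operation(desc) adds the node to the default graph, and tf.Tensor
+        # wraps its output so it composes with other graph tensors.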
+ end + function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapUnstage") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end + end +end + + """ experimental_directed_interleave_dataset(selector_input_dataset, data_input_datasets) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) local desc tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") @@ -20552,66 +20020,13 @@ begin end -""" - sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtNGrad") do - desc = tf.NodeDescription("SparseSegmentSqrtNGrad") - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - output_dim0_ = convert(Tensor{Int32}, output_dim0_) - (grad_,) = tf.tf_promote(grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - desc = tf.EagerOp("SparseSegmentSqrtNGrad") - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - output_dim0_ = 
convert(tf.EagerTensor, output_dim0_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - desc["T"] = tf.data_type(grad_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) - else - sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) - end - end -end - - """ real(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function real_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function real_graph(input_; name=nothing) local desc tf.with_op_name(name, "Real") do desc = tf.NodeDescription("Real") @@ -20644,70 +20059,53 @@ end """ - ordered_map_unstage(key, indices; capacity=0, memory_limit=0, container=, shared_name=) + sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) local desc - tf.with_op_name(name, "OrderedMapUnstage") do - desc = tf.NodeDescription("OrderedMapUnstage") - key_ = convert(Tensor{Int64}, key_) + tf.with_op_name(name, "SparseSegmentSqrtNGrad") do + desc = tf.NodeDescription("SparseSegmentSqrtNGrad") + grad_ = convert(Tensor{Any}, grad_) indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + (grad_,) = tf.tf_promote(grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, grad_) tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) end tf.Tensor(tf.Operation(desc)) end - function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapUnstage") - key_ = convert(tf.EagerTensor, key_) + function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtNGrad") + grad_ = convert(tf.EagerTensor, grad_) indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, key_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + output_dim0_ = convert(tf.EagerTensor, output_dim0_) + 
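# Eager mode: the inputs were just coerced to tf.EagerTensor, so this op
+        # executes immediately instead of being staged in a graph; the
+        # "T"/"Tidx" attrs below are inferred from the runtime dtypes, and the
+        # TapeNode further down records the call for gradient bookkeeping.
+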
tf.add_input(desc, grad_) tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) + desc["T"] = tf.data_type(grad_) + desc["Tidx"] = tf.data_type(indices_) res = tf.execute(desc) - node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) if tf.in_eager_mode() - ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) else - ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) end end end @@ -20719,7 +20117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT2D") do desc = tf.NodeDescription("RFFT2D") @@ -20759,7 +20157,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) local desc tf.with_op_name(name, "VarIsInitializedOp") do desc = tf.NodeDescription("VarIsInitializedOp") @@ -20795,7 +20193,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") @@ -20839,7 +20237,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atan2_graph(y_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atan2_graph(y_, x_; name=nothing) local desc tf.with_op_name(name, "Atan2") do desc = tf.NodeDescription("Atan2") @@ -20882,7 
+20280,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomPoisson") do desc = tf.NodeDescription("RandomPoisson") @@ -20950,7 +20348,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) local desc tf.with_op_name(name, "ReverseSequence") do desc = tf.NodeDescription("ReverseSequence") @@ -21003,10 +20401,10 @@ end """ outfeed_enqueue(input) - +An op which emits a single Tensor value from an XLA computation. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "OutfeedEnqueue") do desc = tf.NodeDescription("OutfeedEnqueue") @@ -21050,7 +20448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sub_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sub_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Sub") do desc = tf.NodeDescription("Sub") @@ -21093,7 +20491,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) local desc tf.with_op_name(name, "StringSplit") do desc = tf.NodeDescription("StringSplit") @@ -21144,7 +20542,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) local desc tf.with_op_name(name, "Cumprod") do desc = tf.NodeDescription("Cumprod") @@ -21201,7 +20599,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "QuantizedResizeBilinear") do desc = tf.NodeDescription("QuantizedResizeBilinear") @@ -21262,7 +20660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, 
dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSingleExample") do desc = tf.NodeDescription("ParseSingleExample") @@ -21343,7 +20741,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "IsVariableInitialized") do desc = tf.NodeDescription("IsVariableInitialized") @@ -21382,107 +20780,99 @@ end """ - experimental_stats_aggregator_handle(; container=, shared_name=) + resource_scatter_sub(resource, indices, updates) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc - tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do - desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + tf.with_op_name(name, "ResourceScatterSub") do + desc = tf.NodeDescription("ResourceScatterSub") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end end tf.Tensor(tf.Operation(desc)) end - function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterSub") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end + desc["Tindices"] = tf.data_type(indices_) + desc["dtype"] = tf.data_type(updates_) res = tf.execute(desc) - node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) + node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function experimental_stats_aggregator_handle(; name=nothing, 
container=nothing, shared_name=nothing) + function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) if tf.in_eager_mode() - experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) + resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) else - experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) end end end """ - tensor_list_concat_v2(input_handle, element_shape, leading_dims) + experimental_stats_aggregator_handle(; container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) local desc - tf.with_op_name(name, "TensorListConcatV2") do - desc = tf.NodeDescription("TensorListConcatV2") - input_handle_ = convert(Tensor{Any}, input_handle_) - element_shape_ = convert(Tensor{Any}, element_shape_) - leading_dims_ = convert(Tensor{Int64}, leading_dims_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, leading_dims_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") + if container !== nothing + desc["container"] = Base.String(container) end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + tf.Tensor(tf.Operation(desc)) end - function tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListConcatV2") - input_handle_ = convert(tf.EagerTensor, input_handle_) - element_shape_ = convert(tf.EagerTensor, element_shape_) - leading_dims_ = convert(tf.EagerTensor, leading_dims_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, leading_dims_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") + if container !== nothing + desc["container"] = Base.String(container) end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - desc["shape_type"] = tf.data_type(element_shape_) res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat_v2, [input_handle_, element_shape_, leading_dims_], name=nothing, element_dtype=nothing, shape_type=nothing, res) + node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) - return res + return res[1] end end - function 
tensor_list_concat_v2(input_handle_, element_shape_, leading_dims_; name=nothing, element_dtype=nothing, shape_type=nothing) + function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() - tensor_list_concat_v2_eager(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) + experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) else - tensor_list_concat_v2_graph(input_handle_, element_shape_, leading_dims_; name=name, element_dtype=element_dtype, shape_type=shape_type) + experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) end end end @@ -21494,7 +20884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) local desc tf.with_op_name(name, "CudnnRNNV2") do desc = tf.NodeDescription("CudnnRNNV2") @@ -21588,68 +20978,13 @@ begin end -""" - resource_scatter_sub(resource, indices, updates) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterSub") do - desc = tf.NodeDescription("ResourceScatterSub") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterSub") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end -end - - """ assign_add(ref, value; use_locking=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_add_graph(ref_, value_; 
name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignAdd") do desc = tf.NodeDescription("AssignAdd") @@ -21698,7 +21033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorDataset") do desc = tf.NodeDescription("TensorDataset") @@ -21746,7 +21081,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) local desc tf.with_op_name(name, "Bucketize") do desc = tf.NodeDescription("Bucketize") @@ -21790,7 +21125,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMax") do desc = tf.NodeDescription("SparseReduceMax") @@ -21841,12 +21176,67 @@ end """ - retrieve_tpu_embedding_mdl_adagrad_light_parameters(; table_id=-1, table_name=) + tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradWithShape") do + desc = tf.NodeDescription("TensorArrayGradWithShape") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + tf.add_input(desc, shape_to_prepend_) + if source !== nothing + desc["source"] = Base.String(source) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradWithShape") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + tf.add_input(desc, shape_to_prepend_) + if source !== nothing + desc["source"] = Base.String(source) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + if tf.in_eager_mode() + 
tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + else + tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + end + end +end + +""" + retrieve_tpu_embedding_mdl_adagrad_light_parameters(; table_id=-1, table_name=) +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") @@ -21901,68 +21291,13 @@ begin end -""" - tensor_array_grad_with_shape(handle, flow_in, shape_to_prepend) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradWithShape") do - desc = tf.NodeDescription("TensorArrayGradWithShape") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - tf.add_input(desc, shape_to_prepend_) - if source !== nothing - desc["source"] = Base.String(source) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - desc = tf.EagerOp("TensorArrayGradWithShape") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - tf.add_input(desc, shape_to_prepend_) - if source !== nothing - desc["source"] = Base.String(source) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) - else - tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) - end - end -end - - """ tensor_array_close_v3(handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV3") do desc = tf.NodeDescription("TensorArrayCloseV3") @@ -21998,7 +21333,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) local desc tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") @@ -22050,7 +21385,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) local desc tf.with_op_name(name, "Pack") do desc = tf.NodeDescription("Pack") @@ -22106,7 +21441,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV2") do desc = tf.NodeDescription("TensorArrayGradV2") @@ -22152,7 +21487,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignSubVariableOp") do desc = tf.NodeDescription("AssignSubVariableOp") @@ -22200,7 +21535,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft2d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT2D") do desc = tf.NodeDescription("BatchFFT2D") @@ -22236,7 +21571,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "CloseSummaryWriter") do desc = tf.NodeDescription("CloseSummaryWriter") @@ -22272,7 +21607,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rank_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rank_graph(input_; name=nothing) local desc tf.with_op_name(name, "Rank") do desc = tf.NodeDescription("Rank") @@ -22310,7 +21645,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft3d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT3D") do desc = tf.NodeDescription("FFT3D") @@ -22348,7 +21683,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, 
use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrl") do desc = tf.NodeDescription("ApplyFtrl") @@ -22427,7 +21762,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) local desc tf.with_op_name(name, "Abort") do desc = tf.NodeDescription("Abort") @@ -22471,7 +21806,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) local desc tf.with_op_name(name, "AudioSpectrogram") do desc = tf.NodeDescription("AudioSpectrogram") @@ -22525,7 +21860,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "VariableShape") do desc = tf.NodeDescription("VariableShape") @@ -22567,7 +21902,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueueV2") do desc = tf.NodeDescription("FIFOQueueV2") @@ -22629,7 +21964,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Variable") do desc = tf.NodeDescription("Variable") @@ -22685,7 +22020,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) local desc tf.with_op_name(name, "TensorForestCreateTreeVariable") do desc = tf.NodeDescription("TensorForestCreateTreeVariable") @@ -22725,7 +22060,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradWithArgmax") @@ -22792,7 +22127,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "RefSwitch") do desc = tf.NodeDescription("RefSwitch") @@ -22839,7 +22174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_fprint_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_fprint_graph(input_; name=nothing) local desc tf.with_op_name(name, "SdcaFprint") do desc = tf.NodeDescription("SdcaFprint") @@ -22869,73 +22204,13 @@ begin end -""" - experimental_choose_fastest_dataset(input_datasets) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_choose_fastest_dataset_graph(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalChooseFastestDataset") do - desc = tf.NodeDescription("ExperimentalChooseFastestDataset") - input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] - tf.add_input(desc, input_datasets_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if num_experiments !== nothing - desc["num_experiments"] = Base.Int(num_experiments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_choose_fastest_dataset_eager(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalChooseFastestDataset") - input_datasets_ = convert(tf.EagerTensor, input_datasets_) - tf.add_input(desc, input_datasets_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if num_experiments !== nothing - desc["num_experiments"] = Base.Int(num_experiments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_choose_fastest_dataset, [input_datasets_], name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_choose_fastest_dataset(input_datasets_; name=nothing, N=nothing, num_experiments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_choose_fastest_dataset_eager(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) - else - experimental_choose_fastest_dataset_graph(input_datasets_; name=name, N=N, num_experiments=num_experiments, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ 
leaky_relu(features; alpha=?) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyRelu") do desc = tf.NodeDescription("LeakyRelu") @@ -22979,7 +22254,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) local desc tf.with_op_name(name, "IdentityN") do desc = tf.NodeDescription("IdentityN") @@ -23021,7 +22296,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNBackpropV2") do desc = tf.NodeDescription("CudnnRNNBackpropV2") @@ -23154,7 +22429,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) local desc tf.with_op_name(name, "RequantizationRange") do desc = tf.NodeDescription("RequantizationRange") @@ -23205,7 +22480,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function maximum_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function maximum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Maximum") do desc = tf.NodeDescription("Maximum") @@ -23248,7 +22523,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) local desc tf.with_op_name(name, "Reshape") do desc = tf.NodeDescription("Reshape") @@ -23292,7 +22567,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) local desc tf.with_op_name(name, "MatrixSolveLs") do desc = tf.NodeDescription("MatrixSolveLs") @@ -23345,7 +22620,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "TFRecordDataset") do desc = tf.NodeDescription("TFRecordDataset") @@ -23389,7 +22664,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") @@ -23435,51 +22710,13 @@ begin end -""" - hsv_to_rgb(images) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) - local desc - tf.with_op_name(name, "HSVToRGB") do - desc = tf.NodeDescription("HSVToRGB") - images_ = convert(Tensor{Float32}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - end - tf.Tensor(tf.Operation(desc)) - end - function hsv_to_rgb_eager(images_; name=nothing) - desc = tf.EagerOp("HSVToRGB") - images_ = convert(tf.EagerTensor, images_) - tf.add_input(desc, images_) - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function hsv_to_rgb(images_; name=nothing) - if tf.in_eager_mode() - hsv_to_rgb_eager(images_; name=name) - else - hsv_to_rgb_graph(images_; name=name) - end - end -end - - """ experimental_max_intra_op_parallelism_dataset(input_dataset, max_intra_op_parallelism) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") @@ -23525,13 +22762,51 @@ begin end +""" + hsv_to_rgb(images) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) + local desc + tf.with_op_name(name, "HSVToRGB") do + desc = tf.NodeDescription("HSVToRGB") + images_ = convert(Tensor{Float32}, images_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + end + tf.Tensor(tf.Operation(desc)) + end + function hsv_to_rgb_eager(images_; name=nothing) + desc = tf.EagerOp("HSVToRGB") + images_ = convert(tf.EagerTensor, images_) + tf.add_input(desc, images_) + desc["T"] = tf.data_type(images_) + res = tf.execute(desc) + node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + 
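# tf.execute returns a vector of output handles; HSVToRGB has a single
+    # output, so the branch above registers res[1] on the tape and returns it.
+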
end + function hsv_to_rgb(images_; name=nothing) + if tf.in_eager_mode() + hsv_to_rgb_eager(images_; name=name) + else + hsv_to_rgb_graph(images_; name=name) + end + end +end + + """ scatter_div(ref, indices, updates; use_locking=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterDiv") do desc = tf.NodeDescription("ScatterDiv") @@ -23587,7 +22862,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) local desc tf.with_op_name(name, "DecodeWav") do desc = tf.NodeDescription("DecodeWav") @@ -23640,7 +22915,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log") do desc = tf.NodeDescription("Log") @@ -23678,7 +22953,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) local desc tf.with_op_name(name, "SaveV2") do desc = tf.NodeDescription("SaveV2") @@ -23732,7 +23007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deep_copy_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deep_copy_graph(x_; name=nothing) local desc tf.with_op_name(name, "DeepCopy") do desc = tf.NodeDescription("DeepCopy") @@ -23770,7 +23045,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ModelDataset") do desc = tf.NodeDescription("ModelDataset") @@ -23818,7 +23093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) local desc tf.with_op_name(name, "ParseSequenceExample") do desc = tf.NodeDescription("ParseSequenceExample") @@ -23957,7 +23232,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sinh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sinh") do desc = tf.NodeDescription("Sinh") @@ -23995,7 +23270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorV2") do desc = tf.NodeDescription("IteratorV2") @@ -24051,7 +23326,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayWriteV2") do desc = tf.NodeDescription("TensorArrayWriteV2") @@ -24101,7 +23376,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListElementShape") do desc = tf.NodeDescription("TensorListElementShape") @@ -24143,7 +23418,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSizeV2") do desc = tf.NodeDescription("QueueSizeV2") @@ -24179,7 +23454,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expm1_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function expm1_graph(x_; name=nothing) local desc tf.with_op_name(name, "Expm1") do desc = tf.NodeDescription("Expm1") @@ -24217,7 +23492,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixBandPart") do desc = tf.NodeDescription("BatchMatrixBandPart") @@ -24263,7 +23538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ConcatenateDataset") do desc = tf.NodeDescription("ConcatenateDataset") @@ -24315,7 +23590,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_gif_graph(contents_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_gif_graph(contents_; name=nothing) local desc tf.with_op_name(name, "DecodeGif") do desc = tf.NodeDescription("DecodeGif") @@ -24346,12 +23621,12 @@ end """ - tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[], padding_map=Int64[], step_marker_location=STEP_MARK_AT_ENTRY) - + tpu_replicate(inputs, broadcast_inputs, variables, guaranteed_constants; num_cores_per_replica=1, topology=, use_tpu=true, device_assignment=Int64[], host_compute_core=Int64[]) +Runs replicated computations on a distributed TPU system. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) local desc tf.with_op_name(name, "TPUReplicate") do desc = tf.NodeDescription("TPUReplicate") @@ -24399,16 +23674,10 @@ begin if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) end - if padding_map !== nothing - desc["padding_map"] = map(Base.identity, padding_map) - end - if step_marker_location !== nothing - desc["step_marker_location"] = Base.String(step_marker_location) - end end tf.Tensor(tf.Operation(desc)) end - function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, 
output_types=nothing, padding_map=nothing, step_marker_location=nothing) + function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) desc = tf.EagerOp("TPUReplicate") inputs_ = convert(tf.EagerTensor, inputs_) broadcast_inputs_ = convert(tf.EagerTensor, broadcast_inputs_) @@ -24454,24 +23723,18 @@ begin if output_types !== nothing desc["output_types"] = map(Base.identity, output_types) end - if padding_map !== nothing - desc["padding_map"] = map(Base.identity, padding_map) - end - if step_marker_location !== nothing - desc["step_marker_location"] = Base.String(step_marker_location) - end res = tf.execute(desc) - node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing, res) + node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, padding_map=nothing, step_marker_location=nothing) + function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) if tf.in_eager_mode() - tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) + tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, 
Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) else - tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types, padding_map=padding_map, step_marker_location=step_marker_location) + tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) end end end @@ -24483,7 +23746,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEigV2") do desc = tf.NodeDescription("BatchSelfAdjointEigV2") @@ -24532,7 +23795,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Shape") do desc = tf.NodeDescription("Shape") @@ -24576,7 +23839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RepeatDataset") do desc = tf.NodeDescription("RepeatDataset") @@ -24622,13 +23885,56 @@ begin end +""" + reciprocal_grad(y, dy) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "ReciprocalGrad") do + desc = tf.NodeDescription("ReciprocalGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + end + tf.Tensor(tf.Operation(desc)) + end + function reciprocal_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("ReciprocalGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) + desc["T"] = tf.data_type(y_) + desc["T"] = tf.data_type(dy_) + res = tf.execute(desc) + node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function reciprocal_grad(y_, dy_; name=nothing) + 
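# The three generated layers above follow the convention used for every op in
# this file: `reciprocal_grad_graph` builds a NodeDescription for graph mode,
# `reciprocal_grad_eager` executes immediately via `tf.execute` and records a
# `tf.TapeNode` so the tape can later drive backpropagation, and the plain
# `reciprocal_grad` wrapper selects between them with `tf.in_eager_mode()`.
# Numerically, ReciprocalGrad is the backward rule for y = 1/x, namely
# dx = -y^2 * dy: for y = 0.5 (x = 2) and dy = 1.0 it yields dx = -0.25.
# A minimal usage sketch, assuming an eager context is already installed
# (values hypothetical):
#
#   y  = tf.constant([0.5, 0.25])
#   dy = tf.constant([1.0, 1.0])
#   dx = reciprocal_grad(y, dy)   # dispatches to reciprocal_grad_eager
#   # expected: [-0.25, -0.0625], i.e. -y.^2 .* dy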
if tf.in_eager_mode() + reciprocal_grad_eager(y_, dy_; name=name) + else + reciprocal_grad_graph(y_, dy_; name=name) + end + end +end + + """ crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=bilinear) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradBoxes") do desc = tf.NodeDescription("CropAndResizeGradBoxes") @@ -24678,56 +23984,13 @@ begin end -""" - reciprocal_grad(y, dy) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "ReciprocalGrad") do - desc = tf.NodeDescription("ReciprocalGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) - end - function reciprocal_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("ReciprocalGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function reciprocal_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - reciprocal_grad_eager(y_, dy_; name=name) - else - reciprocal_grad_graph(y_, dy_; name=name) - end - end -end - - """ batch_matrix_solve(matrix, rhs; adjoint=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "BatchMatrixSolve") do desc = tf.NodeDescription("BatchMatrixSolve") @@ -24776,7 +24039,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "MutableHashTableV2") do desc = tf.NodeDescription("MutableHashTableV2") @@ -24838,7 +24101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exit_graph(data_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function exit_graph(data_; name=nothing) local desc tf.with_op_name(name, "Exit") do desc = tf.NodeDescription("Exit") @@ -24876,7 +24139,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRN") do desc = tf.NodeDescription("LRN") @@ -24938,7 +24201,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "StatelessIf") do desc = tf.NodeDescription("StatelessIf") @@ -25004,7 +24267,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListSetItem") do desc = tf.NodeDescription("TensorListSetItem") @@ -25056,7 +24319,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rsqrt_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rsqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rsqrt") do desc = tf.NodeDescription("Rsqrt") @@ -25088,130 +24351,13 @@ begin end -""" - quantized_conv2d_with_bias_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasSumAndReluAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasSumAndReluAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - summand_ = convert(Tensor{Any}, summand_) - min_summand_ = convert(Tensor{Float32}, min_summand_) - max_summand_ = convert(Tensor{Float32}, max_summand_) - (summand_,) = tf.tf_promote(summand_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - 
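# A note on the builder pattern: `tf.add_input` must be called in exactly the
# order the inputs are declared in the op's registry entry, because TensorFlow
# identifies NodeDef inputs positionally, not by name. That is why the
# generated code threads all twelve inputs of this op through one by one.
# A hypothetical reordering such as
#
#   tf.add_input(desc, max_input_)   # WRONG: swapped with min_input_
#   tf.add_input(desc, min_input_)
#
# would still build a graph but silently feed the quantization range bounds
# in the wrong order.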
tf.add_input(desc, max_freezed_output_) - tf.add_input(desc, summand_) - tf.add_input(desc, min_summand_) - tf.add_input(desc, max_summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasSumAndReluAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - summand_ = convert(tf.EagerTensor, summand_) - min_summand_ = convert(tf.EagerTensor, min_summand_) - max_summand_ = convert(tf.EagerTensor, max_summand_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - tf.add_input(desc, summand_) - tf.add_input(desc, min_summand_) - tf.add_input(desc, max_summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - desc["Tbias"] = tf.data_type(bias_) - desc["Tsummand"] = tf.data_type(summand_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - 
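# Multi-output ops like this one are where the two code paths differ in the
# shape of their return value: the graph builder above collects each output
# explicitly (`for out_idx = 1:3 push!(out, tf.Tensor(op, out_idx))`), while
# the eager variant returns the whole vector produced by `tf.execute`
# (`return res` instead of the `return res[1]` used by single-output ops).
# Callers are expected to destructure; sketch with hypothetical bindings:
#
#   output, min_out, max_out =
#       quantized_conv2d_with_bias_sum_and_relu_and_requantize(
#           input, filter, bias, min_input, max_input, min_filter, max_filter,
#           min_freezed, max_freezed, summand, min_summand, max_summand;
#           strides=[1, 1, 1, 1], padding="SAME")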
quantized_conv2d_with_bias_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ delete_session_tensor(handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) local desc tf.with_op_name(name, "DeleteSessionTensor") do desc = tf.NodeDescription("DeleteSessionTensor") @@ -25247,7 +24393,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) local desc tf.with_op_name(name, "OneHot") do desc = tf.NodeDescription("OneHot") @@ -25313,7 +24459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrl") do desc = tf.NodeDescription("ResourceApplyFtrl") @@ -25389,7 +24535,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizerV2") do desc = tf.NodeDescription("SdcaOptimizerV2") @@ -25520,7 +24666,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueue") do desc = tf.NodeDescription("QueueEnqueue") @@ -25566,75 +24712,13 @@ begin end -""" - conditional_accumulator(; 
container=, shared_name=, reduction_type=MEAN) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - local desc - tf.with_op_name(name, "ConditionalAccumulator") do - desc = tf.NodeDescription("ConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - desc = tf.EagerOp("ConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - res = tf.execute(desc) - node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.in_eager_mode() - conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - else - conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - end - end -end - - """ ctc_beam_search_decoder(inputs, sequence_length; merge_repeated=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCBeamSearchDecoder") do desc = tf.NodeDescription("CTCBeamSearchDecoder") @@ -25691,13 +24775,75 @@ begin end +""" + conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "ConditionalAccumulator") do + desc = tf.NodeDescription("ConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + 
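# Every attribute in these builders is guarded by an `!== nothing` check, so
# the generated wrappers only write attributes the caller actually supplied;
# anything left unset falls back to the default registered with the kernel
# (the defaults are echoed in the docstring above, e.g. reduction_type=MEAN).
# A minimal sketch of creating an accumulator in graph mode, with
# hypothetical argument values:
#
#   acc = conditional_accumulator(dtype=Float32, shape=[10],
#                                 shared_name="grad_acc")
#   # reduction_type is omitted, so the registered default (MEAN) applies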
desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + tf.Tensor(tf.Operation(desc)) + end + function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + desc = tf.EagerOp("ConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + res = tf.execute(desc) + node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.in_eager_mode() + conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end + end +end + + """ whole_file_reader(; container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "WholeFileReader") do desc = tf.NodeDescription("WholeFileReader") @@ -25741,7 +24887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyRMSProp") do desc = tf.NodeDescription("ApplyRMSProp") @@ -25820,13 +24966,12 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) local desc tf.with_op_name(name, "AdjustSaturation") do desc = tf.NodeDescription("AdjustSaturation") images_ = convert(Tensor{Float32}, images_) scale_ = convert(Tensor{Float32}, scale_) - (images_,) = tf.tf_promote(images_) tf.add_input(desc, images_) tf.add_input(desc, scale_) end @@ -25838,7 +24983,6 @@ begin scale_ = convert(tf.EagerTensor, scale_) tf.add_input(desc, images_) tf.add_input(desc, scale_) - desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res) if length(res) >= 1 @@ -25862,7 +25006,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) local desc tf.with_op_name(name, "LookupTableRemoveV2") do desc = tf.NodeDescription("LookupTableRemoveV2") @@ -25904,7 +25048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueClose") do desc = tf.NodeDescription("QueueClose") @@ -25946,7 +25090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "PrefetchDataset") do desc = tf.NodeDescription("PrefetchDataset") @@ -25998,7 +25142,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "MapDataset") do desc = tf.NodeDescription("MapDataset") @@ -26068,106 +25212,13 @@ begin end -""" - quantized_conv2d_with_bias(input, filter, bias, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBias") do - desc = tf.NodeDescription("QuantizedConv2DWithBias") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Float32}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = 
map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBias") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ tensor_array_read_v3(handle, index, flow_in) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayReadV3") do desc = tf.NodeDescription("TensorArrayReadV3") @@ -26217,7 +25268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function identity_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "Identity") do desc = tf.NodeDescription("Identity") @@ -26255,7 +25306,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function print_graph(input_, data_; name=nothing, 
U=nothing, message=nothing, first_n=nothing, summarize=nothing) local desc tf.with_op_name(name, "Print") do desc = tf.NodeDescription("Print") @@ -26321,7 +25372,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) local desc tf.with_op_name(name, "CollectiveBcastSend") do desc = tf.NodeDescription("CollectiveBcastSend") @@ -26383,7 +25434,7 @@ end Converts a list of tensors to an array of tensors. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) local desc tf.with_op_name(name, "_ListToArray") do desc = tf.NodeDescription("_ListToArray") @@ -26436,7 +25487,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) local desc tf.with_op_name(name, "NegTrain") do desc = tf.NodeDescription("NegTrain") @@ -26494,49 +25545,13 @@ begin end -""" - worker_heartbeat(request) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) - local desc - tf.with_op_name(name, "WorkerHeartbeat") do - desc = tf.NodeDescription("WorkerHeartbeat") - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, request_) - end - tf.Tensor(tf.Operation(desc)) - end - function worker_heartbeat_eager(request_; name=nothing) - desc = tf.EagerOp("WorkerHeartbeat") - request_ = convert(tf.EagerTensor, request_) - tf.add_input(desc, request_) - res = tf.execute(desc) - node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function worker_heartbeat(request_; name=nothing) - if tf.in_eager_mode() - worker_heartbeat_eager(request_; name=name) - else - worker_heartbeat_graph(request_; name=name) - end - end -end - - """ merge_v2checkpoints(checkpoint_prefixes, destination_prefix; delete_old_dirs=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) local desc tf.with_op_name(name, "MergeV2Checkpoints") do desc = tf.NodeDescription("MergeV2Checkpoints") @@ -26577,12 +25592,48 @@ end """ - collective_permute(input, source_target_pairs) + worker_heartbeat(request) + +Worker heartbeat op. 
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) + local desc + tf.with_op_name(name, "WorkerHeartbeat") do + desc = tf.NodeDescription("WorkerHeartbeat") + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, request_) + end + tf.Tensor(tf.Operation(desc)) + end + function worker_heartbeat_eager(request_; name=nothing) + desc = tf.EagerOp("WorkerHeartbeat") + request_ = convert(tf.EagerTensor, request_) + tf.add_input(desc, request_) + res = tf.execute(desc) + node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function worker_heartbeat(request_; name=nothing) + if tf.in_eager_mode() + worker_heartbeat_eager(request_; name=name) + else + worker_heartbeat_graph(request_; name=name) + end + end +end +""" + collective_permute(input, source_target_pairs) + +An Op to permute tensors across replicated TPU instances. Each instance """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) local desc tf.with_op_name(name, "CollectivePermute") do desc = tf.NodeDescription("CollectivePermute") @@ -26624,7 +25675,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV3") do desc = tf.NodeDescription("QuantizeAndDequantizeV3") @@ -26688,7 +25739,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTable") do desc = tf.NodeDescription("HashTable") @@ -26750,7 +25801,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftplusGrad") do desc = tf.NodeDescription("SoftplusGrad") @@ -26793,7 +25844,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, 
footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordReader") do desc = tf.NodeDescription("FixedLengthRecordReader") @@ -26861,7 +25912,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV2") do desc = tf.NodeDescription("TensorArrayScatterV2") @@ -26911,7 +25962,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing) local desc tf.with_op_name(name, "DecodeJSONExample") do desc = tf.NodeDescription("DecodeJSONExample") @@ -26947,7 +25998,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGradV2") do desc = tf.NodeDescription("FusedBatchNormGradV2") @@ -27034,7 +26085,7 @@ end Cast x of type SrcT to y of DstT. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) local desc tf.with_op_name(name, "_HostCast") do desc = tf.NodeDescription("_HostCast") @@ -27090,7 +26141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "TFRecordReader") do desc = tf.NodeDescription("TFRecordReader") @@ -27135,12 +26186,12 @@ end """ - while_(input; output_shapes=Int64[], parallel_iterations=10) + while_(input; output_shapes=Int64[]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "While") do desc = tf.NodeDescription("While") @@ -27158,13 +26209,10 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end end tf.Tensor(tf.Operation(desc)) end - function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) desc = tf.EagerOp("While") input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) @@ -27180,21 +26228,18 @@ begin if output_shapes !== nothing desc["output_shapes"] = map(Base.identity, output_shapes) end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end res = tf.execute(desc) - node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing, res) + node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, parallel_iterations=nothing) + function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) if tf.in_eager_mode() - while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations) + while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) else - while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes, parallel_iterations=parallel_iterations) + while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) end end end @@ -27206,7 +26251,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "StatelessMultinomial") do desc = tf.NodeDescription("StatelessMultinomial") @@ -27260,7 +26305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterAdd") do desc = tf.NodeDescription("ScatterAdd") @@ -27316,7 +26361,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conj_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conj_graph(input_; name=nothing) local desc tf.with_op_name(name, "Conj") do desc = tf.NodeDescription("Conj") @@ -27354,7 +26399,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ParallelDynamicStitch") do desc = tf.NodeDescription("ParallelDynamicStitch") @@ -27402,7 +26447,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) local desc tf.with_op_name(name, "MakeIterator") do desc = tf.NodeDescription("MakeIterator") @@ -27442,7 +26487,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT3D") do desc = tf.NodeDescription("RFFT3D") @@ -27482,7 +26527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceSumSparse") do desc = tf.NodeDescription("SparseReduceSumSparse") @@ -27537,142 +26582,13 @@ begin end -""" - collective_gather(input) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_gather_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - local desc - tf.with_op_name(name, "CollectiveGather") do - desc = tf.NodeDescription("CollectiveGather") - input_ = 
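# `tf.tf_promote`, applied just below, is the generator's analogue of Julia's
# numeric promotion: it brings its arguments to a common TensorFlow element
# type so that one "T" attribute can describe them all. The eager variants
# skip it and instead read the dtype off the concrete tensors with
# `desc["T"] = tf.data_type(input_)`. A sketch of the assumed behavior (the
# exact promotion table is the generator's, not spelled out here):
#
#   a = tf.constant([1, 2])       # Int64 elements
#   b = tf.constant([1.5, 2.5])   # Float64 elements
#   a2, b2 = tf.tf_promote(a, b)  # both Float64 afterwards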
convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function collective_gather_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - desc = tf.EagerOp("CollectiveGather") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(collective_gather, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function collective_gather(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - if tf.in_eager_mode() - collective_gather_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - else - collective_gather_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - end - end -end - - -""" - combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold; pad_per_class=false) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) - local desc - tf.with_op_name(name, "CombinedNonMaxSuppression") do - desc = tf.NodeDescription("CombinedNonMaxSuppression") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_per_class_ = convert(Tensor{Int32}, max_output_size_per_class_) - max_total_size_ = convert(Tensor{Int32}, max_total_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_per_class_) - tf.add_input(desc, max_total_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - if pad_per_class !== nothing - desc["pad_per_class"] = Base.Bool(pad_per_class) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) - desc = tf.EagerOp("CombinedNonMaxSuppression") - boxes_ = convert(tf.EagerTensor, boxes_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_per_class_ = convert(tf.EagerTensor, max_output_size_per_class_) - max_total_size_ = convert(tf.EagerTensor, max_total_size_) - iou_threshold_ = convert(tf.EagerTensor, 
iou_threshold_) - score_threshold_ = convert(tf.EagerTensor, score_threshold_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_per_class_) - tf.add_input(desc, max_total_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - if pad_per_class !== nothing - desc["pad_per_class"] = Base.Bool(pad_per_class) - end - res = tf.execute(desc) - node = tf.TapeNode(combined_non_max_suppression, [boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_], name=nothing, pad_per_class=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function combined_non_max_suppression(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=nothing, pad_per_class=nothing) - if tf.in_eager_mode() - combined_non_max_suppression_eager(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class) - else - combined_non_max_suppression_graph(boxes_, scores_, max_output_size_per_class_, max_total_size_, iou_threshold_, score_threshold_; name=name, pad_per_class=pad_per_class) - end - end -end - - """ _scoped_allocator() Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) local desc tf.with_op_name(name, "_ScopedAllocator") do desc = tf.NodeDescription("_ScopedAllocator") @@ -27731,10 +26647,10 @@ end """ load_tpu_embedding_adadelta_parameters(parameters, accumulators, updates; table_id=-1, table_name=) - +Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") @@ -27802,7 +26718,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) local desc tf.with_op_name(name, "SparseAdd") do desc = tf.NodeDescription("SparseAdd") @@ -27872,7 +26788,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) local desc tf.with_op_name(name, "CTCGreedyDecoder") do desc = tf.NodeDescription("CTCGreedyDecoder") @@ -27923,7 +26839,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) local desc tf.with_op_name(name, "ImmutableConst") do desc = tf.NodeDescription("ImmutableConst") @@ -27973,7 +26889,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) local desc tf.with_op_name(name, "ConsumeMutexLock") do desc = tf.NodeDescription("ConsumeMutexLock") @@ -28009,7 +26925,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "GreaterEqual") do desc = tf.NodeDescription("GreaterEqual") @@ -28052,7 +26968,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) local desc tf.with_op_name(name, 
"InitializeTableFromTextFileV2") do desc = tf.NodeDescription("InitializeTableFromTextFileV2") @@ -28116,7 +27032,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeue") do desc = tf.NodeDescription("QueueDequeue") @@ -28164,7 +27080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function equal_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function equal_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Equal") do desc = tf.NodeDescription("Equal") @@ -28207,7 +27123,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandle") do desc = tf.NodeDescription("IteratorFromStringHandle") @@ -28255,7 +27171,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListSplit") do desc = tf.NodeDescription("TensorListSplit") @@ -28315,7 +27231,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalMaxPool") do desc = tf.NodeDescription("FractionalMaxPool") @@ -28394,7 +27310,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) local desc tf.with_op_name(name, "ScatterNd") do desc = tf.NodeDescription("ScatterNd") @@ -28438,65 +27354,13 @@ begin end -""" - tensor_list_scatter_into_existing_list(input_handle, tensor, indices) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListScatterIntoExistingList") do - desc = 
tf.NodeDescription("TensorListScatterIntoExistingList") - input_handle_ = convert(Tensor{Any}, input_handle_) - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Int32}, indices_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListScatterIntoExistingList") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - desc["element_dtype"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter_into_existing_list, [input_handle_, tensor_, indices_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function tensor_list_scatter_into_existing_list(input_handle_, tensor_, indices_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_scatter_into_existing_list_eager(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype) - else - tensor_list_scatter_into_existing_list_graph(input_handle_, tensor_, indices_; name=name, element_dtype=element_dtype) - end - end -end - - """ select(condition, t, e) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) local desc tf.with_op_name(name, "Select") do desc = tf.NodeDescription("Select") @@ -28543,7 +27407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Min") do desc = tf.NodeDescription("Min") @@ -28594,7 +27458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) local desc tf.with_op_name(name, "LRNGrad") do desc = tf.NodeDescription("LRNGrad") @@ -28666,7 +27530,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) local desc 
tf.with_op_name(name, "RandomPoissonV2") do desc = tf.NodeDescription("RandomPoissonV2") @@ -28740,7 +27604,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "FIFOQueue") do desc = tf.NodeDescription("FIFOQueue") @@ -28802,7 +27666,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") @@ -28872,7 +27736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalNonSerializableDataset") do desc = tf.NodeDescription("ExperimentalNonSerializableDataset") @@ -28914,65 +27778,13 @@ begin end -""" - experimental_bytes_produced_stats_dataset(input_dataset, tag) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do - desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tag_ = convert(Tensor{String}, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tag_ = convert(tf.EagerTensor, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end 
- function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ dilation2d_backprop_filter(input, filter, out_backprop) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2DBackpropFilter") do desc = tf.NodeDescription("Dilation2DBackpropFilter") @@ -29032,13 +27844,65 @@ begin end +""" + experimental_bytes_produced_stats_dataset(input_dataset, tag) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do + desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tag_ = convert(Tensor{String}, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + tag_ = convert(tf.EagerTensor, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end +end + + """ _if(cond, input) output = cond ? 
then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) local desc tf.with_op_name(name, "_If") do desc = tf.NodeDescription("_If") @@ -29104,7 +27968,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAddGrad") do desc = tf.NodeDescription("BiasAddGrad") @@ -29148,7 +28012,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderSerializeStateV2") do desc = tf.NodeDescription("ReaderSerializeStateV2") @@ -29184,7 +28048,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "WrapDatasetVariant") do desc = tf.NodeDescription("WrapDatasetVariant") @@ -29220,7 +28084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) local desc tf.with_op_name(name, "ParallelInterleaveDatasetV2") do desc = tf.NodeDescription("ParallelInterleaveDatasetV2") @@ -29302,7 +28166,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") @@ -29373,7 +28237,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, 
use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyRMSProp") do desc = tf.NodeDescription("ResourceApplyRMSProp") @@ -29443,13 +28307,61 @@ begin end +""" + experimental_lmdb_dataset(filenames) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLMDBDataset") do + desc = tf.NodeDescription("ExperimentalLMDBDataset") + filenames_ = convert(Tensor{String}, filenames_) + tf.add_input(desc, filenames_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalLMDBDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + tf.add_input(desc, filenames_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + end + end +end + + """ sparse_accumulator_take_gradient(handle, num_required) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "SparseAccumulatorTakeGradient") do desc = tf.NodeDescription("SparseAccumulatorTakeGradient") @@ -29494,61 +28406,13 @@ begin end -""" - experimental_lmdb_dataset(filenames) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalLMDBDataset") do - desc = tf.NodeDescription("ExperimentalLMDBDataset") - filenames_ = convert(Tensor{String}, filenames_) - tf.add_input(desc, filenames_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = 
tf.EagerOp("ExperimentalLMDBDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - tf.add_input(desc, filenames_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ stack_close_v2(handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackCloseV2") do desc = tf.NodeDescription("StackCloseV2") @@ -29584,7 +28448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapSize") do desc = tf.NodeDescription("MapSize") @@ -29646,7 +28510,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagradDA") do desc = tf.NodeDescription("ResourceApplyAdagradDA") @@ -29721,7 +28585,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSize") do desc = tf.NodeDescription("TensorForestTreeSize") @@ -29757,7 +28621,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDiagPart") do desc = tf.NodeDescription("MatrixDiagPart") @@ -29795,7 +28659,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) 
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") @@ -29831,7 +28695,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV3") do desc = tf.NodeDescription("TensorArraySplitV3") @@ -29881,7 +28745,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "SparseToDense") do desc = tf.NodeDescription("SparseToDense") @@ -29940,10 +28804,10 @@ end """ tpu_replicated_input(inputs) - +Operator that connects N unreplicated inputs to an N-way replicated TPU computation. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "TPUReplicatedInput") do desc = tf.NodeDescription("TPUReplicatedInput") @@ -29987,7 +28851,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_close_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "StackClose") do desc = tf.NodeDescription("StackClose") @@ -30023,7 +28887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeManySparse") do desc = tf.NodeDescription("DeserializeManySparse") @@ -30070,7 +28934,7 @@ end Replacement node for NcclReduce. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclReduceRecv") do desc = tf.NodeDescription("_NcclReduceRecv") @@ -30126,7 +28990,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPadGrad") do desc = tf.NodeDescription("MirrorPadGrad") @@ -30176,7 +29040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) local desc tf.with_op_name(name, "BroadcastArgs") do desc = tf.NodeDescription("BroadcastArgs") @@ -30219,7 +29083,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessTruncatedNormal") do desc = tf.NodeDescription("StatelessTruncatedNormal") @@ -30269,7 +29133,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) local desc tf.with_op_name(name, "RegexFullMatch") do desc = tf.NodeDescription("RegexFullMatch") @@ -30309,7 +29173,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "UnwrapDatasetVariant") do desc = tf.NodeDescription("UnwrapDatasetVariant") @@ -30345,7 +29209,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) local desc tf.with_op_name(name, "Empty") do desc = tf.NodeDescription("Empty") @@ -30390,10 +29254,10 @@ end """ outfeed_dequeue_tuple(; device_ordinal=-1) - +Retrieve multiple values that will be emitted by the computation as an XLA """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "OutfeedDequeueTuple") do desc = tf.NodeDescription("OutfeedDequeueTuple") @@ -30443,7 +29307,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Div") do desc = tf.NodeDescription("Div") @@ -30486,7 +29350,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Barrier") do desc = tf.NodeDescription("Barrier") @@ -30548,7 +29412,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateDiv") do desc = tf.NodeDescription("TruncateDiv") @@ -30591,7 +29455,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) local desc tf.with_op_name(name, "UnicodeEncode") do desc = tf.NodeDescription("UnicodeEncode") @@ -30649,7 +29513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "MergeSummary") do desc = tf.NodeDescription("MergeSummary") @@ -30691,7 +29555,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_queue_graph(resource_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_queue_graph(resource_; name=nothing) local desc tf.with_op_name(name, "FakeQueue") do desc = tf.NodeDescription("FakeQueue") @@ -30727,7 +29591,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_cholesky_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchCholesky") do desc = tf.NodeDescription("BatchCholesky") @@ -30765,7 +29629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "Iterator") do desc = tf.NodeDescription("Iterator") @@ -30821,7 +29685,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i1e_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bessel_i1e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI1e") do desc = tf.NodeDescription("BesselI1e") @@ -30859,7 +29723,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function import_event_graph(writer_, event_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function import_event_graph(writer_, event_; name=nothing) local desc tf.with_op_name(name, "ImportEvent") do desc = tf.NodeDescription("ImportEvent") @@ -30899,7 +29763,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) local desc tf.with_op_name(name, "QuantizedInstanceNorm") do desc = tf.NodeDescription("QuantizedInstanceNorm") @@ -30975,12 +29839,62 @@ end """ - load_tpu_embedding_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) + tensor_array_write_v3(handle, index, value, flow_in) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV3") do + desc = tf.NodeDescription("TensorArrayWriteV3") + handle_ = convert(Tensor{Any}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + end + tf.Tensor(tf.Operation(desc)) + end + function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayWriteV3") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) + desc["T"] = tf.data_type(value_) + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + end + end +end + +""" + 
load_tpu_embedding_adagrad_parameters(parameters, accumulators; table_id=-1, table_name=) +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") @@ -31038,63 +29952,13 @@ begin end -""" - tensor_array_write_v3(handle, index, value, flow_in) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWriteV3") do - desc = tf.NodeDescription("TensorArrayWriteV3") - handle_ = convert(Tensor{Any}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayWriteV3") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) - end - end -end - - """ dense_to_dense_set_operation(set1, set2; validate_indices=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "DenseToDenseSetOperation") do desc = tf.NodeDescription("DenseToDenseSetOperation") @@ -31154,7 +30018,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_jpeg_graph(image_; 
name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) local desc tf.with_op_name(name, "EncodeJpeg") do desc = tf.NodeDescription("EncodeJpeg") @@ -31238,60 +30102,13 @@ begin end -""" - inplace_update(x, i, v) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceUpdate") do - desc = tf.NodeDescription("InplaceUpdate") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) - end - function inplace_update_eager(x_, i_, v_; name=nothing) - desc = tf.EagerOp("InplaceUpdate") - x_ = convert(tf.EagerTensor, x_) - i_ = convert(tf.EagerTensor, i_) - v_ = convert(tf.EagerTensor, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(v_) - res = tf.execute(desc) - node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function inplace_update(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_update_eager(x_, i_, v_; name=name) - else - inplace_update_graph(x_, i_, v_; name=name) - end - end -end - - """ fused_pad_conv2d(input, paddings, filter) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "FusedPadConv2D") do desc = tf.NodeDescription("FusedPadConv2D") @@ -31350,13 +30167,60 @@ begin end +""" + inplace_update(x, i, v) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceUpdate") do + desc = tf.NodeDescription("InplaceUpdate") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + end + tf.Tensor(tf.Operation(desc)) + end + function inplace_update_eager(x_, i_, v_; name=nothing) + desc = tf.EagerOp("InplaceUpdate") + x_ = convert(tf.EagerTensor, x_) + i_ = convert(tf.EagerTensor, i_) + v_ = convert(tf.EagerTensor, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) + desc["T"] = tf.data_type(x_) + desc["T"] = tf.data_type(v_) + res = tf.execute(desc) + node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function inplace_update(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_update_eager(x_, i_, v_; name=name) + else + inplace_update_graph(x_, i_, v_; name=name) + end + end +end + + """ quantized_relu(features, min_features, max_features; out_type=Float32) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# 
tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu") do desc = tf.NodeDescription("QuantizedRelu") @@ -31413,7 +30277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) local desc tf.with_op_name(name, "GatherNd") do desc = tf.NodeDescription("GatherNd") @@ -31458,7 +30322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "Placeholder") do desc = tf.NodeDescription("Placeholder") @@ -31502,7 +30366,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FilterByLastComponentDataset") do desc = tf.NodeDescription("FilterByLastComponentDataset") @@ -31550,7 +30414,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) local desc tf.with_op_name(name, "ClipByValue") do desc = tf.NodeDescription("ClipByValue") @@ -31598,7 +30462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) local desc tf.with_op_name(name, "ImageSummary") do desc = tf.NodeDescription("ImageSummary") @@ -31649,10 +30513,10 @@ end """ retrieve_tpu_embedding_adadelta_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") @@ -31713,7 +30577,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) local desc tf.with_op_name(name, "StringJoin") do desc = tf.NodeDescription("StringJoin") @@ -31761,7 +30625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdAdd") do desc = tf.NodeDescription("ResourceScatterNdAdd") @@ -31816,7 +30680,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") @@ -31862,7 +30726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function left_shift_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function left_shift_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LeftShift") do desc = tf.NodeDescription("LeftShift") @@ -31899,78 +30763,13 @@ begin end -""" - requantize_per_channel(input, input_min, input_max, requested_output_min, requested_output_max; out_type=Float32) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "RequantizePerChannel") do - desc = tf.NodeDescription("RequantizePerChannel") - input_ = convert(Tensor{Float32}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) - requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - 
tf.add_input(desc, requested_output_min_) - tf.add_input(desc, requested_output_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - desc = tf.EagerOp("RequantizePerChannel") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - requested_output_min_ = convert(tf.EagerTensor, requested_output_min_) - requested_output_max_ = convert(tf.EagerTensor, requested_output_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, requested_output_min_) - tf.add_input(desc, requested_output_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(requantize_per_channel, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function requantize_per_channel(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - requantize_per_channel_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) - else - requantize_per_channel_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) - end - end -end - - """ tensor_scatter_add(tensor, indices, updates) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterAdd") do desc = tf.NodeDescription("TensorScatterAdd") @@ -32020,7 +30819,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) local desc tf.with_op_name(name, "_VarHandlesOp") do desc = tf.NodeDescription("_VarHandlesOp") @@ -32087,7 +30886,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT3D") do desc = tf.NodeDescription("IFFT3D") @@ -32119,64 +30918,13 @@ begin end -""" - euclidean_norm(input, reduction_indices; keep_dims=false) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function euclidean_norm_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "EuclideanNorm") do - desc = 
tf.NodeDescription("EuclideanNorm") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - end - tf.Tensor(tf.Operation(desc)) - end - function euclidean_norm_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("EuclideanNorm") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(euclidean_norm, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function euclidean_norm(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - euclidean_norm_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - euclidean_norm_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end -end - - """ ref_select(index, inputs) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_select_graph(index_, inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefSelect") do desc = tf.NodeDescription("RefSelect") @@ -32224,7 +30972,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) local desc tf.with_op_name(name, "SparseTensorSliceDataset") do desc = tf.NodeDescription("SparseTensorSliceDataset") @@ -32267,10 +31015,10 @@ end """ retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") @@ -32331,7 +31079,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT2D") do desc = tf.NodeDescription("BatchIFFT2D") @@ -32367,7 +31115,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGather") do desc = tf.NodeDescription("TensorArrayGather") @@ -32423,7 +31171,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") @@ -32478,7 +31226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) local desc tf.with_op_name(name, "EnsureShape") do desc = tf.NodeDescription("EnsureShape") @@ -32522,7 +31270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalGradientDescent") do desc = tf.NodeDescription("ApplyProximalGradientDescent") @@ -32581,12 +31329,12 @@ end """ - collective_reduce(input; wait_for=Int64[]) + collective_reduce(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# 
tf.@op function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) local desc tf.with_op_name(name, "CollectiveReduce") do desc = tf.NodeDescription("CollectiveReduce") @@ -32611,13 +31359,10 @@ begin if subdiv_offsets !== nothing desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) end - if wait_for !== nothing - desc["wait_for"] = map(Base.identity, wait_for) - end end tf.Tensor(tf.Operation(desc)) end - function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) desc = tf.EagerOp("CollectiveReduce") input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) @@ -32639,22 +31384,19 @@ begin if subdiv_offsets !== nothing desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) end - if wait_for !== nothing - desc["wait_for"] = map(Base.identity, wait_for) - end desc["T"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing, res) + node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, wait_for=nothing) + function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) if tf.in_eager_mode() - collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for) + collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) else - collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets, wait_for=wait_for) + collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) end end end @@ -32666,7 +31408,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_nan_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_nan_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsNan") do desc = tf.NodeDescription("IsNan") @@ -32704,7 +31446,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdaMax") do desc = tf.NodeDescription("ApplyAdaMax") @@ -32788,7 +31530,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) local desc tf.with_op_name(name, "DecodeAndCropJpeg") do desc = tf.NodeDescription("DecodeAndCropJpeg") @@ -32864,7 +31606,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyCenteredRMSProp") do desc = tf.NodeDescription("ApplyCenteredRMSProp") @@ -32948,7 +31690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilterV2") do desc = tf.NodeDescription("Conv3DBackpropFilterV2") @@ -33019,7 +31761,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixTriangularSolve") do desc = tf.NodeDescription("MatrixTriangularSolve") @@ -33074,7 +31816,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") @@ -33110,7 +31852,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "WriteAudioSummary") do desc = tf.NodeDescription("WriteAudioSummary") @@ -33168,7 +31910,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilespec") do desc = tf.NodeDescription("ShardedFilespec") @@ -33208,7 +31950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "DivNoNan") do desc = tf.NodeDescription("DivNoNan") @@ -33251,7 +31993,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) local desc tf.with_op_name(name, "SparseAccumulatorApplyGradient") do desc = tf.NodeDescription("SparseAccumulatorApplyGradient") @@ -33317,7 +32059,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedTensorToSparse") do desc = tf.NodeDescription("RaggedTensorToSparse") @@ -33370,7 +32112,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "ExtractVolumePatches") do desc = tf.NodeDescription("ExtractVolumePatches") @@ -33426,7 +32168,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) local desc tf.with_op_name(name, "BarrierInsertMany") do desc = tf.NodeDescription("BarrierInsertMany") @@ -33484,7 +32226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function const__graph(; name=nothing, 
value=nothing, dtype=nothing) local desc tf.with_op_name(name, "Const") do desc = tf.NodeDescription("Const") @@ -33528,7 +32270,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) local desc tf.with_op_name(name, "SpaceToBatch") do desc = tf.NodeDescription("SpaceToBatch") @@ -33578,7 +32320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StageSize") do desc = tf.NodeDescription("StageSize") @@ -33640,7 +32382,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "EmptyTensorList") do desc = tf.NodeDescription("EmptyTensorList") @@ -33688,110 +32430,13 @@ begin end -""" - quantized_conv2d_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, 
out_idx)) - end - out - end - function quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ lu(input; output_idx_type=Int32) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) local desc tf.with_op_name(name, "Lu") do desc = tf.NodeDescription("Lu") @@ -33840,7 +32485,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) local desc tf.with_op_name(name, "DecodeCompressed") do desc = tf.NodeDescription("DecodeCompressed") @@ -33882,7 +32527,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "GetSessionTensor") do desc = tf.NodeDescription("GetSessionTensor") @@ -33924,7 +32569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV3") do desc = tf.NodeDescription("TensorArrayGatherV3") @@ -33975,12 +32620,54 @@ end """ - load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters, accumulators, linears, gradient_accumulators; table_id=-1, table_name=) + destroy_resource_op(resource; ignore_lookup_error=true) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + local desc + tf.with_op_name(name, "DestroyResourceOp") do + desc = tf.NodeDescription("DestroyResourceOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + end + tf.Tensor(tf.Operation(desc)) + end + function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) + desc = tf.EagerOp("DestroyResourceOp") + resource_ = convert(tf.EagerTensor, resource_) + tf.add_input(desc, resource_) + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + res = tf.execute(desc) + node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) + if tf.in_eager_mode() + destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + else + destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + end + end +end +""" + load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters, accumulators, linears, gradient_accumulators; table_id=-1, table_name=) + +Load embedding parameters for a single table. 
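Sketch of the load direction, mirroring the retrieve op (the four tensor arguments follow the wrapper signature above; their shapes are assumed to match the table configuration):

    load_tpu_embedding_ftrl_parameters_grad_accum_debug(
        params, accums, linears, grad_accums;
        num_shards=1, shard_id=0, table_name="my_table")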
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") @@ -34046,55 +32733,13 @@ begin end -""" - destroy_resource_op(resource; ignore_lookup_error=true) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) - local desc - tf.with_op_name(name, "DestroyResourceOp") do - desc = tf.NodeDescription("DestroyResourceOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if ignore_lookup_error !== nothing - desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) - end - end - tf.Tensor(tf.Operation(desc)) - end - function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) - desc = tf.EagerOp("DestroyResourceOp") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - if ignore_lookup_error !== nothing - desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) - end - res = tf.execute(desc) - node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) - if tf.in_eager_mode() - destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) - else - destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) - end - end -end - - """ text_line_reader(; skip_header_lines=0, container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReader") do desc = tf.NodeDescription("TextLineReader") @@ -34144,7 +32789,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) local desc tf.with_op_name(name, "CreateSummaryDbWriter") do desc = tf.NodeDescription("CreateSummaryDbWriter") @@ -34196,7 +32841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# 
tf.@op function tanh_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "TanhGrad") do desc = tf.NodeDescription("TanhGrad") @@ -34239,7 +32884,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_base64_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_base64_graph(input_; name=nothing) local desc tf.with_op_name(name, "DecodeBase64") do desc = tf.NodeDescription("DecodeBase64") @@ -34275,7 +32920,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradV2") do desc = tf.NodeDescription("MaxPoolGradGradV2") @@ -34343,7 +32988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummaryV2") do desc = tf.NodeDescription("AudioSummaryV2") @@ -34393,7 +33038,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) local desc tf.with_op_name(name, "StatefulPartitionedCall") do desc = tf.NodeDescription("StatefulPartitionedCall") @@ -34465,7 +33110,7 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorConcat") do desc = tf.NodeDescription("_ScopedAllocatorConcat") @@ -34538,7 +33183,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") @@ -34602,7 
+33247,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) local desc tf.with_op_name(name, "BatchSvd") do desc = tf.NodeDescription("BatchSvd") @@ -34657,7 +33302,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapStage") do desc = tf.NodeDescription("MapStage") @@ -34737,7 +33382,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyFtrl") do desc = tf.NodeDescription("ResourceSparseApplyFtrl") @@ -34820,7 +33465,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighbor") do desc = tf.NodeDescription("ResizeNearestNeighbor") @@ -34868,7 +33513,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalCSVDataset") do desc = tf.NodeDescription("ExperimentalCSVDataset") @@ -34948,7 +33593,7 @@ end Returns x * y element-wise. 
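Sketch only: mkl_x and mkl_y are assumed to be the MKL layout metadata tensors paired with x and y, and the op is assumed to return the product together with its own metadata tensor, as is conventional for the _Mkl* ops:

    z, mkl_z = _mkl_mul(x, y, mkl_x, mkl_y)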
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMul") do desc = tf.NodeDescription("_MklMul") @@ -35004,7 +33649,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiag") do desc = tf.NodeDescription("BatchMatrixDiag") @@ -35042,7 +33687,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_inf_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_inf_graph(x_; name=nothing) local desc tf.with_op_name(name, "IsInf") do desc = tf.NodeDescription("IsInf") @@ -35080,7 +33725,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FixedUnigramCandidateSampler") do desc = tf.NodeDescription("FixedUnigramCandidateSampler") @@ -35187,13 +33832,58 @@ begin end +""" + unravel_index(indices, dims) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) + local desc + tf.with_op_name(name, "UnravelIndex") do + desc = tf.NodeDescription("UnravelIndex") + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + dims_ = convert(Tensor{Int32}, dims_) + dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) + (indices_, dims_) = tf.tf_promote(indices_, dims_) + tf.add_input(desc, indices_) + tf.add_input(desc, dims_) + end + tf.Tensor(tf.Operation(desc)) + end + function unravel_index_eager(indices_, dims_; name=nothing) + desc = tf.EagerOp("UnravelIndex") + indices_ = convert(tf.EagerTensor, indices_) + dims_ = convert(tf.EagerTensor, dims_) + tf.add_input(desc, indices_) + tf.add_input(desc, dims_) + desc["Tidx"] = tf.data_type(indices_) + desc["Tidx"] = tf.data_type(dims_) + res = tf.execute(desc) + node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function unravel_index(indices_, dims_; name=nothing) + if tf.in_eager_mode() + unravel_index_eager(indices_, dims_; name=name) + else + unravel_index_graph(indices_, dims_; name=name) + end + end +end + + """ sparse_apply_ftrl_v2(var, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power; 
use_locking=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrlV2") do desc = tf.NodeDescription("SparseApplyFtrlV2") @@ -35278,58 +33968,13 @@ begin end -""" - unravel_index(indices, dims) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) - local desc - tf.with_op_name(name, "UnravelIndex") do - desc = tf.NodeDescription("UnravelIndex") - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - dims_ = convert(Tensor{Int32}, dims_) - dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) - (indices_, dims_) = tf.tf_promote(indices_, dims_) - tf.add_input(desc, indices_) - tf.add_input(desc, dims_) - end - tf.Tensor(tf.Operation(desc)) - end - function unravel_index_eager(indices_, dims_; name=nothing) - desc = tf.EagerOp("UnravelIndex") - indices_ = convert(tf.EagerTensor, indices_) - dims_ = convert(tf.EagerTensor, dims_) - tf.add_input(desc, indices_) - tf.add_input(desc, dims_) - desc["Tidx"] = tf.data_type(indices_) - desc["Tidx"] = tf.data_type(dims_) - res = tf.execute(desc) - node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function unravel_index(indices_, dims_; name=nothing) - if tf.in_eager_mode() - unravel_index_eager(indices_, dims_; name=name) - else - unravel_index_graph(indices_, dims_; name=name) - end - end -end - - """ max(input, reduction_indices; keep_dims=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Max") do desc = tf.NodeDescription("Max") @@ -35380,7 +34025,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft2d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft2d_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT2D") do desc = tf.NodeDescription("IFFT2D") @@ -35418,7 +34063,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) local desc tf.with_op_name(name, "SparseConcat") do desc = tf.NodeDescription("SparseConcat") @@ -35487,7 +34132,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op 
function histogram_summary_graph(tag_, values_; name=nothing) local desc tf.with_op_name(name, "HistogramSummary") do desc = tf.NodeDescription("HistogramSummary") @@ -35529,7 +34174,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentSum") do desc = tf.NodeDescription("SegmentSum") @@ -35574,7 +34219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function exp_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function exp_graph(x_; name=nothing) local desc tf.with_op_name(name, "Exp") do desc = tf.NodeDescription("Exp") @@ -35609,10 +34254,10 @@ end """ configure_distributed_tpu(; embedding_config=, tpu_embedding_config=, is_global_init=false) - +An op that sets up the centralized structures for a distributed TPU """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) local desc tf.with_op_name(name, "ConfigureDistributedTPU") do desc = tf.NodeDescription("ConfigureDistributedTPU") @@ -35656,68 +34301,13 @@ begin end -""" - resource_scatter_nd_sub(ref, indices, updates; use_locking=true) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceScatterNdSub") do - desc = tf.NodeDescription("ResourceScatterNdSub") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceScatterNdSub") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function resource_scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - 
resource_scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end -end - - """ _xla_send_from_host(inputs, dynamic_key) A placeholder op for multiple values that will be sent from TensorFlow to a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaSendFromHost") do desc = tf.NodeDescription("_XlaSendFromHost") @@ -35775,7 +34365,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandleV2") do desc = tf.NodeDescription("GetSessionHandleV2") @@ -35813,7 +34403,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "ReluGrad") do desc = tf.NodeDescription("ReluGrad") @@ -35856,7 +34446,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMin") do desc = tf.NodeDescription("UnsortedSegmentMin") @@ -35907,7 +34497,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) local desc tf.with_op_name(name, "ParseExample") do desc = tf.NodeDescription("ParseExample") @@ -35994,7 +34584,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueEnqueueV2") do desc = tf.NodeDescription("QueueEnqueueV2") @@ -36046,7 +34636,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdAdd") do desc = tf.NodeDescription("ScatterNdAdd") @@ -36102,7 +34692,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderNumRecordsProducedV2") do desc = tf.NodeDescription("ReaderNumRecordsProducedV2") @@ -36135,10 +34725,10 @@ end """ load_tpu_embedding_centered_rms_prop_parameters(parameters, ms, mom, mg; table_id=-1, table_name=) - +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") @@ -36210,7 +34800,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "AssignSub") do desc = tf.NodeDescription("AssignSub") @@ -36259,7 +34849,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentSum") do desc = tf.NodeDescription("UnsortedSegmentSum") @@ -36310,7 +34900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNormGrad") do desc = tf.NodeDescription("FusedBatchNormGrad") @@ -36391,7 +34981,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, 
"MaxPoolGradV2") do desc = tf.NodeDescription("MaxPoolGradV2") @@ -36453,106 +35043,13 @@ begin end -""" - quantized_conv2d_with_bias_and_relu(input, filter, bias, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasAndRelu") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasAndRelu") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Float32}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasAndRelu") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_and_relu, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_and_relu(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - 
quantized_conv2d_with_bias_and_relu_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_and_relu_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ boosted_trees_create_ensemble(tree_ensemble_handle, stamp_token, tree_ensemble_serialized) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateEnsemble") do desc = tf.NodeDescription("BoostedTreesCreateEnsemble") @@ -36596,7 +35093,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapIncompleteSize") do desc = tf.NodeDescription("OrderedMapIncompleteSize") @@ -36658,7 +35155,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) local desc tf.with_op_name(name, "Skipgram") do desc = tf.NodeDescription("Skipgram") @@ -36725,7 +35222,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) local desc tf.with_op_name(name, "ArgMin") do desc = tf.NodeDescription("ArgMin") @@ -36776,7 +35273,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueMany") do desc = tf.NodeDescription("QueueDequeueMany") @@ -36828,7 +35325,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) local desc 
tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") @@ -36869,7 +35366,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function minimum_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function minimum_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Minimum") do desc = tf.NodeDescription("Minimum") @@ -36912,7 +35409,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) local desc tf.with_op_name(name, "Substr") do desc = tf.NodeDescription("Substr") @@ -36965,7 +35462,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_size_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueSize") do desc = tf.NodeDescription("QueueSize") @@ -37001,7 +35498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyFtrlV2") do desc = tf.NodeDescription("ApplyFtrlV2") @@ -37080,12 +35577,61 @@ end """ - load_tpu_embedding_momentum_parameters(parameters, momenta; table_id=-1, table_name=) + sparse_segment_mean(data, indices, segment_ids) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMean") do + desc = tf.NodeDescription("SparseSegmentMean") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + end + tf.Tensor(tf.Operation(desc)) + end + function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) + desc = tf.EagerOp("SparseSegmentMean") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + desc["T"] = tf.data_type(data_) + desc["Tidx"] = tf.data_type(indices_) + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_mean_graph(data_, 
indices_, segment_ids_; name=name) + end + end +end + +""" + load_tpu_embedding_momentum_parameters(parameters, momenta; table_id=-1, table_name=) +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") @@ -37143,62 +35689,13 @@ begin end -""" - sparse_segment_mean(data, indices, segment_ids) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMean") do - desc = tf.NodeDescription("SparseSegmentMean") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) - desc = tf.EagerOp("SparseSegmentMean") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) - end - end -end - - """ resource_apply_proximal_adagrad(var, accum, lr, l1, l2, grad; use_locking=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalAdagrad") do desc = tf.NodeDescription("ResourceApplyProximalAdagrad") @@ -37265,7 +35762,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, 
element_shape=nothing) local desc tf.with_op_name(name, "TensorArrayGatherV2") do desc = tf.NodeDescription("TensorArrayGatherV2") @@ -37321,7 +35818,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function less_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function less_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Less") do desc = tf.NodeDescription("Less") @@ -37364,7 +35861,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) local desc tf.with_op_name(name, "HostConst") do desc = tf.NodeDescription("HostConst") @@ -37408,7 +35905,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "UpperBound") do desc = tf.NodeDescription("UpperBound") @@ -37452,50 +35949,46 @@ end """ - tensor_list_get_item(input_handle, index, element_shape) + tensor_list_get_item(input_handle, index) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGetItem") do desc = tf.NodeDescription("TensorListGetItem") input_handle_ = convert(Tensor{Any}, input_handle_) index_ = convert(Tensor{Int32}, index_) - element_shape_ = convert(Tensor{Int32}, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, index_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end end tf.Tensor(tf.Operation(desc)) end - function tensor_list_get_item_eager(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_get_item_eager(input_handle_, index_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGetItem") input_handle_ = convert(tf.EagerTensor, input_handle_) index_ = convert(tf.EagerTensor, index_) - element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, index_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_, element_shape_], name=nothing, element_dtype=nothing, res) + node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_], name=nothing, element_dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tensor_list_get_item(input_handle_, index_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() - tensor_list_get_item_eager(input_handle_, index_, 
element_shape_; name=name, element_dtype=element_dtype) + tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype) else - tensor_list_get_item_graph(input_handle_, index_, element_shape_; name=name, element_dtype=element_dtype) + tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype) end end end @@ -37507,7 +36000,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVars") do desc = tf.NodeDescription("FakeQuantWithMinMaxVars") @@ -37563,7 +36056,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") @@ -37599,7 +36092,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) local desc tf.with_op_name(name, "ReaderReadUpToV2") do desc = tf.NodeDescription("ReaderReadUpToV2") @@ -37648,7 +36141,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function complex_graph(real_, imag_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function complex_graph(real_, imag_; name=nothing) local desc tf.with_op_name(name, "Complex") do desc = tf.NodeDescription("Complex") @@ -37691,7 +36184,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListReserve") do desc = tf.NodeDescription("TensorListReserve") @@ -37745,7 +36238,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) local desc tf.with_op_name(name, "Bitcast") do desc = tf.NodeDescription("Bitcast") @@ -37789,7 +36282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, 
shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueue") do desc = tf.NodeDescription("PriorityQueue") @@ -37851,7 +36344,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") @@ -37972,7 +36465,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cos_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cos_graph(x_; name=nothing) local desc tf.with_op_name(name, "Cos") do desc = tf.NodeDescription("Cos") @@ -38010,7 +36503,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizeDownAndShrinkRange") do desc = tf.NodeDescription("QuantizeDownAndShrinkRange") @@ -38067,7 +36560,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalRandomDataset") do desc = tf.NodeDescription("ExperimentalRandomDataset") @@ -38119,7 +36612,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) local desc tf.with_op_name(name, "Rpc") do desc = tf.NodeDescription("Rpc") @@ -38175,130 +36668,13 @@ begin end -""" - quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 
=# tf.@op function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - summand_ = convert(Tensor{Any}, summand_) - min_summand_ = convert(Tensor{Float32}, min_summand_) - max_summand_ = convert(Tensor{Float32}, max_summand_) - (summand_,) = tf.tf_promote(summand_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - tf.add_input(desc, summand_) - tf.add_input(desc, min_summand_) - tf.add_input(desc, max_summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - summand_ = convert(tf.EagerTensor, summand_) - min_summand_ = convert(tf.EagerTensor, min_summand_) - max_summand_ = convert(tf.EagerTensor, max_summand_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - tf.add_input(desc, summand_) - tf.add_input(desc, min_summand_) - tf.add_input(desc, 
max_summand_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - desc["Tbias"] = tf.data_type(bias_) - desc["Tsummand"] = tf.data_type(summand_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_signed_sum_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_, summand_, min_summand_, max_summand_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ tensor_list_length(input_handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) local desc tf.with_op_name(name, "TensorListLength") do desc = tf.NodeDescription("TensorListLength") @@ -38334,7 +36710,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapIncompleteSize") do desc = tf.NodeDescription("MapIncompleteSize") @@ -38396,7 +36772,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) local desc tf.with_op_name(name, "StatelessWhile") do desc = tf.NodeDescription("StatelessWhile") @@ -38450,7 +36826,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) local desc tf.with_op_name(name, "SparseConditionalAccumulator") do desc = tf.NodeDescription("SparseConditionalAccumulator") @@ -38512,7 +36888,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMin") do desc = tf.NodeDescription("SegmentMin") @@ -38557,7 +36933,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) local desc tf.with_op_name(name, "WriteGraphSummary") do desc = tf.NodeDescription("WriteGraphSummary") @@ -38601,7 +36977,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) local desc tf.with_op_name(name, "CholeskyGrad") do desc = tf.NodeDescription("CholeskyGrad") @@ -38644,7 +37020,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "LogUniformCandidateSampler") do desc = tf.NodeDescription("LogUniformCandidateSampler") @@ -38721,7 +37097,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "SerializeSparse") do desc = tf.NodeDescription("SerializeSparse") @@ -38773,7 +37149,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "ScatterNdNonAliasingAdd") do desc = tf.NodeDescription("ScatterNdNonAliasingAdd") @@ -38823,7 +37199,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
ref_merge_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "RefMerge") do desc = tf.NodeDescription("RefMerge") @@ -38867,12 +37243,12 @@ end """ - tensor_list_concat(input_handle; element_shape=?) + tensor_list_concat(input_handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcat") do desc = tf.NodeDescription("TensorListConcat") @@ -38881,9 +37257,6 @@ begin if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end end out = tf.Tensor[] op = tf.Operation(desc) @@ -38892,28 +37265,25 @@ begin end out end - function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcat") input_handle_ = convert(tf.EagerTensor, input_handle_) tf.add_input(desc, input_handle_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, element_shape=nothing, res) + node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res end end - function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing, element_shape=nothing) + function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() - tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape) + tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype) else - tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype, element_shape=element_shape) + tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype) end end end @@ -38925,7 +37295,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNCanonicalToParams") do desc = tf.NodeDescription("CudnnRNNCanonicalToParams") @@ -39022,7 +37392,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, 
accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyAdadelta") do desc = tf.NodeDescription("SparseApplyAdadelta") @@ -39103,7 +37473,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayClose") do desc = tf.NodeDescription("TensorArrayClose") @@ -39139,7 +37509,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) local desc tf.with_op_name(name, "SeluGrad") do desc = tf.NodeDescription("SeluGrad") @@ -39182,7 +37552,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) local desc tf.with_op_name(name, "CropAndResizeGradImage") do desc = tf.NodeDescription("CropAndResizeGradImage") @@ -39236,7 +37606,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "RFFT") do desc = tf.NodeDescription("RFFT") @@ -39276,7 +37646,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSqlDataset") do desc = tf.NodeDescription("ExperimentalSqlDataset") @@ -39332,7 +37702,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyPowerSign") do desc = tf.NodeDescription("ResourceApplyPowerSign") @@ -39404,7 +37774,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "MatrixDeterminant") do desc = tf.NodeDescription("MatrixDeterminant") @@ -39442,7 +37812,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) local desc tf.with_op_name(name, "StaticRegexReplace") do desc = tf.NodeDescription("StaticRegexReplace") @@ -39496,7 +37866,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "AvgPool") do desc = tf.NodeDescription("AvgPool") @@ -39558,7 +37928,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) local desc tf.with_op_name(name, "SparseDenseCwiseAdd") do desc = tf.NodeDescription("SparseDenseCwiseAdd") @@ -39609,7 +37979,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) local desc tf.with_op_name(name, "BiasAddV1") do desc = tf.NodeDescription("BiasAddV1") @@ -39652,7 +38022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function invert_permutation_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function invert_permutation_graph(x_; name=nothing) local desc tf.with_op_name(name, "InvertPermutation") do desc = tf.NodeDescription("InvertPermutation") @@ -39690,7 +38060,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) local desc tf.with_op_name(name, "HashTableV2") do desc = tf.NodeDescription("HashTableV2") @@ -39752,7 +38122,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, 
use_nesterov=nothing) local desc tf.with_op_name(name, "SparseApplyMomentum") do desc = tf.NodeDescription("SparseApplyMomentum") @@ -39824,12 +38194,12 @@ end """ - infeed_enqueue(input; shape=?, layout=Int64[], device_ordinal=-1) - + infeed_enqueue(input; shape=?, device_ordinal=-1) +An op which feeds a single Tensor value into the computation. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueue") do desc = tf.NodeDescription("InfeedEnqueue") @@ -39842,16 +38212,13 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - if layout !== nothing - desc["layout"] = map(Base.identity, layout) - end if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end end tf.Tensor(tf.Operation(desc)) end - function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueue") input_ = convert(tf.EagerTensor, input_) tf.add_input(desc, input_) @@ -39861,25 +38228,22 @@ begin if shape !== nothing desc["shape"] = Base.identity(shape) end - if layout !== nothing - desc["layout"] = map(Base.identity, layout) - end if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end desc["dtype"] = tf.data_type(input_) res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing, res) + node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, layout=nothing, device_ordinal=nothing) + function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) if tf.in_eager_mode() - infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal) + infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) else - infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, layout=layout, device_ordinal=device_ordinal) + infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) end end end @@ -39891,7 +38255,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomUniformInt") do desc = tf.NodeDescription("StatelessRandomUniformInt") @@ -39947,12 +38311,80 @@ end """ - load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters, accumulators, updates, gradient_accumulators; table_id=-1, table_name=) + _send(tensor; client_terminated=false) + +Sends the named 
tensor from send_device to recv_device. +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Send") do + desc = tf.NodeDescription("_Send") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + tf.Tensor(tf.Operation(desc)) + end + function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_Send") + tensor_ = convert(tf.EagerTensor, tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + desc["T"] = tf.data_type(tensor_) + res = tf.execute(desc) + node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end + end +end +""" + load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters, accumulators, updates, gradient_accumulators; table_id=-1, table_name=) + +Load embedding parameters for a single table. 
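+
+A minimal usage sketch, not taken from this patch: the four positional
+arguments are the per-table Adadelta state tensors (one row per embedding
+ID), the target table is selected with either `table_id` or `table_name`,
+and `num_shards`/`shard_id` describe how the table is partitioned across
+hosts. The tensor names and shard values below are hypothetical.
+
+    # params, accums, updates, grad_accums: hypothetical state tensors
+    load_tpu_embedding_adadelta_parameters_grad_accum_debug(
+        params, accums, updates, grad_accums;
+        table_id=0, num_shards=1, shard_id=0)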
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -40018,81 +38450,13 @@ begin end -""" - _send(tensor; client_terminated=false) - -Sends the named tensor from send_device to recv_device. -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_Send") do - desc = tf.NodeDescription("_Send") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - desc = tf.EagerOp("_Send") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - end - end -end - - """ 
map_peek(key, indices; capacity=0, memory_limit=0, container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapPeek") do desc = tf.NodeDescription("MapPeek") @@ -40162,7 +38526,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) local desc tf.with_op_name(name, "WriteScalarSummary") do desc = tf.NodeDescription("WriteScalarSummary") @@ -40212,7 +38576,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapUnstageNoKey") do desc = tf.NodeDescription("OrderedMapUnstageNoKey") @@ -40283,7 +38647,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyCenteredRMSProp") do desc = tf.NodeDescription("SparseApplyCenteredRMSProp") @@ -40368,77 +38732,13 @@ begin end -""" - tensor_list_scatter_v2(tensor, indices, element_shape, num_elements) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListScatterV2") do - desc = tf.NodeDescription("TensorListScatterV2") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Int32}, indices_) - element_shape_ = convert(Tensor{Any}, element_shape_) - num_elements_ = convert(Tensor{Int32}, num_elements_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=nothing, 
element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListScatterV2") - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - element_shape_ = convert(tf.EagerTensor, element_shape_) - num_elements_ = convert(tf.EagerTensor, num_elements_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["element_dtype"] = tf.data_type(tensor_) - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter_v2, [tensor_, indices_, element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function tensor_list_scatter_v2(tensor_, indices_, element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_scatter_v2_eager(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_scatter_v2_graph(tensor_, indices_, element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end -end - - """ conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropInputV2") do desc = tf.NodeDescription("Conv3DBackpropInputV2") @@ -40508,10 +38808,10 @@ end """ retrieve_tpu_embedding_proximal_adagrad_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
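
A hypothetical usage sketch (the table name and shard layout below are
illustrative, not taken from this patch); by the conventions of this
generated file the op is expected to return the parameters and the
Adagrad accumulators for the requested table:

    # Fetch shard 0 of 1 for one embedding table.
    params, accums = retrieve_tpu_embedding_proximal_adagrad_parameters(
        table_name="emb_table", num_shards=1, shard_id=0)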
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") @@ -40572,7 +38872,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomShuffle") do desc = tf.NodeDescription("RandomShuffle") @@ -40622,7 +38922,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "UniformCandidateSampler") do desc = tf.NodeDescription("UniformCandidateSampler") @@ -40699,7 +38999,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySplitV2") do desc = tf.NodeDescription("TensorArraySplitV2") @@ -40749,7 +39049,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) local desc tf.with_op_name(name, "MutableDenseHashTableV2") do desc = tf.NodeDescription("MutableDenseHashTableV2") @@ -40840,7 +39140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) local desc tf.with_op_name(name, "DrawBoundingBoxes") do desc = tf.NodeDescription("DrawBoundingBoxes") @@ -40882,7 +39182,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyProximalAdagrad") do desc = tf.NodeDescription("SparseApplyProximalAdagrad") @@ -40958,7 +39258,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "RangeDataset") do desc = tf.NodeDescription("RangeDataset") @@ -41014,7 +39314,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreStateV2") do desc = tf.NodeDescription("ReaderRestoreStateV2") @@ -41054,7 +39354,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopKV2") do desc = tf.NodeDescription("TopKV2") @@ -41107,7 +39407,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function atanh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atanh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Atanh") do desc = tf.NodeDescription("Atanh") @@ -41145,7 +39445,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_gradient_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientIdentity") do desc = tf.NodeDescription("DebugGradientIdentity") @@ -41183,7 +39483,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) local desc tf.with_op_name(name, "SparseAddGrad") do desc = tf.NodeDescription("SparseAddGrad") @@ -41238,7 +39538,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, 
dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterAdd") do desc = tf.NodeDescription("ResourceScatterAdd") @@ -41293,7 +39593,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ceil_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ceil_graph(x_; name=nothing) local desc tf.with_op_name(name, "Ceil") do desc = tf.NodeDescription("Ceil") @@ -41331,7 +39631,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "Save") do desc = tf.NodeDescription("Save") @@ -41378,10 +39678,10 @@ end """ retrieve_tpu_embedding_centered_rms_prop_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") @@ -41442,7 +39742,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) local desc tf.with_op_name(name, "QuantizedConcat") do desc = tf.NodeDescription("QuantizedConcat") @@ -41503,7 +39803,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeros_like_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zeros_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "ZerosLike") do desc = tf.NodeDescription("ZerosLike") @@ -41541,7 +39841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "FractionalAvgPool") do desc = tf.NodeDescription("FractionalAvgPool") @@ -41620,7 +39920,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) local desc tf.with_op_name(name, "EditDistance") do desc = tf.NodeDescription("EditDistance") @@ -41685,7 +39985,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueV2") do desc = tf.NodeDescription("UniqueV2") @@ -41740,7 +40040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantizeV2") do desc = tf.NodeDescription("QuantizeAndDequantizeV2") @@ -41812,7 +40112,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) local desc tf.with_op_name(name, "QuantizeAndDequantize") do desc = tf.NodeDescription("QuantizeAndDequantize") @@ -41875,19 +40175,17 @@ end """ - tensor_list_pop_back(input_handle, element_shape) + tensor_list_pop_back(input_handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_pop_back_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPopBack") do desc = tf.NodeDescription("TensorListPopBack") input_handle_ = convert(Tensor{Any}, input_handle_) - element_shape_ = convert(Tensor{Int32}, element_shape_) tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end @@ -41899,27 +40197,25 @@ begin end out end - function tensor_list_pop_back_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_pop_back_eager(input_handle_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPopBack") input_handle_ = convert(tf.EagerTensor, input_handle_) - element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_pop_back, [input_handle_, element_shape_], name=nothing, 
element_dtype=nothing, res) + node = tf.TapeNode(tensor_list_pop_back, [input_handle_], name=nothing, element_dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res end end - function tensor_list_pop_back(input_handle_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() - tensor_list_pop_back_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype) + tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype) else - tensor_list_pop_back_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype) + tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype) end end end @@ -41931,7 +40227,7 @@ end Debug NaN Value Counter Op """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) local desc tf.with_op_name(name, "DebugNanCount") do desc = tf.NodeDescription("DebugNanCount") @@ -41993,7 +40289,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdagradDA") do desc = tf.NodeDescription("ApplyAdagradDA") @@ -42071,7 +40367,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNative") do desc = tf.NodeDescription("DepthwiseConv2dNative") @@ -42138,7 +40434,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "SerializeIterator") do desc = tf.NodeDescription("SerializeIterator") @@ -42174,7 +40470,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "DatasetToGraph") do desc = tf.NodeDescription("DatasetToGraph") @@ -42210,7 +40506,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) local desc tf.with_op_name(name, "TopK") do desc = tf.NodeDescription("TopK") @@ -42265,7 +40561,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyFtrlV2") do desc = tf.NodeDescription("ResourceApplyFtrlV2") @@ -42346,7 +40642,7 @@ end Replacement node for NcclBroadcast. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) local desc tf.with_op_name(name, "_NcclBroadcastRecv") do desc = tf.NodeDescription("_NcclBroadcastRecv") @@ -42394,7 +40690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosed") do desc = tf.NodeDescription("QueueIsClosed") @@ -42430,7 +40726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ShuffleDataset") do desc = tf.NodeDescription("ShuffleDataset") @@ -42496,7 +40792,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "DeserializeSparse") do desc = tf.NodeDescription("DeserializeSparse") @@ -42545,7 +40841,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PriorityQueueV2") do desc = tf.NodeDescription("PriorityQueueV2") @@ -42607,7 +40903,7 @@ end A graph node which represents 
an argument to a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceArg") do desc = tf.NodeDescription("_DeviceArg") @@ -42645,7 +40941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "TruncatedNormal") do desc = tf.NodeDescription("TruncatedNormal") @@ -42701,7 +40997,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "TensorForestTreePredict") do desc = tf.NodeDescription("TensorForestTreePredict") @@ -42747,7 +41043,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "StackV2") do desc = tf.NodeDescription("StackV2") @@ -42795,7 +41091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) local desc tf.with_op_name(name, "AccumulatorNumAccumulated") do desc = tf.NodeDescription("AccumulatorNumAccumulated") @@ -42831,7 +41127,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderResetV2") do desc = tf.NodeDescription("ReaderResetV2") @@ -42867,7 +41163,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAddSign") do desc = tf.NodeDescription("ApplyAddSign") @@ -42938,10 +41234,10 @@ end """ retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
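
Presumably the `GradAccumDebug` variant returns the RMSProp state plus the
gradient accumulators; a sketch under that assumption (all argument values
are illustrative):

    params, ms, mom, grad_accums =
        retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(
            table_name="emb_table", num_shards=4, shard_id=2)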
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") @@ -43002,7 +41298,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rint_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rint_graph(x_; name=nothing) local desc tf.with_op_name(name, "Rint") do desc = tf.NodeDescription("Rint") @@ -43037,10 +41333,10 @@ end """ retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") @@ -43096,12 +41392,12 @@ end """ - extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true, noise=uniform) + extract_glimpse(input, size, offsets; centered=true, normalized=true, uniform_noise=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) local desc tf.with_op_name(name, "ExtractGlimpse") do desc = tf.NodeDescription("ExtractGlimpse") @@ -43120,13 +41416,10 @@ begin if uniform_noise !== nothing desc["uniform_noise"] = Base.Bool(uniform_noise) end - if noise !== nothing - desc["noise"] = Base.String(noise) - end end tf.Tensor(tf.Operation(desc)) end - function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) desc = tf.EagerOp("ExtractGlimpse") input_ = convert(tf.EagerTensor, input_) size_ = convert(tf.EagerTensor, size_) @@ -43143,21 +41436,18 @@ begin if uniform_noise !== nothing desc["uniform_noise"] = Base.Bool(uniform_noise) end - if noise !== nothing - desc["noise"] = Base.String(noise) - end res = tf.execute(desc) - node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, 
centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing, res) + node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, noise=nothing) + function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) if tf.in_eager_mode() - extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise) + extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) else - extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise, noise=noise) + extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) end end end @@ -43169,7 +41459,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) local desc tf.with_op_name(name, "StringToHashBucketStrong") do desc = tf.NodeDescription("StringToHashBucketStrong") @@ -43217,7 +41507,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OneShotIterator") do desc = tf.NodeDescription("OneShotIterator") @@ -43279,7 +41569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyMomentum") do desc = tf.NodeDescription("ResourceSparseApplyMomentum") @@ -43354,7 +41644,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) local desc tf.with_op_name(name, "SaveSlices") do desc = tf.NodeDescription("SaveSlices") @@ -43408,7 +41698,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
experimental_dataset_cardinality_graph(input_dataset_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) local desc tf.with_op_name(name, "ExperimentalDatasetCardinality") do desc = tf.NodeDescription("ExperimentalDatasetCardinality") @@ -43438,51 +41728,13 @@ begin end -""" - is_finite(x) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_finite_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsFinite") do - desc = tf.NodeDescription("IsFinite") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) - end - function is_finite_eager(x_; name=nothing) - desc = tf.EagerOp("IsFinite") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(is_finite, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function is_finite(x_; name=nothing) - if tf.in_eager_mode() - is_finite_eager(x_; name=name) - else - is_finite_graph(x_; name=name) - end - end -end - - """ experimental_numa_map_and_batch_dataset(input_dataset, other_arguments, batch_size, num_parallel_calls, drop_remainder; preserve_cardinality=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) local desc tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") @@ -43559,12 +41811,50 @@ end """ - all_to_all(input, group_assignment) + is_finite(x) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_finite_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsFinite") do + desc = tf.NodeDescription("IsFinite") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + end + tf.Tensor(tf.Operation(desc)) + end + function is_finite_eager(x_; name=nothing) + desc = tf.EagerOp("IsFinite") + x_ = convert(tf.EagerTensor, x_) + tf.add_input(desc, x_) + desc["T"] = tf.data_type(x_) + res = tf.execute(desc) + node = tf.TapeNode(is_finite, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function is_finite(x_; name=nothing) + if tf.in_eager_mode() + is_finite_eager(x_; name=name) + else + is_finite_graph(x_; name=name) + end + end +end + + +""" + all_to_all(input, group_assignment) + +An Op to exchange data across TPU replicas. 
On each replica, the input is +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) local desc tf.with_op_name(name, "AllToAll") do desc = tf.NodeDescription("AllToAll") @@ -43624,7 +41914,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TakeManySparseFromTensorsMap") do desc = tf.NodeDescription("TakeManySparseFromTensorsMap") @@ -43683,7 +41973,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixDiagPart") do desc = tf.NodeDescription("BatchMatrixDiagPart") @@ -43721,7 +42011,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) local desc tf.with_op_name(name, "FixedLengthRecordDataset") do desc = tf.NodeDescription("FixedLengthRecordDataset") @@ -43773,7 +42063,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPush") do desc = tf.NodeDescription("StackPush") @@ -43821,7 +42111,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "PlaceholderV2") do desc = tf.NodeDescription("PlaceholderV2") @@ -43865,7 +42155,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorInit") do desc = tf.NodeDescription("MultiDeviceIteratorInit") @@ -43909,7 +42199,7 @@ end Re-configures the GCS block cache with the new configuration values. 
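
A minimal sketch of a call (the cache geometry below is illustrative, not a
default of this op):

    # 64 MiB cache assembled from 16 MiB blocks, tolerating
    # 300 seconds of staleness.
    gcs_configure_block_cache(64 * 1024^2, 16 * 1024^2, 300)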
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureBlockCache") do desc = tf.NodeDescription("GcsConfigureBlockCache") @@ -43953,7 +42243,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueV2") do desc = tf.NodeDescription("QueueDequeueV2") @@ -43996,12 +42286,56 @@ end """ - retrieve_tpu_embedding_rms_prop_parameters(; table_id=-1, table_name=) + transpose(x, perm) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function transpose_graph(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "Transpose") do + desc = tf.NodeDescription("Transpose") + x_ = convert(Tensor{Any}, x_) + perm_ = convert(Tensor{Int32}, perm_) + (perm_,) = tf.tf_promote(perm_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + end + tf.Tensor(tf.Operation(desc)) + end + function transpose_eager(x_, perm_; name=nothing) + desc = tf.EagerOp("Transpose") + x_ = convert(tf.EagerTensor, x_) + perm_ = convert(tf.EagerTensor, perm_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) + desc["T"] = tf.data_type(x_) + desc["Tperm"] = tf.data_type(perm_) + res = tf.execute(desc) + node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function transpose(x_, perm_; name=nothing) + if tf.in_eager_mode() + transpose_eager(x_, perm_; name=name) + else + transpose_graph(x_, perm_; name=name) + end + end +end +""" + retrieve_tpu_embedding_rms_prop_parameters(; table_id=-1, table_name=) + +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") @@ -44056,57 +42390,13 @@ begin end -""" - transpose(x, perm) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function transpose_graph(x_, perm_; name=nothing) - local desc - tf.with_op_name(name, "Transpose") do - desc = tf.NodeDescription("Transpose") - x_ = convert(Tensor{Any}, x_) - perm_ = convert(Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - end - tf.Tensor(tf.Operation(desc)) - end - function transpose_eager(x_, perm_; name=nothing) - desc = tf.EagerOp("Transpose") - x_ = convert(tf.EagerTensor, x_) - perm_ = convert(tf.EagerTensor, perm_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - desc["T"] = tf.data_type(x_) - desc["Tperm"] = tf.data_type(perm_) - res = tf.execute(desc) - node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function transpose(x_, perm_; name=nothing) - if tf.in_eager_mode() - transpose_eager(x_, perm_; name=name) - else - transpose_graph(x_, perm_; name=name) - end - end -end - - """ ifft(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ifft_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "IFFT") do desc = tf.NodeDescription("IFFT") @@ -44144,7 +42434,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") @@ -44199,7 +42489,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "QueueIsClosedV2") do desc = tf.NodeDescription("QueueIsClosedV2") @@ -44235,7 +42525,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, 
dtype=nothing) local desc tf.with_op_name(name, "ParameterizedTruncatedNormal") do desc = tf.NodeDescription("ParameterizedTruncatedNormal") @@ -44312,7 +42602,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_part_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function diag_part_graph(input_; name=nothing) local desc tf.with_op_name(name, "DiagPart") do desc = tf.NodeDescription("DiagPart") @@ -44344,61 +42634,13 @@ begin end -""" - kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) - local desc - tf.with_op_name(name, "KmeansPlusPlusInitialization") do - desc = tf.NodeDescription("KmeansPlusPlusInitialization") - points_ = convert(Tensor{Float32}, points_) - num_to_sample_ = convert(Tensor{Int64}, num_to_sample_) - seed_ = convert(Tensor{Int64}, seed_) - num_retries_per_sample_ = convert(Tensor{Int64}, num_retries_per_sample_) - tf.add_input(desc, points_) - tf.add_input(desc, num_to_sample_) - tf.add_input(desc, seed_) - tf.add_input(desc, num_retries_per_sample_) - end - tf.Tensor(tf.Operation(desc)) - end - function kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) - desc = tf.EagerOp("KmeansPlusPlusInitialization") - points_ = convert(tf.EagerTensor, points_) - num_to_sample_ = convert(tf.EagerTensor, num_to_sample_) - seed_ = convert(tf.EagerTensor, seed_) - num_retries_per_sample_ = convert(tf.EagerTensor, num_retries_per_sample_) - tf.add_input(desc, points_) - tf.add_input(desc, num_to_sample_) - tf.add_input(desc, seed_) - tf.add_input(desc, num_retries_per_sample_) - res = tf.execute(desc) - node = tf.TapeNode(kmeans_plus_plus_initialization, [points_, num_to_sample_, seed_, num_retries_per_sample_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function kmeans_plus_plus_initialization(points_, num_to_sample_, seed_, num_retries_per_sample_; name=nothing) - if tf.in_eager_mode() - kmeans_plus_plus_initialization_eager(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) - else - kmeans_plus_plus_initialization_graph(points_, num_to_sample_, seed_, num_retries_per_sample_; name=name) - end - end -end - - """ regex_replace(input, pattern, rewrite; replace_global=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) local desc tf.with_op_name(name, "RegexReplace") do desc = tf.NodeDescription("RegexReplace") @@ -44448,7 +42690,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) local desc tf.with_op_name(name, 
"SparseTensorDenseMatMul") do desc = tf.NodeDescription("SparseTensorDenseMatMul") @@ -44514,7 +42756,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) local desc tf.with_op_name(name, "MapDefun") do desc = tf.NodeDescription("MapDefun") @@ -44584,7 +42826,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") @@ -44658,10 +42900,10 @@ end """ retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") @@ -44722,7 +42964,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) local desc tf.with_op_name(name, "ParallelConcat") do desc = tf.NodeDescription("ParallelConcat") @@ -44772,7 +43014,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) local desc tf.with_op_name(name, "LookupTableFindV2") do desc = tf.NodeDescription("LookupTableFindV2") @@ -44820,7 +43062,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) local desc 
tf.with_op_name(name, "TensorForestTreeDeserialize") do desc = tf.NodeDescription("TensorForestTreeDeserialize") @@ -44857,10 +43099,10 @@ end """ retrieve_tpu_embedding_momentum_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") @@ -44921,7 +43163,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") @@ -44981,7 +43223,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyGradientDescent") do desc = tf.NodeDescription("ResourceApplyGradientDescent") @@ -45034,7 +43276,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") @@ -45094,7 +43336,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) local desc tf.with_op_name(name, "DecodeRaw") do desc = tf.NodeDescription("DecodeRaw") @@ -45142,7 +43384,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, 
max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") @@ -45207,7 +43449,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "UniqueWithCountsV2") do desc = tf.NodeDescription("UniqueWithCountsV2") @@ -45262,7 +43504,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalSleepDataset") do desc = tf.NodeDescription("ExperimentalSleepDataset") @@ -45311,10 +43553,10 @@ end """ tpu_replicated_output(input) - +Operator that connects the output of an N-way replicated TPU computation to N separate outputs. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) local desc tf.with_op_name(name, "TPUReplicatedOutput") do desc = tf.NodeDescription("TPUReplicatedOutput") @@ -45363,7 +43605,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "LowerBound") do desc = tf.NodeDescription("LowerBound") @@ -45412,7 +43654,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tan_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tan_graph(x_; name=nothing) local desc tf.with_op_name(name, "Tan") do desc = tf.NodeDescription("Tan") @@ -45450,7 +43692,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) local desc tf.with_op_name(name, "Enter") do desc = tf.NodeDescription("Enter") @@ -45501,12 +43743,12 @@ end """ - infeed_enqueue_tuple(inputs; layouts=Int64[], device_ordinal=-1) - + infeed_enqueue_tuple(inputs; device_ordinal=-1) +An op which feeds multiple Tensor values into the computation as an XLA tuple. 
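Every regenerated op in this file follows the same three-function shape: `<op>_graph` builds a `NodeDescription` under `tf.with_op_name`, `<op>_eager` executes immediately through `tf.EagerOp`/`tf.execute` and records a `tf.TapeNode` for the gradient tape, and the public `<op>` dispatches on `tf.in_eager_mode()`. A minimal sketch of that trio for a hypothetical one-input op ("Neg" stands in for any real op; the helper names are the ones visible in these hunks):

    function neg_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "Neg") do
            desc = tf.NodeDescription("Neg")
            x_ = convert(Tensor{Any}, x_)
            (x_,) = tf.tf_promote(x_)        # promote to a common TF dtype
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))        # deferred: returns a graph tensor
    end
    function neg_eager(x_; name=nothing)
        desc = tf.EagerOp("Neg")
        x_ = convert(tf.EagerTensor, x_)
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)         # type attr from the runtime tensor
        res = tf.execute(desc)               # runs the kernel right away
        node = tf.TapeNode(neg, [x_], name=nothing, res)
        if length(res) >= 1
            tf.add_node(res[1], node)        # record on the tape for backprop
            return res[1]
        end
    end
    function neg(x_; name=nothing)
        if tf.in_eager_mode()
            neg_eager(x_; name=name)
        else
            neg_graph(x_; name=name)
        end
    end
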
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "InfeedEnqueueTuple") do desc = tf.NodeDescription("InfeedEnqueueTuple") @@ -45518,16 +43760,13 @@ begin if shapes !== nothing desc["shapes"] = map(Base.identity, shapes) end - if layouts !== nothing - desc["layouts"] = map(Base.identity, layouts) - end if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end end tf.Tensor(tf.Operation(desc)) end - function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) + function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueueTuple") inputs_ = convert(tf.EagerTensor, inputs_) tf.add_input(desc, inputs_) @@ -45537,60 +43776,21 @@ begin if shapes !== nothing desc["shapes"] = map(Base.identity, shapes) end - if layouts !== nothing - desc["layouts"] = map(Base.identity, layouts) - end if device_ordinal !== nothing desc["device_ordinal"] = Base.Int(device_ordinal) end res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, layouts=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) - else - infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, layouts=layouts, device_ordinal=device_ordinal) - end - end -end - - -""" - _set_global_tpu_array(topology) - -An op that informs a host of the global ids of all the of TPUs in the -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) - local desc - tf.with_op_name(name, "_SetGlobalTPUArray") do - desc = tf.NodeDescription("_SetGlobalTPUArray") - topology_ = convert(Tensor{String}, topology_) - tf.add_input(desc, topology_) - end - tf.Tensor(tf.Operation(desc)) - end - function _set_global_tpu_array_eager(topology_; name=nothing) - desc = tf.EagerOp("_SetGlobalTPUArray") - topology_ = convert(tf.EagerTensor, topology_) - tf.add_input(desc, topology_) - res = tf.execute(desc) - node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) + node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function _set_global_tpu_array(topology_; name=nothing) + function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) if tf.in_eager_mode() - _set_global_tpu_array_eager(topology_; name=name) + infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) else - _set_global_tpu_array_graph(topology_; name=name) + 
infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) end end end @@ -45602,7 +43802,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function square_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function square_graph(x_; name=nothing) local desc tf.with_op_name(name, "Square") do desc = tf.NodeDescription("Square") @@ -45634,13 +43834,49 @@ begin end +""" + _set_global_tpu_array(topology) + +An op that informs a host of the global ids of all the of TPUs in the +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing) + local desc + tf.with_op_name(name, "_SetGlobalTPUArray") do + desc = tf.NodeDescription("_SetGlobalTPUArray") + topology_ = convert(Tensor{String}, topology_) + tf.add_input(desc, topology_) + end + tf.Tensor(tf.Operation(desc)) + end + function _set_global_tpu_array_eager(topology_; name=nothing) + desc = tf.EagerOp("_SetGlobalTPUArray") + topology_ = convert(tf.EagerTensor, topology_) + tf.add_input(desc, topology_) + res = tf.execute(desc) + node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function _set_global_tpu_array(topology_; name=nothing) + if tf.in_eager_mode() + _set_global_tpu_array_eager(topology_; name=name) + else + _set_global_tpu_array_graph(topology_; name=name) + end + end +end + + """ debug_gradient_ref_identity(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing) local desc tf.with_op_name(name, "DebugGradientRefIdentity") do desc = tf.NodeDescription("DebugGradientRefIdentity") @@ -45678,7 +43914,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyAdadelta") do desc = tf.NodeDescription("ApplyAdadelta") @@ -45752,7 +43988,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, 
output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") @@ -45848,7 +44084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) local desc tf.with_op_name(name, "AudioSummary") do desc = tf.NodeDescription("AudioSummary") @@ -45900,7 +44136,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function squared_difference_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "SquaredDifference") do desc = tf.NodeDescription("SquaredDifference") @@ -45937,77 +44173,13 @@ begin end -""" - experimental_take_while_dataset(input_dataset, other_arguments) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalTakeWhileDataset") do - desc = tf.NodeDescription("ExperimentalTakeWhileDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if predicate !== nothing - desc["predicate"] = Base.identity(predicate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalTakeWhileDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if predicate !== nothing - desc["predicate"] = Base.identity(predicate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_take_while_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_take_while_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_take_while_dataset_eager(input_dataset_, other_arguments_; name=name, 
predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - experimental_take_while_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ scatter_nd_update(ref, indices, updates; use_locking=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdUpdate") do desc = tf.NodeDescription("ScatterNdUpdate") @@ -46063,7 +44235,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) local desc tf.with_op_name(name, "DynamicStitch") do desc = tf.NodeDescription("DynamicStitch") @@ -46111,7 +44283,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ones_like_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ones_like_graph(x_; name=nothing) local desc tf.with_op_name(name, "OnesLike") do desc = tf.NodeDescription("OnesLike") @@ -46149,7 +44321,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalMaxPoolGrad") do desc = tf.NodeDescription("FractionalMaxPoolGrad") @@ -46211,7 +44383,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) local desc tf.with_op_name(name, "RemoteCall") do desc = tf.NodeDescription("RemoteCall") @@ -46269,7 +44441,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) local desc tf.with_op_name(name, "Gather") do desc = tf.NodeDescription("Gather") @@ -46320,7 +44492,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, 
min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) local desc tf.with_op_name(name, "QuantizedMatMul") do desc = tf.NodeDescription("QuantizedMatMul") @@ -46397,7 +44569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) local desc tf.with_op_name(name, "UnicodeDecodeWithOffsets") do desc = tf.NodeDescription("UnicodeDecodeWithOffsets") @@ -46457,12 +44629,64 @@ end """ - enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) + accumulator_apply_gradient(handle, local_step, gradient) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorApplyGradient") do + desc = tf.NodeDescription("AccumulatorApplyGradient") + handle_ = convert(Tensor{String}, handle_) + local_step_ = convert(Tensor{Int64}, local_step_) + gradient_ = convert(Tensor{Any}, gradient_) + (gradient_,) = tf.tf_promote(gradient_) + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + tf.Tensor(tf.Operation(desc)) + end + function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AccumulatorApplyGradient") + handle_ = convert(tf.EagerTensor, handle_) + local_step_ = convert(tf.EagerTensor, local_step_) + gradient_ = convert(tf.EagerTensor, gradient_) + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + desc["dtype"] = tf.data_type(gradient_) + res = tf.execute(desc) + node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) + else + accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) + end + end +end + + +""" + enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) + +This Op eases the porting of code that uses tf.nn.embedding_lookup_sparse(). 
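One detail worth noting in the eager bodies above (see `accumulator_apply_gradient_eager`): after the optional keyword attrs are copied in, type attrs are overwritten from the actual runtime tensors, so an explicit `dtype` is effectively optional in eager mode. The relevant lines, with explanatory comments added (a reading of the hunk above, not new behavior):

    if dtype !== nothing
        desc["dtype"] = Base.identity(dtype)    # caller-supplied attr, if any
    end
    desc["dtype"] = tf.data_type(gradient_)     # then unconditionally replaced
                                                # by the eager tensor's dtype

The net effect is that the runtime dtype always wins when the op executes immediately, so eager callers cannot accidentally pin a mismatched type attr.
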
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") @@ -46528,65 +44752,13 @@ begin end -""" - accumulator_apply_gradient(handle, local_step, gradient) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AccumulatorApplyGradient") do - desc = tf.NodeDescription("AccumulatorApplyGradient") - handle_ = convert(Tensor{String}, handle_) - local_step_ = convert(Tensor{Int64}, local_step_) - gradient_ = convert(Tensor{Any}, gradient_) - (gradient_,) = tf.tf_promote(gradient_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AccumulatorApplyGradient") - handle_ = convert(tf.EagerTensor, handle_) - local_step_ = convert(tf.EagerTensor, local_step_) - gradient_ = convert(tf.EagerTensor, gradient_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(gradient_) - res = tf.execute(desc) - node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) - else - accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) - end - end -end - - """ write_summary(writer, step, tensor, tag, summary_metadata) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) local desc tf.with_op_name(name, "WriteSummary") do desc = tf.NodeDescription("WriteSummary") @@ -46640,7 +44812,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "QuantizedConv2D") do desc = tf.NodeDescription("QuantizedConv2D") @@ -46729,7 +44901,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyMomentum") do desc = tf.NodeDescription("ResourceApplyMomentum") @@ -46797,7 +44969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log1p_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log1p_graph(x_; name=nothing) local desc tf.with_op_name(name, "Log1p") do desc = tf.NodeDescription("Log1p") @@ -46835,7 +45007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapClear") do desc = tf.NodeDescription("OrderedMapClear") @@ -46897,7 +45069,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterUpdate") do desc = tf.NodeDescription("ResourceScatterUpdate") @@ -46952,7 +45124,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "BarrierTakeMany") do desc = tf.NodeDescription("BarrierTakeMany") @@ -47021,7 +45193,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceApplyKerasMomentum") do desc = tf.NodeDescription("ResourceApplyKerasMomentum") @@ -47089,7 +45261,7 @@ end Generates serialized partition messages suitable for batch reads. 
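Ops with no tensor inputs, such as `GenerateBigQueryReaderPartitions` here or `OrderedMapClear` above, reduce to pure attribute plumbing: each optional keyword is copied into the node description only when the caller supplies it, so TensorFlow's registered defaults apply otherwise. A condensed sketch of that shape for a hypothetical attr-only op ("ExampleOp" and its attrs are illustrative, not real):

    function example_op_graph(; name=nothing, capacity=nothing, container=nothing)
        local desc
        tf.with_op_name(name, "ExampleOp") do
            desc = tf.NodeDescription("ExampleOp")
            if capacity !== nothing
                desc["capacity"] = Base.Int(capacity)       # scalar int attr
            end
            if container !== nothing
                desc["container"] = Base.String(container)  # string attr
            end
        end
        tf.Tensor(tf.Operation(desc))
    end
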
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") @@ -47163,7 +45335,7 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "_XlaRecvAtHost") do desc = tf.NodeDescription("_XlaRecvAtHost") @@ -47217,7 +45389,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "QuantizedAvgPool") do desc = tf.NodeDescription("QuantizedAvgPool") @@ -47286,7 +45458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") @@ -47370,53 +45542,13 @@ begin end -""" - tensor_list_resize(input_handle, size) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_resize_graph(input_handle_, size_; name=nothing) - local desc - tf.with_op_name(name, "TensorListResize") do - desc = tf.NodeDescription("TensorListResize") - input_handle_ = convert(Tensor{Any}, input_handle_) - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, size_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_resize_eager(input_handle_, size_; name=nothing) - desc = tf.EagerOp("TensorListResize") - input_handle_ = convert(tf.EagerTensor, input_handle_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, size_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_resize, [input_handle_, size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - 
end - end - function tensor_list_resize(input_handle_, size_; name=nothing) - if tf.in_eager_mode() - tensor_list_resize_eager(input_handle_, size_; name=name) - else - tensor_list_resize_graph(input_handle_, size_; name=name) - end - end -end - - """ _host_recv(; client_terminated=false) Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) local desc tf.with_op_name(name, "_HostRecv") do desc = tf.NodeDescription("_HostRecv") @@ -47484,7 +45616,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesCenterBias") do desc = tf.NodeDescription("BoostedTreesCenterBias") @@ -47536,7 +45668,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableSizeV2") do desc = tf.NodeDescription("LookupTableSizeV2") @@ -47572,7 +45704,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT") do desc = tf.NodeDescription("IRFFT") @@ -47612,7 +45744,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceAdd") do desc = tf.NodeDescription("InplaceAdd") @@ -47659,7 +45791,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) local desc tf.with_op_name(name, "BiasAdd") do desc = tf.NodeDescription("BiasAdd") @@ -47703,12 +45835,45 @@ end """ - load_tpu_embedding_adam_parameters_grad_accum_debug(parameters, momenta, velocities, gradient_accumulators; table_id=-1, table_name=) + _disconnect_host_from_distributed_tpu_system() + +An op that disconnects the TPUs on a host from a running distributed +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
_disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + local desc + tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do + desc + tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") + end + tf.Tensor(tf.Operation(desc)) + end + function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) + desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") + res = tf.execute(desc) + node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function _disconnect_host_from_distributed_tpu_system(; name=nothing) + if tf.in_eager_mode() + _disconnect_host_from_distributed_tpu_system_eager(; name=name) + else + _disconnect_host_from_distributed_tpu_system_graph(; name=name) + end + end +end + +""" + load_tpu_embedding_adam_parameters_grad_accum_debug(parameters, momenta, velocities, gradient_accumulators; table_id=-1, table_name=) +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") @@ -47774,46 +45939,13 @@ begin end -""" - _disconnect_host_from_distributed_tpu_system() - -An op that disconnects the TPUs on a host from a running distributed -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) - local desc - tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do - desc - tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") - end - tf.Tensor(tf.Operation(desc)) - end - function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) - desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") - res = tf.execute(desc) - node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function _disconnect_host_from_distributed_tpu_system(; name=nothing) - if tf.in_eager_mode() - _disconnect_host_from_distributed_tpu_system_eager(; name=name) - else - _disconnect_host_from_distributed_tpu_system_graph(; name=name) - end - end -end - - """ ragged_range(starts, limits, deltas) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) local desc tf.with_op_name(name, "RaggedRange") do desc = tf.NodeDescription("RaggedRange") @@ -47866,7 +45998,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; 
name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "WindowDataset") do desc = tf.NodeDescription("WindowDataset") @@ -47930,7 +46062,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function diag_graph(diagonal_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function diag_graph(diagonal_; name=nothing) local desc tf.with_op_name(name, "Diag") do desc = tf.NodeDescription("Diag") @@ -47965,10 +46097,10 @@ end """ infeed_dequeue() - +A placeholder op for a value that will be fed into the computation. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) local desc tf.with_op_name(name, "InfeedDequeue") do desc = tf.NodeDescription("InfeedDequeue") @@ -48012,7 +46144,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") @@ -48064,7 +46196,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddSparseToTensorsMap") do desc = tf.NodeDescription("AddSparseToTensorsMap") @@ -48122,7 +46254,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) local desc tf.with_op_name(name, "RaggedGather") do desc = tf.NodeDescription("RaggedGather") @@ -48188,7 +46320,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) local desc tf.with_op_name(name, "RGBToHSV") do desc = tf.NodeDescription("RGBToHSV") @@ -48226,7 +46358,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op 
function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) local desc tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") @@ -48262,7 +46394,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) local desc tf.with_op_name(name, "For") do desc = tf.NodeDescription("For") @@ -48322,7 +46454,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "SparseReduceMaxSparse") do desc = tf.NodeDescription("SparseReduceMaxSparse") @@ -48383,7 +46515,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) local desc tf.with_op_name(name, "ConcatOffset") do desc = tf.NodeDescription("ConcatOffset") @@ -48434,7 +46566,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Stage") do desc = tf.NodeDescription("Stage") @@ -48500,7 +46632,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function switch_graph(data_, pred_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function switch_graph(data_, pred_; name=nothing) local desc tf.with_op_name(name, "Switch") do desc = tf.NodeDescription("Switch") @@ -48547,7 +46679,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) local desc tf.with_op_name(name, "QueueDequeueManyV2") do desc = tf.NodeDescription("QueueDequeueManyV2") @@ -48599,7 +46731,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentProd") do desc = tf.NodeDescription("SegmentProd") @@ -48644,7 +46776,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) local desc tf.with_op_name(name, "ApproximateEqual") do desc = tf.NodeDescription("ApproximateEqual") @@ -48688,12 +46820,12 @@ end """ - conv2d(input, filter; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d(input, filter; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2D") do desc = tf.NodeDescription("Conv2D") @@ -48711,9 +46843,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) - end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -48723,7 +46852,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2D") input_ = convert(tf.EagerTensor, input_) filter_ = convert(tf.EagerTensor, filter_) @@ -48738,9 +46867,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) - end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -48750,17 +46876,17 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(filter_) res = tf.execute(desc) - node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) + node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) if tf.in_eager_mode() - conv2d_eager(input_, filter_; name=name, strides=strides, 
use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) else - conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) end end end @@ -48769,10 +46895,10 @@ end """ cross_replica_sum(input, group_assignment) - +An Op to sum inputs across replicated TPU instances. Each instance supplies its """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) local desc tf.with_op_name(name, "CrossReplicaSum") do desc = tf.NodeDescription("CrossReplicaSum") @@ -48814,7 +46940,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) local desc tf.with_op_name(name, "SparseMatMul") do desc = tf.NodeDescription("SparseMatMul") @@ -48882,7 +47008,7 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) local desc tf.with_op_name(name, "_ScopedAllocatorSplit") do desc = tf.NodeDescription("_ScopedAllocatorSplit") @@ -48954,7 +47080,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igammac_graph(a_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igammac_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "Igammac") do desc = tf.NodeDescription("Igammac") @@ -48997,7 +47123,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) local desc tf.with_op_name(name, "BatchMatMul") do desc = tf.NodeDescription("BatchMatMul") @@ -49047,66 +47173,52 @@ end """ - enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) + tensor_array_pack(handle, flow_in; element_shape=?) 
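The `Conv2D` hunk above also shows what regeneration against a different TensorFlow build looks like: the `explicit_paddings` attribute disappears from `conv2d_graph`, `conv2d_eager`, and the dispatcher alike, because the op registry this generation ran against does not define it (the `_FusedConv2D` hunk further down loses `use_cudnn_on_gpu` the same way). Since every attr is an optional keyword, call sites that never passed the removed attr are unaffected. A hypothetical illustration (the tensors `x` and `w` are assumed):

    # Still fine after the regeneration: no removed attrs are used.
    y = conv2d(x, w; strides=[1, 1, 1, 1], padding="SAME", data_format="NHWC")

    # A call that passed the removed attr would now raise a MethodError
    # (unsupported keyword argument), since no method accepts it anymore:
    # conv2d(x, w; strides=[1, 1, 1, 1], padding="SAME",
    #        explicit_paddings=[0, 0, 1, 1, 1, 1, 0, 0])
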
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] - embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] - aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + tf.with_op_name(name, "TensorArrayPack") do + desc = tf.NodeDescription("TensorArrayPack") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end end tf.Tensor(tf.Operation(desc)) end - function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = convert(tf.EagerTensor, sample_indices_) - embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) - aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) - mode_override_ = convert(tf.EagerTensor, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayPack") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) + node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, 
aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) if tf.in_eager_mode() - enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) else - enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) end end end @@ -49118,7 +47230,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) local desc tf.with_op_name(name, "QueueCloseV2") do desc = tf.NodeDescription("QueueCloseV2") @@ -49155,52 +47267,66 @@ end """ - tensor_array_pack(handle, flow_in; element_shape=?) - + enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, mode_override; device_ordinal=-1, combiners=Int64[]) +An op that enqueues TPUEmbedding input indices from a SparseTensor. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) local desc - tf.with_op_name(name, "TensorArrayPack") do - desc = tf.NodeDescription("TensorArrayPack") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) end end tf.Tensor(tf.Operation(desc)) end - function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArrayPack") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - 
tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = convert(tf.EagerTensor, sample_indices_) + embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) + aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) + mode_override_ = convert(tf.EagerTensor, mode_override_) + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) end res = tf.execute(desc) - node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) if tf.in_eager_mode() - tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) else - tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) end end end @@ -49212,7 +47338,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) local desc tf.with_op_name(name, "ReaderRestoreState") do desc = tf.NodeDescription("ReaderRestoreState") @@ -49247,12 +47373,12 @@ end """ - _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], use_cudnn_on_gpu=true, fused_ops=Int64[], epsilon=?) + _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], fused_ops=Int64[], epsilon=?) *NOTE*: Do not invoke this operator directly in Python. 
Grappler is """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) local desc tf.with_op_name(name, "_FusedConv2D") do desc = tf.NodeDescription("_FusedConv2D") @@ -49278,9 +47404,6 @@ begin if dilations !== nothing desc["dilations"] = map(Base.identity, dilations) end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end if fused_ops !== nothing desc["fused_ops"] = map(Base.identity, fused_ops) end @@ -49290,7 +47413,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) desc = tf.EagerOp("_FusedConv2D") input_ = convert(tf.EagerTensor, input_) filter_ = convert(tf.EagerTensor, filter_) @@ -49313,9 +47436,6 @@ begin if dilations !== nothing desc["dilations"] = map(Base.identity, dilations) end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end if fused_ops !== nothing desc["fused_ops"] = map(Base.identity, fused_ops) end @@ -49326,17 +47446,17 @@ begin desc["T"] = tf.data_type(filter_) desc["T"] = tf.data_type(args_) res = tf.execute(desc) - node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing, res) + node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, use_cudnn_on_gpu=nothing, fused_ops=nothing, epsilon=nothing) + function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) if tf.in_eager_mode() - _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, fused_ops=fused_ops, epsilon=epsilon) + _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) else - _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, use_cudnn_on_gpu=use_cudnn_on_gpu, 
fused_ops=fused_ops, epsilon=epsilon) + _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) end end end @@ -49348,7 +47468,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) local desc tf.with_op_name(name, "_ReadVariablesOp") do desc = tf.NodeDescription("_ReadVariablesOp") @@ -49396,7 +47516,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensors") do desc = tf.NodeDescription("MutableHashTableOfTensors") @@ -49464,7 +47584,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function read_file_graph(filename_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function read_file_graph(filename_; name=nothing) local desc tf.with_op_name(name, "ReadFile") do desc = tf.NodeDescription("ReadFile") @@ -49497,10 +47617,10 @@ end """ load_tpu_embedding_mdl_adagrad_light_parameters(parameters, accumulators, weights, benefits; table_id=-1, table_name=) - +Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") @@ -49572,7 +47692,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) local desc tf.with_op_name(name, "FractionalAvgPoolGrad") do desc = tf.NodeDescription("FractionalAvgPoolGrad") @@ -49625,10 +47745,10 @@ end """ load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=) - +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") @@ -49690,71 +47810,13 @@ begin end -""" - stateful_standard_normal_v2(resource, algorithm, shape; dtype=Float32, shape_dtype=Int64) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - local desc - tf.with_op_name(name, "StatefulStandardNormalV2") do - desc = tf.NodeDescription("StatefulStandardNormalV2") - resource_ = convert(Tensor{Any}, resource_) - algorithm_ = convert(Tensor{Int64}, algorithm_) - shape_ = convert(Tensor{Int64}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, resource_) - tf.add_input(desc, algorithm_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape_dtype !== nothing - desc["shape_dtype"] = Base.identity(shape_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - desc = tf.EagerOp("StatefulStandardNormalV2") - resource_ = convert(tf.EagerTensor, resource_) - algorithm_ = convert(tf.EagerTensor, algorithm_) - shape_ = 
convert(tf.EagerTensor, shape_) - tf.add_input(desc, resource_) - tf.add_input(desc, algorithm_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape_dtype !== nothing - desc["shape_dtype"] = Base.identity(shape_dtype) - end - desc["shape_dtype"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(stateful_standard_normal_v2, [resource_, algorithm_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function stateful_standard_normal_v2(resource_, algorithm_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - if tf.in_eager_mode() - stateful_standard_normal_v2_eager(resource_, algorithm_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) - else - stateful_standard_normal_v2_graph(resource_, algorithm_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) - end - end -end - - """ bincount(arr, size, weights) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) local desc tf.with_op_name(name, "Bincount") do desc = tf.NodeDescription("Bincount") @@ -49800,7 +47862,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inv_graph(x_; name=nothing) local desc tf.with_op_name(name, "Inv") do desc = tf.NodeDescription("Inv") @@ -49838,7 +47900,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyProximalAdagrad") do desc = tf.NodeDescription("ApplyProximalAdagrad") @@ -49907,7 +47969,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) local desc tf.with_op_name(name, "GatherV2") do desc = tf.NodeDescription("GatherV2") @@ -49958,7 +48020,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) local desc tf.with_op_name(name, "WriteFile") do desc = tf.NodeDescription("WriteFile") @@ -49998,7 +48060,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") 
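Every binding in this generated file follows the same three-function pattern visible in the hunks above and below: a `*_graph` form that builds a `NodeDescription`, an `*_eager` form that executes immediately and records a `TapeNode`, and a same-named dispatcher that switches on `tf.in_eager_mode()`. The following minimal sketch shows that skeleton for a hypothetical single-input op "MyOp" with wrapper name `my_op` — these names are illustrative and appear nowhere in this diff, and the code assumes the module-level `tf` alias and `Tensor` type used throughout the file.

function my_op_graph(x_; name=nothing)
    # Graph mode: describe the node and return a symbolic Tensor.
    local desc
    tf.with_op_name(name, "MyOp") do
        desc = tf.NodeDescription("MyOp")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    tf.Tensor(tf.Operation(desc))
end

function my_op_eager(x_; name=nothing)
    # Eager mode: run the kernel now and record a TapeNode so
    # gradients can flow through the eager trace.
    desc = tf.EagerOp("MyOp")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    res = tf.execute(desc)
    node = tf.TapeNode(my_op, [x_], name=nothing, res)
    if length(res) >= 1
        tf.add_node(res[1], node)
        return res[1]
    end
end

function my_op(x_; name=nothing)
    # Public entry point: pick the backend at call time.
    if tf.in_eager_mode()
        my_op_eager(x_; name=name)
    else
        my_op_graph(x_; name=name)
    end
end

This split is why every op touched by this diff carries three functions: the two backends stay independent, while callers keep a single public name whose behavior follows the active execution mode.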
@@ -50039,7 +48101,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceGather") do desc = tf.NodeDescription("ResourceGather") @@ -50094,7 +48156,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") @@ -50157,7 +48219,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "TruncateMod") do desc = tf.NodeDescription("TruncateMod") @@ -50200,7 +48262,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) local desc tf.with_op_name(name, "LogMatrixDeterminant") do desc = tf.NodeDescription("LogMatrixDeterminant") @@ -50243,7 +48305,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) local desc tf.with_op_name(name, "IRFFT2D") do desc = tf.NodeDescription("IRFFT2D") @@ -50283,7 +48345,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesTrainingPredict") do desc = tf.NodeDescription("BoostedTreesTrainingPredict") @@ -50342,62 +48404,13 @@ begin end -""" - nearest_neighbors(points, centers, k) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function nearest_neighbors_graph(points_, centers_, k_; name=nothing) - local desc - tf.with_op_name(name, "NearestNeighbors") do - desc = tf.NodeDescription("NearestNeighbors") - points_ = convert(Tensor{Float32}, points_) - centers_ = convert(Tensor{Float32}, centers_) - k_ = convert(Tensor{Int64}, k_) - tf.add_input(desc, points_) - tf.add_input(desc, centers_) - 
tf.add_input(desc, k_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function nearest_neighbors_eager(points_, centers_, k_; name=nothing) - desc = tf.EagerOp("NearestNeighbors") - points_ = convert(tf.EagerTensor, points_) - centers_ = convert(tf.EagerTensor, centers_) - k_ = convert(tf.EagerTensor, k_) - tf.add_input(desc, points_) - tf.add_input(desc, centers_) - tf.add_input(desc, k_) - res = tf.execute(desc) - node = tf.TapeNode(nearest_neighbors, [points_, centers_, k_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function nearest_neighbors(points_, centers_, k_; name=nothing) - if tf.in_eager_mode() - nearest_neighbors_eager(points_, centers_, k_; name=name) - else - nearest_neighbors_graph(points_, centers_, k_; name=name) - end - end -end - - """ floor(x) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function floor_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_graph(x_; name=nothing) local desc tf.with_op_name(name, "Floor") do desc = tf.NodeDescription("Floor") @@ -50429,81 +48442,13 @@ begin end -""" - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = 
tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end -end - - """ write_image_summary(writer, step, tag, tensor, bad_color; max_images=3) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) local desc tf.with_op_name(name, "WriteImageSummary") do desc = tf.NodeDescription("WriteImageSummary") @@ -50563,7 +48508,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) local desc tf.with_op_name(name, "TileGrad") do desc = tf.NodeDescription("TileGrad") @@ -50599,13 +48544,81 @@ begin end +""" + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters, accumulators, gradient_accumulators; table_id=-1, table_name=) + +Load embedding parameters for a single table. 
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end +end + + """ tensor_array_grad_v3(handle, flow_in) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGradV3") do desc = 
tf.NodeDescription("TensorArrayGradV3") @@ -50653,10 +48666,10 @@ end """ enqueue_tpu_embedding_integer_batch(batch, mode_override; device_ordinal=-1) - +An op that enqueues a list of input batch tensors to TPUEmbedding. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) local desc tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") @@ -50708,7 +48721,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) local desc tf.with_op_name(name, "FusedBatchNorm") do desc = tf.NodeDescription("FusedBatchNorm") @@ -50789,7 +48802,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_and_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_and_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "LogicalAnd") do desc = tf.NodeDescription("LogicalAnd") @@ -50829,7 +48842,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterUpdate") do desc = tf.NodeDescription("TensorScatterUpdate") @@ -50879,7 +48892,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "TextLineReaderV2") do desc = tf.NodeDescription("TextLineReaderV2") @@ -50929,7 +48942,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "TensorSliceDataset") do desc = tf.NodeDescription("TensorSliceDataset") @@ -50977,7 +48990,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArrayScatterV3") do desc = tf.NodeDescription("TensorArrayScatterV3") @@ -51027,7 +49040,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeNearestNeighborGrad") do desc = tf.NodeDescription("ResizeNearestNeighborGrad") @@ -51075,7 +49088,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ApplyPowerSign") do desc = tf.NodeDescription("ApplyPowerSign") @@ -51143,65 +49156,13 @@ begin end -""" - experimental_rebatch_dataset(input_dataset, num_workers) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalRebatchDataset") do - desc = tf.NodeDescription("ExperimentalRebatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_workers_ = convert(Tensor{Int64}, num_workers_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_workers_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalRebatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - num_workers_ = convert(tf.EagerTensor, num_workers_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_workers_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_rebatch_dataset, [input_dataset_, num_workers_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_rebatch_dataset(input_dataset_, num_workers_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_rebatch_dataset_eager(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_rebatch_dataset_graph(input_dataset_, num_workers_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end -end - - """ mirror_pad(input, paddings) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "MirrorPad") do desc = tf.NodeDescription("MirrorPad") @@ -51251,7 +49212,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function logical_not_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_not_graph(x_; name=nothing) local desc tf.with_op_name(name, "LogicalNot") do desc = tf.NodeDescription("LogicalNot") @@ -51287,7 +49248,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT") do desc = tf.NodeDescription("BatchIFFT") @@ -51323,7 +49284,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcatV2") do desc = tf.NodeDescription("TensorArrayConcatV2") @@ -51380,7 +49341,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Sum") do desc = tf.NodeDescription("Sum") @@ -51431,7 +49392,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) local desc tf.with_op_name(name, "BoostedTreesPredict") do desc = tf.NodeDescription("BoostedTreesPredict") @@ -51477,116 +49438,13 @@ begin end -""" - quantized_conv2d_with_bias_and_relu_and_requantize(input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DWithBiasAndReluAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DWithBiasAndReluAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) 
- min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DWithBiasAndReluAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - desc["Tbias"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_with_bias_and_relu_and_requantize, [input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_with_bias_and_relu_and_requantize(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_with_bias_and_relu_and_requantize_eager(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, 
padding=padding, dilations=dilations) - else - quantized_conv2d_with_bias_and_relu_and_requantize_graph(input_, filter_, bias_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ resource_sparse_apply_adagrad(var, accum, lr, grad, indices; use_locking=false, update_slots=true) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagrad") do desc = tf.NodeDescription("ResourceSparseApplyAdagrad") @@ -51656,7 +49514,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) local desc tf.with_op_name(name, "LeakyReluGrad") do desc = tf.NodeDescription("LeakyReluGrad") @@ -51705,7 +49563,7 @@ end A graph node which represents a return value of a function. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) local desc tf.with_op_name(name, "_DeviceRetval") do desc = tf.NodeDescription("_DeviceRetval") @@ -51749,7 +49607,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_graph(input_, paddings_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pad_graph(input_, paddings_; name=nothing) local desc tf.with_op_name(name, "Pad") do desc = tf.NodeDescription("Pad") @@ -51793,7 +49651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "AddManySparseToTensorsMap") do desc = tf.NodeDescription("AddManySparseToTensorsMap") @@ -51851,7 +49709,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) local desc tf.with_op_name(name, "SparseReorder") do desc = tf.NodeDescription("SparseReorder") @@ -51902,7 +49760,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseXor") do desc = tf.NodeDescription("BitwiseXor") @@ -51945,7 +49803,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) local desc tf.with_op_name(name, "BatchMatrixSetDiag") do desc = tf.NodeDescription("BatchMatrixSetDiag") @@ -51988,7 +49846,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableInsertV2") do desc = tf.NodeDescription("LookupTableInsertV2") @@ -52036,7 +49894,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") @@ -52092,7 +49950,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyRMSProp") do desc = tf.NodeDescription("ResourceSparseApplyRMSProp") @@ -52175,7 +50033,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "RandomCrop") do desc = tf.NodeDescription("RandomCrop") @@ -52229,7 +50087,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) local desc tf.with_op_name(name, "LookupTableImportV2") do desc = tf.NodeDescription("LookupTableImportV2") @@ -52277,7 +50135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceScatterNdUpdate") do desc = tf.NodeDescription("ResourceScatterNdUpdate") @@ -52332,7 +50190,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) local desc tf.with_op_name(name, "StaticRegexFullMatch") do desc = tf.NodeDescription("StaticRegexFullMatch") @@ -52374,7 +50232,7 @@ end Configures the credentials used by the GCS client of the local TF runtime. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) local desc tf.with_op_name(name, "GcsConfigureCredentials") do desc = tf.NodeDescription("GcsConfigureCredentials") @@ -52410,7 +50268,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV3") do desc = tf.NodeDescription("TensorArraySizeV3") @@ -52450,7 +50308,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") @@ -52500,124 +50358,12 @@ end """ - experimental_group_by_reducer_dataset(input_dataset, key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do - desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] - init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] - reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] - finalize_func_other_arguments_ = 
[convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, init_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, finalize_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Tinit_func_other_arguments !== nothing - desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Tfinalize_func_other_arguments !== nothing - desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) - init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_) - reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) - finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, init_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, finalize_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Tinit_func_other_arguments !== nothing - desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Tfinalize_func_other_arguments !== nothing - desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = 
map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - else - experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - end - end -end - - -""" - conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, explicit_paddings=Int64[], data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv2DBackpropFilter") do desc = tf.NodeDescription("Conv2DBackpropFilter") @@ -52637,9 +50383,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) 
- end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -52649,7 +50392,7 @@ begin end tf.Tensor(tf.Operation(desc)) end - function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropFilter") input_ = convert(tf.EagerTensor, input_) filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) @@ -52666,9 +50409,6 @@ begin if padding !== nothing desc["padding"] = Base.String(padding) end - if explicit_paddings !== nothing - desc["explicit_paddings"] = map(Base.identity, explicit_paddings) - end if data_format !== nothing desc["data_format"] = Base.String(data_format) end @@ -52678,17 +50418,129 @@ begin desc["T"] = tf.data_type(input_) desc["T"] = tf.data_type(out_backprop_) res = tf.execute(desc) - node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing, res) + node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end + end +end + + +""" + experimental_group_by_reducer_dataset(input_dataset, key_func_other_arguments, init_func_other_arguments, reduce_func_other_arguments, finalize_func_other_arguments) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do + desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = 
finalize_func_other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, init_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, finalize_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) + init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) + finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, init_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, finalize_func_other_arguments_) + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) 
+ end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, explicit_paddings=nothing, data_format=nothing, dilations=nothing) + function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() - conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) else - conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, dilations=dilations) + experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) end end end @@ -52700,7 +50552,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPoolGrad") do 
desc = tf.NodeDescription("MaxPoolGrad") @@ -52772,7 +50624,7 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) local desc tf.with_op_name(name, "_InitializeHostForDistributedTPU") do desc = tf.NodeDescription("_InitializeHostForDistributedTPU") @@ -52808,7 +50660,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "StagePeek") do desc = tf.NodeDescription("StagePeek") @@ -52874,7 +50726,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) local desc tf.with_op_name(name, "PadV2") do desc = tf.NodeDescription("PadV2") @@ -52918,44 +50770,48 @@ end """ - _parallel_concat_start() + optional_get_value(optional) + -Creates an empty Tensor with shape `shape` and type `dtype`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) local desc - tf.with_op_name(name, "_ParallelConcatStart") do - desc = tf.NodeDescription("_ParallelConcatStart") - if shape !== nothing - desc["shape"] = Base.identity(shape) + tf.with_op_name(name, "OptionalGetValue") do + desc = tf.NodeDescription("OptionalGetValue") + optional_ = convert(Tensor{Any}, optional_) + tf.add_input(desc, optional_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end end tf.Tensor(tf.Operation(desc)) end - function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) - desc = tf.EagerOp("_ParallelConcatStart") - if shape !== nothing - desc["shape"] = Base.identity(shape) + function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("OptionalGetValue") + optional_ = convert(tf.EagerTensor, optional_) + tf.add_input(desc, optional_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end res = tf.execute(desc) - node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) + node = 
tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) if tf.in_eager_mode() - _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) + optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) else - _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) end end end @@ -52967,7 +50823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) local desc tf.with_op_name(name, "PrintV2") do desc = tf.NodeDescription("PrintV2") @@ -53004,48 +50860,44 @@ end """ - optional_get_value(optional) - + _parallel_concat_start() +Creates an empty Tensor with shape `shape` and type `dtype`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) local desc - tf.with_op_name(name, "OptionalGetValue") do - desc = tf.NodeDescription("OptionalGetValue") - optional_ = convert(Tensor{Any}, optional_) - tf.add_input(desc, optional_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + tf.with_op_name(name, "_ParallelConcatStart") do + desc = tf.NodeDescription("_ParallelConcatStart") + if shape !== nothing + desc["shape"] = Base.identity(shape) end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end end tf.Tensor(tf.Operation(desc)) end - function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("OptionalGetValue") - optional_ = convert(tf.EagerTensor, optional_) - tf.add_input(desc, optional_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) + desc = tf.EagerOp("_ParallelConcatStart") + if shape !== nothing + desc["shape"] = Base.identity(shape) end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end res = tf.execute(desc) - node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) + node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) if tf.in_eager_mode() - optional_get_value_eager(optional_; name=name, 
output_types=output_types, output_shapes=output_shapes) + _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) else - optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) end end end @@ -53054,10 +50906,10 @@ end """ load_tpu_embedding_ftrl_parameters(parameters, accumulators, linears; table_id=-1, table_name=) - +Load embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") @@ -53125,7 +50977,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) local desc tf.with_op_name(name, "SparseSlice") do desc = tf.NodeDescription("SparseSlice") @@ -53184,7 +51036,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") @@ -53239,7 +51091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixSolve") do desc = tf.NodeDescription("MatrixSolve") @@ -53288,7 +51140,7 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) local desc tf.with_op_name(name, "_ConfigureDistributedTPU") do desc = tf.NodeDescription("_ConfigureDistributedTPU") @@ -53330,13 +51182,12 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) local desc tf.with_op_name(name, 
"AdjustContrastv2") do desc = tf.NodeDescription("AdjustContrastv2") images_ = convert(Tensor{Float32}, images_) contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) - (images_,) = tf.tf_promote(images_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) end @@ -53348,7 +51199,6 @@ begin contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) tf.add_input(desc, images_) tf.add_input(desc, contrast_factor_) - desc["T"] = tf.data_type(images_) res = tf.execute(desc) node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing, res) if length(res) >= 1 @@ -53372,7 +51222,7 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklMaximum") do desc = tf.NodeDescription("_MklMaximum") @@ -53428,7 +51278,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) local desc tf.with_op_name(name, "CudnnRNNParamsSize") do desc = tf.NodeDescription("CudnnRNNParamsSize") @@ -53514,7 +51364,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) local desc tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") @@ -53560,7 +51410,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchIFFT3D") do desc = tf.NodeDescription("BatchIFFT3D") @@ -53596,7 +51446,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sigmoid_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sigmoid_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sigmoid") do desc = tf.NodeDescription("Sigmoid") @@ -53634,7 +51484,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, 
"SegmentMean") do desc = tf.NodeDescription("SegmentMean") @@ -53679,7 +51529,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) local desc tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") @@ -53715,7 +51565,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) local desc tf.with_op_name(name, "TensorArraySizeV2") do desc = tf.NodeDescription("TensorArraySizeV2") @@ -53755,7 +51605,7 @@ end Returns x - y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSub") do desc = tf.NodeDescription("_MklSub") @@ -53808,10 +51658,10 @@ end """ send_tpu_embedding_gradients(inputs, learning_rates; NN=0) - +An op that performs gradient updates of embedding tables. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) local desc tf.with_op_name(name, "SendTPUEmbeddingGradients") do desc = tf.NodeDescription("SendTPUEmbeddingGradients") @@ -53869,7 +51719,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3D") do desc = tf.NodeDescription("MaxPool3D") @@ -53931,7 +51781,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) local desc tf.with_op_name(name, "Prod") do desc = tf.NodeDescription("Prod") @@ -53982,7 +51832,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) local desc tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") 
@@ -54018,7 +51868,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListPushBack") do desc = tf.NodeDescription("TensorListPushBack") @@ -54066,7 +51916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) local desc tf.with_op_name(name, "BatchFunction") do desc = tf.NodeDescription("BatchFunction") @@ -54178,7 +52028,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRows") do desc = tf.NodeDescription("SparseFillEmptyRows") @@ -54234,7 +52084,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) local desc tf.with_op_name(name, "SelfAdjointEigV2") do desc = tf.NodeDescription("SelfAdjointEigV2") @@ -54280,10 +52130,10 @@ end """ retrieve_tpu_embedding_ftrl_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") @@ -54344,7 +52194,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") @@ -54426,7 +52276,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) local desc tf.with_op_name(name, "TemporaryVariable") do desc = tf.NodeDescription("TemporaryVariable") @@ -54476,7 +52326,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyAddSign") do desc = tf.NodeDescription("ResourceApplyAddSign") @@ -54548,7 +52398,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) local desc tf.with_op_name(name, "Roll") do desc = tf.NodeDescription("Roll") @@ -54598,7 +52448,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function xdivy_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function xdivy_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Xdivy") do desc = tf.NodeDescription("Xdivy") @@ -54641,7 +52491,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, 
padding=nothing, data_format=nothing) local desc tf.with_op_name(name, "MaxPool3DGradGrad") do desc = tf.NodeDescription("MaxPool3DGradGrad") @@ -54707,13 +52557,84 @@ begin end +""" + quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedBiasAdd") do + desc = tf.NodeDescription("QuantizedBiasAdd") + input_ = convert(Tensor{Any}, input_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_bias_ = convert(Tensor{Float32}, min_bias_) + max_bias_ = convert(Tensor{Float32}, max_bias_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_bias_) + tf.add_input(desc, max_bias_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedBiasAdd") + input_ = convert(tf.EagerTensor, input_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_bias_ = convert(tf.EagerTensor, min_bias_) + max_bias_ = convert(tf.EagerTensor, max_bias_) + tf.add_input(desc, input_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_bias_) + tf.add_input(desc, max_bias_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + desc["T1"] = tf.data_type(input_) + desc["T2"] = tf.data_type(bias_) + res = tf.execute(desc) + node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + else + quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + end + end +end + + """ crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=?) 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) local desc tf.with_op_name(name, "CropAndResize") do desc = tf.NodeDescription("CropAndResize") @@ -54769,124 +52690,13 @@ begin end -""" - quantized_bias_add(input, bias, min_input, max_input, min_bias, max_bias) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedBiasAdd") do - desc = tf.NodeDescription("QuantizedBiasAdd") - input_ = convert(Tensor{Any}, input_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_bias_ = convert(Tensor{Float32}, min_bias_) - max_bias_ = convert(Tensor{Float32}, max_bias_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_bias_) - tf.add_input(desc, max_bias_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizedBiasAdd") - input_ = convert(tf.EagerTensor, input_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_bias_ = convert(tf.EagerTensor, min_bias_) - max_bias_ = convert(tf.EagerTensor, max_bias_) - tf.add_input(desc, input_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_bias_) - tf.add_input(desc, max_bias_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T1"] = tf.data_type(input_) - desc["T2"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) - else - quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) - end - end -end - - -""" - kmc2chain_initialization(distances, seed) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function kmc2chain_initialization_graph(distances_, seed_; name=nothing) - local desc - tf.with_op_name(name, "KMC2ChainInitialization") do - desc = tf.NodeDescription("KMC2ChainInitialization") - distances_ = convert(Tensor{Float32}, distances_) - seed_ = 
convert(Tensor{Int64}, seed_) - tf.add_input(desc, distances_) - tf.add_input(desc, seed_) - end - tf.Tensor(tf.Operation(desc)) - end - function kmc2chain_initialization_eager(distances_, seed_; name=nothing) - desc = tf.EagerOp("KMC2ChainInitialization") - distances_ = convert(tf.EagerTensor, distances_) - seed_ = convert(tf.EagerTensor, seed_) - tf.add_input(desc, distances_) - tf.add_input(desc, seed_) - res = tf.execute(desc) - node = tf.TapeNode(kmc2chain_initialization, [distances_, seed_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function kmc2chain_initialization(distances_, seed_; name=nothing) - if tf.in_eager_mode() - kmc2chain_initialization_eager(distances_, seed_; name=name) - else - kmc2chain_initialization_graph(distances_, seed_; name=name) - end - end -end - - """ map_unstage_no_key(indices; capacity=0, memory_limit=0, container=, shared_name=) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "MapUnstageNoKey") do desc = tf.NodeDescription("MapUnstageNoKey") @@ -54957,7 +52767,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterNdSub") do desc = tf.NodeDescription("ScatterNdSub") @@ -55013,7 +52823,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBilinear") do desc = tf.NodeDescription("ResizeBilinear") @@ -55061,7 +52871,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "OrderedMapPeek") do desc = tf.NodeDescription("OrderedMapPeek") @@ -55131,7 +52941,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, 
element_shape=nothing) local desc tf.with_op_name(name, "TensorArray") do desc = tf.NodeDescription("TensorArray") @@ -55197,7 +53007,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) local desc tf.with_op_name(name, "InplaceSub") do desc = tf.NodeDescription("InplaceSub") @@ -55244,7 +53054,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function pow_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pow_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Pow") do desc = tf.NodeDescription("Pow") @@ -55281,67 +53091,13 @@ begin end -""" - stateful_standard_normal(resource, shape; dtype=Float32, shape_dtype=Int64) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateful_standard_normal_graph(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - local desc - tf.with_op_name(name, "StatefulStandardNormal") do - desc = tf.NodeDescription("StatefulStandardNormal") - resource_ = convert(Tensor{Any}, resource_) - shape_ = convert(Tensor{Int64}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, resource_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape_dtype !== nothing - desc["shape_dtype"] = Base.identity(shape_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stateful_standard_normal_eager(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - desc = tf.EagerOp("StatefulStandardNormal") - resource_ = convert(tf.EagerTensor, resource_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, resource_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape_dtype !== nothing - desc["shape_dtype"] = Base.identity(shape_dtype) - end - desc["shape_dtype"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(stateful_standard_normal, [resource_, shape_], name=nothing, dtype=nothing, shape_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function stateful_standard_normal(resource_, shape_; name=nothing, dtype=nothing, shape_dtype=nothing) - if tf.in_eager_mode() - stateful_standard_normal_eager(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) - else - stateful_standard_normal_graph(resource_, shape_; name=name, dtype=dtype, shape_dtype=shape_dtype) - end - end -end - - """ ref_next_iteration(data) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "RefNextIteration") do desc = tf.NodeDescription("RefNextIteration") @@ -55379,7 +53135,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) local desc tf.with_op_name(name, "ScalarSummary") do desc = 
tf.NodeDescription("ScalarSummary") @@ -55421,7 +53177,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) local desc tf.with_op_name(name, "StringSplitV2") do desc = tf.NodeDescription("StringSplitV2") @@ -55472,7 +53228,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bessel_i0e_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bessel_i0e_graph(x_; name=nothing) local desc tf.with_op_name(name, "BesselI0e") do desc = tf.NodeDescription("BesselI0e") @@ -55510,7 +53266,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) local desc tf.with_op_name(name, "Unique") do desc = tf.NodeDescription("Unique") @@ -55553,179 +53309,13 @@ begin end -""" - load_tpu_embedding_rms_prop_parameters(parameters, ms, mom; table_id=-1, table_name=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if 
tf.in_eager_mode() - load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end -end - - -""" - whole_file_reader_v2(; container=, shared_name=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "WholeFileReaderV2") do - desc = tf.NodeDescription("WholeFileReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("WholeFileReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) - else - whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) - end - end -end - - -""" - eager_py_func(input) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "EagerPyFunc") do - desc = tf.NodeDescription("EagerPyFunc") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - end - tf.Tensor(tf.Operation(desc)) - end - function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - desc = tf.EagerOp("EagerPyFunc") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - res = tf.execute(desc) - node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) - end - end -end - - """ next_iteration(data) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_iteration_graph(data_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
next_iteration_graph(data_; name=nothing) local desc tf.with_op_name(name, "NextIteration") do desc = tf.NodeDescription("NextIteration") @@ -55758,64 +53348,166 @@ end """ - case(branch_index, input; output_shapes=Int64[]) + load_tpu_embedding_rms_prop_parameters(parameters, ms, mom; table_id=-1, table_name=) + +Load embedding parameters for a single table. +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + tf.Tensor(tf.Operation(desc)) + end + function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end + end +end + + +""" + eager_py_func(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function case_graph(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc - tf.with_op_name(name, "Case") do - desc = tf.NodeDescription("Case") - branch_index_ = convert(Tensor{Int32}, branch_index_) + tf.with_op_name(name, "EagerPyFunc") do + desc = tf.NodeDescription("EagerPyFunc") input_ = 
[convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, branch_index_) tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) end if Tout !== nothing desc["Tout"] = map(Base.identity, Tout) end - if branches !== nothing - desc["branches"] = map(Base.identity, branches) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end end tf.Tensor(tf.Operation(desc)) end - function case_eager(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) - desc = tf.EagerOp("Case") - branch_index_ = convert(tf.EagerTensor, branch_index_) + function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("EagerPyFunc") input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, branch_index_) tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end if Tin !== nothing desc["Tin"] = map(Base.identity, Tin) end if Tout !== nothing desc["Tout"] = map(Base.identity, Tout) end - if branches !== nothing - desc["branches"] = map(Base.identity, branches) + res = tf.execute(desc) + node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + end + function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end + end +end + + +""" + whole_file_reader_v2(; container=, shared_name=) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReaderV2") do + desc = tf.NodeDescription("WholeFileReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + tf.Tensor(tf.Operation(desc)) + end + function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("WholeFileReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end res = tf.execute(desc) - node = tf.TapeNode(case, [branch_index_, input_], name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing, res) + node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function case(branch_index_, input_; name=nothing, Tin=nothing, Tout=nothing, branches=nothing, output_shapes=nothing) + function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) if tf.in_eager_mode() - case_eager(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, output_shapes=output_shapes) + whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) else - case_graph(branch_index_, input_; name=name, Tin=Tin, Tout=Tout, branches=branches, 
output_shapes=output_shapes) + whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) end end end @@ -55827,7 +53519,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) local desc tf.with_op_name(name, "TensorScatterSub") do desc = tf.NodeDescription("TensorScatterSub") @@ -55877,7 +53569,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMax") do desc = tf.NodeDescription("ScatterMax") @@ -55933,7 +53625,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sqrt_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sqrt_graph(x_; name=nothing) local desc tf.with_op_name(name, "Sqrt") do desc = tf.NodeDescription("Sqrt") @@ -55971,7 +53663,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AccumulatorTakeGradient") do desc = tf.NodeDescription("AccumulatorTakeGradient") @@ -56017,7 +53709,7 @@ end Returns x + y element-wise. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklAdd") do desc = tf.NodeDescription("_MklAdd") @@ -56067,13 +53759,55 @@ begin end +""" + outfeed_enqueue_tuple(inputs) + +An op which emits multiple Tensor values from an XLA computation. 
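+
+A hypothetical usage sketch (illustrative only: this op only has an effect
+inside a TPU/XLA computation, and `dtypes` is assumed to mirror the element
+types of `inputs`):
+
+    outfeed_enqueue_tuple([x, y]; dtypes=[Float32, Int64])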
+""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueueTuple") do + desc = tf.NodeDescription("OutfeedEnqueueTuple") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + tf.Tensor(tf.Operation(desc)) + end + function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) + desc = tf.EagerOp("OutfeedEnqueueTuple") + inputs_ = convert(tf.EagerTensor, inputs_) + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + res = tf.execute(desc) + node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) + else + outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + end + end +end + + """ reciprocal(x) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reciprocal_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reciprocal_graph(x_; name=nothing) local desc tf.with_op_name(name, "Reciprocal") do desc = tf.NodeDescription("Reciprocal") @@ -56106,78 +53840,72 @@ end """ - outfeed_enqueue_tuple(inputs) + string_strip(input) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_strip_graph(input_; name=nothing) local desc - tf.with_op_name(name, "OutfeedEnqueueTuple") do - desc = tf.NodeDescription("OutfeedEnqueueTuple") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + tf.with_op_name(name, "StringStrip") do + desc = tf.NodeDescription("StringStrip") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) end tf.Tensor(tf.Operation(desc)) end - function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) - desc = tf.EagerOp("OutfeedEnqueueTuple") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + function string_strip_eager(input_; name=nothing) + desc = tf.EagerOp("StringStrip") + input_ = convert(tf.EagerTensor, input_) + tf.add_input(desc, input_) res = tf.execute(desc) - node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) + node = tf.TapeNode(string_strip, [input_], name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + function string_strip(input_; name=nothing) if tf.in_eager_mode() - outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) + string_strip_eager(input_; name=name) else - outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + string_strip_graph(input_; name=name) end end end """ - string_strip(input) + barrier_ready_size(handle) """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_strip_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) local desc - tf.with_op_name(name, "StringStrip") do - desc = tf.NodeDescription("StringStrip") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) + tf.with_op_name(name, "BarrierReadySize") do + desc = tf.NodeDescription("BarrierReadySize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end tf.Tensor(tf.Operation(desc)) end - function string_strip_eager(input_; name=nothing) - desc = tf.EagerOp("StringStrip") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) + function barrier_ready_size_eager(handle_; name=nothing) + desc = tf.EagerOp("BarrierReadySize") + handle_ = convert(tf.EagerTensor, handle_) + tf.add_input(desc, handle_) res = tf.execute(desc) - node = tf.TapeNode(string_strip, [input_], name=nothing, res) + node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function string_strip(input_; name=nothing) + function barrier_ready_size(handle_; name=nothing) if tf.in_eager_mode() - string_strip_eager(input_; name=name) + barrier_ready_size_eager(handle_; name=name) else - string_strip_graph(input_; name=name) + barrier_ready_size_graph(handle_; name=name) end end end @@ -56189,7 +53917,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) local desc tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") @@ -56239,49 +53967,13 @@ begin end -""" - barrier_ready_size(handle) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "BarrierReadySize") do - desc = tf.NodeDescription("BarrierReadySize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) - end - function barrier_ready_size_eager(handle_; name=nothing) - desc = tf.EagerOp("BarrierReadySize") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function barrier_ready_size(handle_; name=nothing) - if tf.in_eager_mode() - barrier_ready_size_eager(handle_; name=name) - else - barrier_ready_size_graph(handle_; name=name) - end - end -end - - """ string_to_hash_bucket(string_tensor) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) local desc tf.with_op_name(name, "StringToHashBucket") do desc = 
tf.NodeDescription("StringToHashBucket") @@ -56323,7 +54015,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) local desc tf.with_op_name(name, "TensorArrayConcat") do desc = tf.NodeDescription("TensorArrayConcat") @@ -56380,7 +54072,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) local desc tf.with_op_name(name, "ShardedFilename") do desc = tf.NodeDescription("ShardedFilename") @@ -56424,7 +54116,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) local desc tf.with_op_name(name, "PyFunc") do desc = tf.NodeDescription("PyFunc") @@ -56478,7 +54170,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentProd") do desc = tf.NodeDescription("UnsortedSegmentProd") @@ -56529,7 +54221,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) local desc tf.with_op_name(name, "CountUpTo") do desc = tf.NodeDescription("CountUpTo") @@ -56573,7 +54265,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) local desc tf.with_op_name(name, "RandomGamma") do desc = tf.NodeDescription("RandomGamma") @@ -56635,7 +54327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) local desc tf.with_op_name(name, "TensorArrayGrad") do desc = tf.NodeDescription("TensorArrayGrad") @@ -56681,7 +54373,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) local desc tf.with_op_name(name, "Dilation2D") do desc = tf.NodeDescription("Dilation2D") @@ -56742,7 +54434,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unbatch") do desc = tf.NodeDescription("Unbatch") @@ -56806,7 +54498,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function get_session_handle_graph(value_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_handle_graph(value_; name=nothing) local desc tf.with_op_name(name, "GetSessionHandle") do desc = tf.NodeDescription("GetSessionHandle") @@ -56841,10 +54533,10 @@ end """ retrieve_tpu_embedding_adam_parameters(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") @@ -56905,7 +54597,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) local desc tf.with_op_name(name, "MutableHashTableOfTensorsV2") do desc = tf.NodeDescription("MutableHashTableOfTensorsV2") @@ -56973,7 +54665,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "SparseApplyFtrl") do desc = tf.NodeDescription("SparseApplyFtrl") @@ -57059,7 +54751,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "BatchDatasetV2") do desc = tf.NodeDescription("BatchDatasetV2") @@ -57115,7 +54807,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMinimum") do desc = tf.NodeDescription("SparseSparseMinimum") @@ -57179,7 +54871,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) local desc tf.with_op_name(name, "ReverseV2") do desc = tf.NodeDescription("ReverseV2") @@ -57224,7 +54916,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSlice") do desc = tf.NodeDescription("StridedSlice") @@ -57347,7 +55039,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matching_files_graph(pattern_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matching_files_graph(pattern_; name=nothing) local desc tf.with_op_name(name, "MatchingFiles") do desc = tf.NodeDescription("MatchingFiles") @@ -57383,7 +55075,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) local desc tf.with_op_name(name, "EncodeBase64") do desc = tf.NodeDescription("EncodeBase64") @@ -57425,7 +55117,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNextAsOptional") do desc = tf.NodeDescription("IteratorGetNextAsOptional") @@ -57473,7 +55165,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "PaddingFIFOQueue") do desc = tf.NodeDescription("PaddingFIFOQueue") @@ -57535,7 +55227,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) local desc tf.with_op_name(name, "IteratorToStringHandle") do desc = tf.NodeDescription("IteratorToStringHandle") @@ -57571,7 +55263,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) local desc tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") @@ -57633,50 +55325,46 @@ end """ - tensor_list_gather(input_handle, indices, element_shape) + tensor_list_gather(input_handle, indices) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListGather") do desc = tf.NodeDescription("TensorListGather") input_handle_ = convert(Tensor{Any}, input_handle_) indices_ = convert(Tensor{Int32}, indices_) - element_shape_ = convert(Tensor{Int32}, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end end tf.Tensor(tf.Operation(desc)) end - function tensor_list_gather_eager(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_gather_eager(input_handle_, indices_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGather") input_handle_ = convert(tf.EagerTensor, input_handle_) indices_ = convert(tf.EagerTensor, indices_) - element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_, element_shape_], name=nothing, element_dtype=nothing, res) + node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_], name=nothing, element_dtype=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tensor_list_gather(input_handle_, indices_, element_shape_; name=nothing, element_dtype=nothing) + function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing) if tf.in_eager_mode() - tensor_list_gather_eager(input_handle_, indices_, 
element_shape_; name=name, element_dtype=element_dtype) + tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype) else - tensor_list_gather_graph(input_handle_, indices_, element_shape_; name=name, element_dtype=element_dtype) + tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype) end end end @@ -57688,7 +55376,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) local desc tf.with_op_name(name, "Multinomial") do desc = tf.NodeDescription("Multinomial") @@ -57748,7 +55436,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "TensorArrayRead") do desc = tf.NodeDescription("TensorArrayRead") @@ -57798,7 +55486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") @@ -57844,168 +55532,13 @@ begin end -""" - tpu_partitioned_call(args, device_ordinal) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tpu_partitioned_call_graph(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - local desc - tf.with_op_name(name, "TPUPartitionedCall") do - desc = tf.NodeDescription("TPUPartitionedCall") - args_ = [convert(Tensor{Any}, x) for x = args_] - device_ordinal_ = convert(Tensor{Int32}, device_ordinal_) - tf.add_input(desc, args_) - tf.add_input(desc, device_ordinal_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tpu_partitioned_call_eager(args_, device_ordinal_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - desc = tf.EagerOp("TPUPartitionedCall") - args_ = convert(tf.EagerTensor, args_) - device_ordinal_ = convert(tf.EagerTensor, device_ordinal_) - tf.add_input(desc, args_) - tf.add_input(desc, device_ordinal_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - res = tf.execute(desc) - node = tf.TapeNode(tpu_partitioned_call, [args_, device_ordinal_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function tpu_partitioned_call(args_, device_ordinal_; 
name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.in_eager_mode() - tpu_partitioned_call_eager(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) - else - tpu_partitioned_call_graph(args_, device_ordinal_; name=name, Tin=Tin, Tout=Tout, f=f) - end - end -end - - -""" - quantized_conv2d_and_relu_and_requantize(input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DAndReluAndRequantize") do - desc = tf.NodeDescription("QuantizedConv2DAndReluAndRequantize") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - min_freezed_output_ = convert(Tensor{Float32}, min_freezed_output_) - max_freezed_output_ = convert(Tensor{Float32}, max_freezed_output_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DAndReluAndRequantize") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - min_freezed_output_ = convert(tf.EagerTensor, min_freezed_output_) - max_freezed_output_ = convert(tf.EagerTensor, max_freezed_output_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - tf.add_input(desc, min_freezed_output_) - tf.add_input(desc, max_freezed_output_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = 
tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_relu_and_requantize, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_and_relu_and_requantize(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_and_relu_and_requantize_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_and_relu_and_requantize_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_, min_freezed_output_, max_freezed_output_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ iterator_from_string_handle_v2(string_handle; output_types=Int64[], output_shapes=Int64[]) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorFromStringHandleV2") do desc = tf.NodeDescription("IteratorFromStringHandleV2") @@ -58053,7 +55586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "BitwiseOr") do desc = tf.NodeDescription("BitwiseOr") @@ -58096,7 +55629,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) local desc tf.with_op_name(name, "UnsortedSegmentMax") do desc = tf.NodeDescription("UnsortedSegmentMax") @@ -58147,7 +55680,7 @@ end Returns (x - y)(x - y) element-wise. 
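
For example, for x = [3, 5] and y = [1, 2] the result is [4, 9], since
(3 - 1)^2 = 4 and (5 - 2)^2 = 9. The extra `mkl_x`/`mkl_y` inputs carry
MKL layout metadata and do not affect the arithmetic.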
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) local desc tf.with_op_name(name, "_MklSquaredDifference") do desc = tf.NodeDescription("_MklSquaredDifference") @@ -58203,7 +55736,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) local desc tf.with_op_name(name, "Conv3DBackpropFilter") do desc = tf.NodeDescription("Conv3DBackpropFilter") @@ -58269,7 +55802,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "If") do desc = tf.NodeDescription("If") @@ -58341,7 +55874,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "FlatMapDataset") do desc = tf.NodeDescription("FlatMapDataset") @@ -58405,7 +55938,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) local desc tf.with_op_name(name, "TensorListScatter") do desc = tf.NodeDescription("TensorListScatter") @@ -58465,7 +55998,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "SoftsignGrad") do desc = tf.NodeDescription("SoftsignGrad") @@ -58508,7 +56041,7 @@ end Copy Host Op. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) local desc tf.with_op_name(name, "CopyHost") do desc = tf.NodeDescription("CopyHost") @@ -58558,7 +56091,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) local desc tf.with_op_name(name, "LinSpace") do desc = tf.NodeDescription("LinSpace") @@ -58608,7 +56141,7 @@ end Updates input `value` at `loc` with `update`. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) local desc tf.with_op_name(name, "_ParallelConcatUpdate") do desc = tf.NodeDescription("_ParallelConcatUpdate") @@ -58657,7 +56190,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) local desc tf.with_op_name(name, "Stack") do desc = tf.NodeDescription("Stack") @@ -58701,7 +56234,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) local desc tf.with_op_name(name, "StackPushV2") do desc = tf.NodeDescription("StackPushV2") @@ -58749,7 +56282,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "AssignVariableOp") do desc = tf.NodeDescription("AssignVariableOp") @@ -58797,7 +56330,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) local desc tf.with_op_name(name, "SparseSplit") do desc = tf.NodeDescription("SparseSplit") @@ -58859,7 +56392,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) local desc tf.with_op_name(name, 
"TensorArrayUnpack") do desc = tf.NodeDescription("TensorArrayUnpack") @@ -58900,19 +56433,17 @@ end """ - tensor_list_stack(input_handle, element_shape; num_elements=-1) + tensor_list_stack(input_handle; num_elements=-1) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_stack_graph(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) local desc tf.with_op_name(name, "TensorListStack") do desc = tf.NodeDescription("TensorListStack") input_handle_ = convert(Tensor{Any}, input_handle_) - element_shape_ = convert(Tensor{Int32}, element_shape_) tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end @@ -58922,12 +56453,10 @@ begin end tf.Tensor(tf.Operation(desc)) end - function tensor_list_stack_eager(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + function tensor_list_stack_eager(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) desc = tf.EagerOp("TensorListStack") input_handle_ = convert(tf.EagerTensor, input_handle_) - element_shape_ = convert(tf.EagerTensor, element_shape_) tf.add_input(desc, input_handle_) - tf.add_input(desc, element_shape_) if element_dtype !== nothing desc["element_dtype"] = Base.identity(element_dtype) end @@ -58935,17 +56464,17 @@ begin desc["num_elements"] = Base.Int(num_elements) end res = tf.execute(desc) - node = tf.TapeNode(tensor_list_stack, [input_handle_, element_shape_], name=nothing, element_dtype=nothing, num_elements=nothing, res) + node = tf.TapeNode(tensor_list_stack, [input_handle_], name=nothing, element_dtype=nothing, num_elements=nothing, res) if length(res) >= 1 tf.add_node(res[1], node) return res[1] end end - function tensor_list_stack(input_handle_, element_shape_; name=nothing, element_dtype=nothing, num_elements=nothing) + function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) if tf.in_eager_mode() - tensor_list_stack_eager(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) + tensor_list_stack_eager(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) else - tensor_list_stack_graph(input_handle_, element_shape_; name=name, element_dtype=element_dtype, num_elements=num_elements) + tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) end end end @@ -58957,7 +56486,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) local desc tf.with_op_name(name, "BarrierIncompleteSize") do desc = tf.NodeDescription("BarrierIncompleteSize") @@ -58993,7 +56522,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, 
dt=nothing, preferred_shard=nothing) local desc tf.with_op_name(name, "Restore") do desc = tf.NodeDescription("Restore") @@ -59045,7 +56574,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) local desc tf.with_op_name(name, "TensorArrayV3") do desc = tf.NodeDescription("TensorArrayV3") @@ -59122,7 +56651,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalAssertNextDataset") do desc = tf.NodeDescription("ExperimentalAssertNextDataset") @@ -59174,7 +56703,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) local desc tf.with_op_name(name, "InTopK") do desc = tf.NodeDescription("InTopK") @@ -59222,7 +56751,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterSub") do desc = tf.NodeDescription("ScatterSub") @@ -59278,7 +56807,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function acosh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function acosh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Acosh") do desc = tf.NodeDescription("Acosh") @@ -59316,7 +56845,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) local desc tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") @@ -59381,69 +56910,13 @@ begin end -""" - cast(x; Truncate=false) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, 
DstT=nothing, Truncate=nothing) - local desc - tf.with_op_name(name, "Cast") do - desc = tf.NodeDescription("Cast") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - end - tf.Tensor(tf.Operation(desc)) - end - function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - desc = tf.EagerOp("Cast") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - desc["SrcT"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - if tf.in_eager_mode() - cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - else - cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - end - end -end - - """ quantize_v2(input, min_range, max_range; mode=MIN_COMBINED, round_mode=HALF_AWAY_FROM_ZERO) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) local desc tf.with_op_name(name, "QuantizeV2") do desc = tf.NodeDescription("QuantizeV2") @@ -59498,13 +56971,69 @@ begin end +""" + cast(x; Truncate=false) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "Cast") do + desc = tf.NodeDescription("Cast") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + tf.Tensor(tf.Operation(desc)) + end + function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + desc = tf.EagerOp("Cast") + x_ = convert(tf.EagerTensor, x_) + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + desc["SrcT"] = tf.data_type(x_) + res = tf.execute(desc) + node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.in_eager_mode() + cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end + end +end + + """ generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args) """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "GeneratorDataset") do desc = tf.NodeDescription("GeneratorDataset") @@ -59596,7 +57125,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) local desc tf.with_op_name(name, "TensorForestTreeSerialize") do desc = tf.NodeDescription("TensorForestTreeSerialize") @@ -59626,56 +57155,13 @@ begin end -""" - next_after(x1, x2) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function next_after_graph(x1_, x2_; name=nothing) - local desc - tf.with_op_name(name, "NextAfter") do - desc = tf.NodeDescription("NextAfter") - x1_ = convert(Tensor{Float32}, x1_) - x2_ = convert(Tensor{Float32}, x2_) - (x1_, x2_) = tf.tf_promote(x1_, x2_) - tf.add_input(desc, x1_) - tf.add_input(desc, x2_) - end - tf.Tensor(tf.Operation(desc)) - end - function next_after_eager(x1_, x2_; name=nothing) - desc = tf.EagerOp("NextAfter") - x1_ = convert(tf.EagerTensor, x1_) - x2_ = convert(tf.EagerTensor, x2_) - tf.add_input(desc, x1_) - tf.add_input(desc, x2_) - desc["T"] = tf.data_type(x1_) - desc["T"] = tf.data_type(x2_) - res = tf.execute(desc) - node = tf.TapeNode(next_after, [x1_, x2_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function next_after(x1_, x2_; name=nothing) - if tf.in_eager_mode() - next_after_eager(x1_, x2_; name=name) - else - next_after_graph(x1_, x2_; name=name) - end - end -end - - """ tensor_array_close_v2(handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) local desc tf.with_op_name(name, "TensorArrayCloseV2") do desc = tf.NodeDescription("TensorArrayCloseV2") @@ -59711,7 +57197,7 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
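
For example, a hypothetical call through the generated dispatch wrapper (the
project/dataset/table values below are placeholders, not a tested setup; the
keyword names come from the generated signature underneath):

    reader = big_query_reader(project_id="my-project", dataset_id="my_dataset",
                              table_id="my_table", columns=["name"],
                              timestamp_millis=0)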
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) local desc tf.with_op_name(name, "BigQueryReader") do desc = tf.NodeDescription("BigQueryReader") @@ -59791,7 +57277,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) local desc tf.with_op_name(name, "ReaderReadV2") do desc = tf.NodeDescription("ReaderReadV2") @@ -59836,7 +57322,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function mod_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mod_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "Mod") do desc = tf.NodeDescription("Mod") @@ -59879,7 +57365,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function add_v2_graph(x_, y_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_v2_graph(x_, y_; name=nothing) local desc tf.with_op_name(name, "AddV2") do desc = tf.NodeDescription("AddV2") @@ -59922,7 +57408,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "StatelessRandomNormal") do desc = tf.NodeDescription("StatelessRandomNormal") @@ -59972,7 +57458,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "StridedSliceAssign") do desc = tf.NodeDescription("StridedSliceAssign") @@ -60100,7 +57586,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ScatterMin") do desc = tf.NodeDescription("ScatterMin") @@ -60156,7 +57642,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) local desc tf.with_op_name(name, "ResourceStridedSliceAssign") do desc = tf.NodeDescription("ResourceStridedSliceAssign") @@ -60283,7 +57769,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) local desc tf.with_op_name(name, "RandomGammaGrad") do desc = tf.NodeDescription("RandomGammaGrad") @@ -60326,7 +57812,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) local desc tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") @@ -60401,7 +57887,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) local desc tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") @@ -60451,7 +57937,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "QuantizedRelu6") do desc = tf.NodeDescription("QuantizedRelu6") @@ -60508,7 +57994,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSparseMaximum") do desc = tf.NodeDescription("SparseSparseMaximum") @@ -60572,7 +58058,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) local desc tf.with_op_name(name, "BatchNormWithGlobalNormalization") do desc = tf.NodeDescription("BatchNormWithGlobalNormalization") @@ -60642,7 +58128,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) local desc tf.with_op_name(name, "InTopKV2") do desc = tf.NodeDescription("InTopKV2") @@ -60689,7 +58175,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cholesky_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cholesky_graph(input_; name=nothing) local desc tf.with_op_name(name, "Cholesky") do desc = tf.NodeDescription("Cholesky") @@ -60727,7 +58213,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) local desc tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") @@ -60807,7 +58293,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) local desc tf.with_op_name(name, "ResourceApplyAdagrad") do desc = tf.NodeDescription("ResourceApplyAdagrad") @@ -60870,7 +58356,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") @@ -60954,7 +58440,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeBicubicGrad") do desc = tf.NodeDescription("ResizeBicubicGrad") @@ -61002,7 +58488,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchSelfAdjointEig") do desc = tf.NodeDescription("BatchSelfAdjointEig") @@ -61040,7 +58526,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) local desc tf.with_op_name(name, "SparseSoftmax") do desc = tf.NodeDescription("SparseSoftmax") @@ -61086,7 +58572,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function asinh_graph(x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function asinh_graph(x_; name=nothing) local desc tf.with_op_name(name, "Asinh") do desc = tf.NodeDescription("Asinh") @@ -61118,102 +58604,13 @@ begin end -""" - quantized_conv2d_and_relu(input, filter, min_input, max_input, min_filter, max_filter; out_type=Float32, dilations=[1, 1, 1, 1]) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2DAndRelu") do - desc = tf.NodeDescription("QuantizedConv2DAndRelu") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2DAndRelu") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - min_input_ = convert(tf.EagerTensor, min_input_) - 
max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d_and_relu, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function quantized_conv2d_and_relu(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_and_relu_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_and_relu_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end -end - - """ matrix_inverse(input; adjoint=false) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) local desc tf.with_op_name(name, "MatrixInverse") do desc = tf.NodeDescription("MatrixInverse") @@ -61257,7 +58654,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) local desc tf.with_op_name(name, "TensorListConcatLists") do desc = tf.NodeDescription("TensorListConcatLists") @@ -61303,7 +58700,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) local desc tf.with_op_name(name, "Requantize") do desc = tf.NodeDescription("Requantize") @@ -61368,7 +58765,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function fft_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft_graph(input_; name=nothing) local desc tf.with_op_name(name, "FFT") do desc = tf.NodeDescription("FFT") @@ -61406,7 +58803,7 @@ end """ begin - #= 
/Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) local desc tf.with_op_name(name, "ConjugateTranspose") do desc = tf.NodeDescription("ConjugateTranspose") @@ -61450,7 +58847,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) local desc tf.with_op_name(name, "Unstage") do desc = tf.NodeDescription("Unstage") @@ -61512,7 +58909,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) local desc tf.with_op_name(name, "Relu6Grad") do desc = tf.NodeDescription("Relu6Grad") @@ -61549,70 +58946,13 @@ begin end -""" - scale_and_translate_grad(grads, original_image, scale, translation; kernel_type=lanczos3) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function scale_and_translate_grad_graph(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) - local desc - tf.with_op_name(name, "ScaleAndTranslateGrad") do - desc = tf.NodeDescription("ScaleAndTranslateGrad") - grads_ = convert(Tensor{Any}, grads_) - original_image_ = convert(Tensor{Any}, original_image_) - scale_ = convert(Tensor{Float32}, scale_) - translation_ = convert(Tensor{Float32}, translation_) - (grads_, original_image_) = tf.tf_promote(grads_, original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - tf.add_input(desc, scale_) - tf.add_input(desc, translation_) - if kernel_type !== nothing - desc["kernel_type"] = Base.String(kernel_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) - desc = tf.EagerOp("ScaleAndTranslateGrad") - grads_ = convert(tf.EagerTensor, grads_) - original_image_ = convert(tf.EagerTensor, original_image_) - scale_ = convert(tf.EagerTensor, scale_) - translation_ = convert(tf.EagerTensor, translation_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - tf.add_input(desc, scale_) - tf.add_input(desc, translation_) - if kernel_type !== nothing - desc["kernel_type"] = Base.String(kernel_type) - end - desc["T"] = tf.data_type(grads_) - desc["T"] = tf.data_type(original_image_) - res = tf.execute(desc) - node = tf.TapeNode(scale_and_translate_grad, [grads_, original_image_, scale_, translation_], name=nothing, kernel_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function scale_and_translate_grad(grads_, original_image_, scale_, translation_; name=nothing, kernel_type=nothing) - if tf.in_eager_mode() - scale_and_translate_grad_eager(grads_, original_image_, scale_, translation_; name=name, kernel_type=kernel_type) - else - scale_and_translate_grad_graph(grads_, original_image_, 
scale_, translation_; name=name, kernel_type=kernel_type) - end - end -end - - """ _array_to_list(input) Converts an array of tensors to a list of tensors. """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) local desc tf.with_op_name(name, "_ArrayToList") do desc = tf.NodeDescription("_ArrayToList") @@ -61656,117 +58996,13 @@ begin end -""" - cudnn_rnnv3(input, input_h, input_c, params, sequence_lengths; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "CudnnRNNV3") do - desc = tf.NodeDescription("CudnnRNNV3") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - sequence_lengths_ = convert(Tensor{Int32}, sequence_lengths_) - (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, sequence_lengths_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - desc = tf.EagerOp("CudnnRNNV3") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - sequence_lengths_ = convert(tf.EagerTensor, sequence_lengths_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, sequence_lengths_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(input_) - 
desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnnv3, [input_, input_h_, input_c_, params_, sequence_lengths_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - function cudnn_rnnv3(input_, input_h_, input_c_, params_, sequence_lengths_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.in_eager_mode() - cudnn_rnnv3_eager(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - else - cudnn_rnnv3_graph(input_, input_h_, input_c_, params_, sequence_lengths_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - end - end -end - - """ expand_dims(input, dim) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) local desc tf.with_op_name(name, "ExpandDims") do desc = tf.NodeDescription("ExpandDims") @@ -61811,7 +59047,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) local desc tf.with_op_name(name, "InvGrad") do desc = tf.NodeDescription("InvGrad") @@ -61854,7 +59090,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) local desc tf.with_op_name(name, "NonMaxSuppression") do desc = tf.NodeDescription("NonMaxSuppression") @@ -61904,7 +59140,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function l2loss_graph(t_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function l2loss_graph(t_; name=nothing) local desc tf.with_op_name(name, "L2Loss") do desc = tf.NodeDescription("L2Loss") @@ -61942,7 +59178,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) local desc tf.with_op_name(name, "ResizeArea") do desc = tf.NodeDescription("ResizeArea") @@ -61990,7 +59226,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, 
internal_type=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) local desc tf.with_op_name(name, "SparseCross") do desc = tf.NodeDescription("SparseCross") @@ -62091,7 +59327,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function batch_fft3d_graph(input_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft3d_graph(input_; name=nothing) local desc tf.with_op_name(name, "BatchFFT3D") do desc = tf.NodeDescription("BatchFFT3D") @@ -62127,7 +59363,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) local desc tf.with_op_name(name, "RandomStandardNormal") do desc = tf.NodeDescription("RandomStandardNormal") @@ -62183,7 +59419,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) local desc tf.with_op_name(name, "ResourceScatterMul") do desc = tf.NodeDescription("ResourceScatterMul") @@ -62238,7 +59474,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) local desc tf.with_op_name(name, "SdcaOptimizer") do desc = tf.NodeDescription("SdcaOptimizer") @@ -62369,7 +59605,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function zeta_graph(x_, q_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zeta_graph(x_, q_; name=nothing) local desc tf.with_op_name(name, "Zeta") do desc = tf.NodeDescription("Zeta") @@ -62412,7 +59648,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function 
sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) local desc tf.with_op_name(name, "SampleDistortedBoundingBox") do desc = tf.NodeDescription("SampleDistortedBoundingBox") @@ -62501,7 +59737,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) local desc tf.with_op_name(name, "IgammaGradA") do desc = tf.NodeDescription("IgammaGradA") @@ -62544,7 +59780,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) local desc tf.with_op_name(name, "SegmentMax") do desc = tf.NodeDescription("SegmentMax") @@ -62589,7 +59825,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) local desc tf.with_op_name(name, "Range") do desc = tf.NodeDescription("Range") @@ -62634,10 +59870,10 @@ end """ retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; table_id=-1, table_name=) - +Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) local desc tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") @@ -62698,7 +59934,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) local desc tf.with_op_name(name, "FlushSummaryWriter") do desc = tf.NodeDescription("FlushSummaryWriter") @@ -62734,7 +59970,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) local desc tf.with_op_name(name, "Dequantize") do desc = tf.NodeDescription("Dequantize") @@ -62786,7 +60022,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) local desc tf.with_op_name(name, "SparseFillEmptyRowsGrad") do desc = tf.NodeDescription("SparseFillEmptyRowsGrad") @@ -62833,7 +60069,7 @@ end """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) local desc tf.with_op_name(name, "IteratorGetNext") do desc = tf.NodeDescription("IteratorGetNext") @@ -62875,13 +60111,57 @@ begin end +""" + prevent_gradient(input; message=) + + +""" +begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "PreventGradient") do + desc = tf.NodeDescription("PreventGradient") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if message !== nothing + desc["message"] = Base.String(message) + end + end + tf.Tensor(tf.Operation(desc)) + end + function prevent_gradient_eager(input_; name=nothing, message=nothing) + desc = tf.EagerOp("PreventGradient") + input_ = convert(tf.EagerTensor, input_) + tf.add_input(desc, input_) + if message !== nothing + desc["message"] = Base.String(message) + end + desc["T"] = tf.data_type(input_) + res = tf.execute(desc) + node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end + end + function prevent_gradient(input_; name=nothing, message=nothing) + if tf.in_eager_mode() + prevent_gradient_eager(input_; name=name, message=message) + else + prevent_gradient_graph(input_; name=name, message=message) + end + end +end + + """ sparse_tensor_dense_add(a_indices, a_values, a_shape, b) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) local desc tf.with_op_name(name, "SparseTensorDenseAdd") do desc = tf.NodeDescription("SparseTensorDenseAdd") @@ -62931,57 +60211,13 @@ begin end -""" - prevent_gradient(input; message=) - - -""" -begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) - local desc - tf.with_op_name(name, "PreventGradient") do - desc = tf.NodeDescription("PreventGradient") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if message !== nothing - desc["message"] = Base.String(message) - end - end - tf.Tensor(tf.Operation(desc)) - end - function prevent_gradient_eager(input_; name=nothing, message=nothing) - desc = tf.EagerOp("PreventGradient") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if message !== nothing - desc["message"] = Base.String(message) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - function prevent_gradient(input_; name=nothing, message=nothing) - if tf.in_eager_mode() - prevent_gradient_eager(input_; name=name, message=message) - else - prevent_gradient_graph(input_; name=name, message=message) - end - end -end - - """ lookup_table_export(table_handle) """ begin - #= /Users/malmaud/.julia/dev/TensorFlow/src/generate_ops.jl:231 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) local desc tf.with_op_name(name, "LookupTableExport") do desc = tf.NodeDescription("LookupTableExport") From 71e668eda96a0be0a38d740da8b03da5017a585a Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 19:54:05 -0400 Subject: [PATCH 45/49] Fix some tests --- src/io/tfrecord.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/io/tfrecord.jl b/src/io/tfrecord.jl index e0b5b7ce..82b425f8 100644 --- a/src/io/tfrecord.jl +++ b/src/io/tfrecord.jl @@ -22,7 +22,7 @@ Opens a TensorFlow record writer. Records will be written to the file at the given path. """ function RecordWriter(path::AbstractString) - pyo = @tf.py_proc py_tf[].python_io.TFRecordWriter($path) + pyo = @tf.py_proc py_tf[][:python_io][:TFRecordWriter]($path) RecordWriter(pyo) end @@ -33,7 +33,7 @@ Writes a record `msg` to the TensorFlow writer `writer`. Tries to convert the msg to `Vector{UInt8}` before writing. 
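
A minimal usage sketch (the file name is a placeholder; assumes the do-block
constructor `RecordWriter(f, path)` defined below):

    RecordWriter("records.tfrecord") do writer
        write(writer, "payload")
    end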
""" function Base.write(writer::RecordWriter, msg::Vector{UInt8}) - fetch(@tf.py_proc $(writer.pyo).write(py_bytes($msg))) + fetch(@tf.py_proc $(writer.pyo)[:write](py_bytes($msg))) end Base.write(writer::RecordWriter, s::AbstractString) = write(writer, Vector{UInt8}(s)) @@ -45,7 +45,7 @@ function RecordWriter(f::Function, path) end function Base.close(writer::RecordWriter) - fetch(@tf.py_proc $(writer.pyo).close()) + fetch(@tf.py_proc $(writer.pyo)[:close]()) end struct RecordIterator @@ -59,17 +59,17 @@ Returns a Julia iterator that returns the records in the TF Record file at `path` as `Vector{UInt8}` objects. """ function RecordIterator(path::AbstractString) - pyo = @tf.py_proc py_tf[].python_io.tf_record_iterator($path) + pyo = @tf.py_proc py_tf[][:python_io][:tf_record_iterator]($path) RecordIterator(pyo) end function _next(iter::RecordIterator) try ans=@static if PyCall.pyversion >= v"3.0.0" - fetch(@tf.py_proc $(iter.pyo).__next__()) + fetch(@tf.py_proc $(iter.pyo)[:__next__]()) else #Python 2 - fetch(@tf.py_proc $(iter.pyo).next()) + fetch(@tf.py_proc $(iter.pyo)[:next]()) end Vector{UInt8}(ans) catch err From ef6b3abafa29681fb8fe4a0a9b177d2b3e31ce96 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 20:21:42 -0400 Subject: [PATCH 46/49] Move @op --- src/generate_ops.jl | 4 +- src/ops/imported_ops.jl | 50916 +++++++++++++++++++------------------- test/runtests.jl | 6 +- 3 files changed, 25465 insertions(+), 25461 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index 7e334aab..a893d63a 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -218,7 +218,7 @@ function to_function(op::tensorflow.OpDef) graph_name = Symbol("$(jl_name)_graph") eager_name = Symbol("$(jl_name)_eager") expr = quote - @tf.op function $graph_name($(inputs...)) + function $graph_name($(inputs...)) local desc tf.with_op_name(name, $(op.name)) do desc = tf.NodeDescription($(op.name)) @@ -259,7 +259,7 @@ function to_function(op::tensorflow.OpDef) end call_args = [call_kw_params; inputs[2:end]] dispatch_expr = quote - function $jl_name($(inputs...)) + @tf.op function $jl_name($(inputs...)) if tf.in_eager_mode() $(eager_name)($(call_args...)) else diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index d0be97f8..9161cd63 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-03-15T19:13:57.806 +# Autogenerated on 2019-03-15T19:52:15.908 module Ops import TensorFlow @@ -10,23 +10,23 @@ import TensorFlow: Tensor """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - local desc - tf.with_op_name(name, "ReduceJoin") do - desc = tf.NodeDescription("ReduceJoin") - inputs_ = convert(Tensor{String}, inputs_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - tf.add_input(desc, inputs_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - if separator !== nothing - desc["separator"] = Base.String(separator) - end + function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + local desc + tf.with_op_name(name, "ReduceJoin") do + desc = tf.NodeDescription("ReduceJoin") + inputs_ = convert(Tensor{String}, inputs_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + tf.add_input(desc, inputs_) + tf.add_input(desc, reduction_indices_) 
+ if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + if separator !== nothing + desc["separator"] = Base.String(separator) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) desc = tf.EagerOp("ReduceJoin") inputs_ = convert(tf.EagerTensor, inputs_) @@ -46,13 +46,13 @@ begin return res[1] end end - function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - if tf.in_eager_mode() - reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) - else - reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + if tf.in_eager_mode() + reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + else + reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + end end - end end @@ -62,37 +62,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - local desc - tf.with_op_name(name, "ReduceDataset") do - desc = tf.NodeDescription("ReduceDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - end - tf.Tensor(tf.Operation(desc)) + function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + local desc + tf.with_op_name(name, "ReduceDataset") do + desc = tf.NodeDescription("ReduceDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = 
map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end end + tf.Tensor(tf.Operation(desc)) + end function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) desc = tf.EagerOp("ReduceDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -126,13 +126,13 @@ begin return res[1] end end - function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - if tf.in_eager_mode() - reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) - else - reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + if tf.in_eager_mode() + reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + else + reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + end end - end end @@ -142,25 +142,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListFromTensor") do - desc = tf.NodeDescription("TensorListFromTensor") - tensor_ = convert(Tensor{Any}, tensor_) - element_shape_ = convert(Tensor{Any}, element_shape_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListFromTensor") do + desc = tf.NodeDescription("TensorListFromTensor") + tensor_ = convert(Tensor{Any}, tensor_) + element_shape_ = convert(Tensor{Any}, element_shape_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - 
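        # Build the Operation from the finished NodeDescription and wrap its
        # output as a graph Tensor.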
tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListFromTensor") tensor_ = convert(tf.EagerTensor, tensor_) @@ -182,13 +182,13 @@ begin return res[1] end end - function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end end @@ -198,18 +198,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ExtractJpegShape") do - desc = tf.NodeDescription("ExtractJpegShape") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end + function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ExtractJpegShape") do + desc = tf.NodeDescription("ExtractJpegShape") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) desc = tf.EagerOp("ExtractJpegShape") contents_ = convert(tf.EagerTensor, contents_) @@ -224,13 +224,13 @@ begin return res[1] end end - function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) - else - extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) + else + extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) + end end - end end @@ -240,27 +240,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "Svd") do - desc = tf.NodeDescription("Svd") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end + function svd_graph(input_; name=nothing, compute_uv=nothing, 
full_matrices=nothing) + local desc + tf.with_op_name(name, "Svd") do + desc = tf.NodeDescription("Svd") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("Svd") input_ = convert(tf.EagerTensor, input_) @@ -279,13 +279,13 @@ begin return res end end - function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.in_eager_mode() - svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - else - svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + if tf.in_eager_mode() + svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + else + svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + end end - end end @@ -295,21 +295,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNextSync") do - desc = tf.NodeDescription("IteratorGetNextSync") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextSync") do + desc = tf.NodeDescription("IteratorGetNextSync") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextSync") iterator_ = convert(tf.EagerTensor, iterator_) @@ -327,13 +327,13 @@ begin return res[1] end end - function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + 
iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -343,25 +343,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - local desc - tf.with_op_name(name, "RefEnter") do - desc = tf.NodeDescription("RefEnter") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) - end - if is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) - end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end + function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + local desc + tf.with_op_name(name, "RefEnter") do + desc = tf.NodeDescription("RefEnter") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) desc = tf.EagerOp("RefEnter") data_ = convert(tf.EagerTensor, data_) @@ -383,13 +383,13 @@ begin return res[1] end end - function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - if tf.in_eager_mode() - ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) - else - ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + if tf.in_eager_mode() + ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + else + ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + end end - end end @@ -399,16 +399,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function erf_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Erf") do - desc = tf.NodeDescription("Erf") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function erf_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Erf") do + desc = tf.NodeDescription("Erf") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function erf_eager(x_; name=nothing) desc = tf.EagerOp("Erf") x_ = convert(tf.EagerTensor, x_) @@ -421,13 +421,13 @@ begin return res[1] end end - function erf(x_; name=nothing) - if tf.in_eager_mode() - erf_eager(x_; name=name) - else - erf_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erf(x_; 
name=nothing) + if tf.in_eager_mode() + erf_eager(x_; name=name) + else + erf_graph(x_; name=name) + end end - end end @@ -437,20 +437,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_export_v2_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableExportV2") do - desc = tf.NodeDescription("LookupTableExportV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - tf.add_input(desc, table_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function lookup_table_export_v2_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExportV2") do + desc = tf.NodeDescription("LookupTableExportV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + tf.add_input(desc, table_handle_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function lookup_table_export_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExportV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -462,13 +462,13 @@ begin return res end end - function lookup_table_export_v2(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_export_v2_eager(table_handle_; name=name) - else - lookup_table_export_v2_graph(table_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_export_v2_eager(table_handle_; name=name) + else + lookup_table_export_v2_graph(table_handle_; name=name) + end end - end end @@ -478,16 +478,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function round_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Round") do - desc = tf.NodeDescription("Round") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function round_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Round") do + desc = tf.NodeDescription("Round") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function round_eager(x_; name=nothing) desc = tf.EagerOp("Round") x_ = convert(tf.EagerTensor, x_) @@ -500,13 +500,13 @@ begin return res[1] end end - function round(x_; name=nothing) - if tf.in_eager_mode() - round_eager(x_; name=name) - else - round_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function round(x_; name=nothing) + if tf.in_eager_mode() + round_eager(x_; name=name) + else + round_graph(x_; name=name) + end end - end end @@ -516,22 +516,22 @@ end Retrieves a single tensor from the computation outfeed. 
This operation will """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "OutfeedDequeue") do - desc = tf.NodeDescription("OutfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end + function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeue") do + desc = tf.NodeDescription("OutfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function outfeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) desc = tf.EagerOp("OutfeedDequeue") if dtype !== nothing @@ -550,13 +550,13 @@ begin return res[1] end end - function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - else - outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + else + outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + end end - end end @@ -566,15 +566,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do - desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do + desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeIsInitializedOp") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -586,13 +586,13 @@ begin return res[1] end end - function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) - else - tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) + else + 
tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) + end end - end end @@ -602,24 +602,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "Merge") do - desc = tf.NodeDescription("Merge") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function merge_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Merge") do + desc = tf.NodeDescription("Merge") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("Merge") inputs_ = convert(tf.EagerTensor, inputs_) @@ -635,13 +635,13 @@ begin return res end end - function merge(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - merge_eager(inputs_; name=name, N=N) - else - merge_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + merge_eager(inputs_; name=name, N=N) + else + merge_graph(inputs_; name=name, N=N) + end end - end end @@ -651,23 +651,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "HistogramFixedWidth") do - desc = tf.NodeDescription("HistogramFixedWidth") - values_ = convert(Tensor{Any}, values_) - value_range_ = convert(Tensor{Any}, value_range_) - nbins_ = convert(Tensor{Int32}, nbins_) - (values_, value_range_) = tf.tf_promote(values_, value_range_) - tf.add_input(desc, values_) - tf.add_input(desc, value_range_) - tf.add_input(desc, nbins_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HistogramFixedWidth") do + desc = tf.NodeDescription("HistogramFixedWidth") + values_ = convert(Tensor{Any}, values_) + value_range_ = convert(Tensor{Any}, value_range_) + nbins_ = convert(Tensor{Int32}, nbins_) + (values_, value_range_) = tf.tf_promote(values_, value_range_) + tf.add_input(desc, values_) + tf.add_input(desc, value_range_) + tf.add_input(desc, nbins_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) desc = tf.EagerOp("HistogramFixedWidth") values_ = convert(tf.EagerTensor, values_) @@ -688,13 +688,13 @@ begin return res[1] end end - function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) - else - histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) + else + histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) + end end - end end @@ -704,16 +704,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function asin_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Asin") do - desc = tf.NodeDescription("Asin") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function asin_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Asin") do + desc = tf.NodeDescription("Asin") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function asin_eager(x_; name=nothing) desc = tf.EagerOp("Asin") x_ = convert(tf.EagerTensor, x_) @@ -726,13 +726,13 @@ begin return res[1] end end - function asin(x_; name=nothing) - if tf.in_eager_mode() - asin_eager(x_; name=name) - else - asin_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asin(x_; name=nothing) + if tf.in_eager_mode() + asin_eager(x_; name=name) + else + asin_graph(x_; name=name) + end end - end end @@ -742,22 +742,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Any") do - desc = tf.NodeDescription("Any") - input_ = convert(Tensor{Bool}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Any") do + desc = tf.NodeDescription("Any") + input_ = convert(Tensor{Bool}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Any") input_ = convert(tf.EagerTensor, input_) @@ -775,13 +775,13 @@ begin return res[1] end end - function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + any_graph(input_, 
reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -791,18 +791,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rsqrt_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "RsqrtGrad") do - desc = tf.NodeDescription("RsqrtGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function rsqrt_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "RsqrtGrad") do + desc = tf.NodeDescription("RsqrtGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function rsqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("RsqrtGrad") y_ = convert(tf.EagerTensor, y_) @@ -818,13 +818,13 @@ begin return res[1] end end - function rsqrt_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - rsqrt_grad_eager(y_, dy_; name=name) - else - rsqrt_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + rsqrt_grad_eager(y_, dy_; name=name) + else + rsqrt_grad_graph(y_, dy_; name=name) + end end - end end @@ -834,22 +834,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatter") do - desc = tf.NodeDescription("TensorArrayScatter") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatter") do + desc = tf.NodeDescription("TensorArrayScatter") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatter") handle_ = convert(tf.EagerTensor, handle_) @@ -868,13 +868,13 @@ begin return res[1] end end - function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) - else - tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) + end end - end end 
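Every hunk in this patch applies the same mechanical rewrite: the `#= generate_ops.jl:262 =# tf.@op` annotation moves off the `*_graph` definitions and onto the public wrappers, which keep dispatching between an `*_eager` and a `*_graph` variant via `tf.in_eager_mode()`. A minimal sketch of that three-function pattern, with a hypothetical `EAGER` flag and a named tuple standing in for TensorFlow.jl's real eager context and graph-node machinery:

    # Stand-ins (assumptions, not the real API): a Ref toggle replaces the
    # eager context and a named tuple replaces a graph NodeDescription.
    const EAGER = Ref(false)
    in_eager_mode() = EAGER[]

    neg_eager(x_; name=nothing) = -x_                              # runs the op now
    neg_graph(x_; name=nothing) = (op="Neg", input=x_, name=name)  # records a node

    # Public wrapper: identical keyword signature to both variants, dispatch only.
    function neg(x_; name=nothing)
        if in_eager_mode()
            neg_eager(x_; name=name)
        else
            neg_graph(x_; name=name)
        end
    end

    EAGER[] = true
    @assert neg(3) == -3        # eager path executes immediately
    EAGER[] = false
    @assert neg(3).op == "Neg"  # graph path only builds a description

Keeping the eager and graph bodies as separate top-level functions, rather than branching inside one body, is what lets the generator tag only the wrapper with `tf.@op` while both backends share a single keyword signature.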
@@ -884,26 +884,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) - local desc - tf.with_op_name(name, "DynamicPartition") do - desc = tf.NodeDescription("DynamicPartition") - data_ = convert(Tensor{Any}, data_) - partitions_ = convert(Tensor{Int32}, partitions_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, partitions_) - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_partitions - push!(out, tf.Tensor(op, out_idx)) + function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + local desc + tf.with_op_name(name, "DynamicPartition") do + desc = tf.NodeDescription("DynamicPartition") + data_ = convert(Tensor{Any}, data_) + partitions_ = convert(Tensor{Int32}, partitions_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, partitions_) + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_partitions + push!(out, tf.Tensor(op, out_idx)) + end + out + end function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) desc = tf.EagerOp("DynamicPartition") data_ = convert(tf.EagerTensor, data_) @@ -921,13 +921,13 @@ begin return res end end - function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) - if tf.in_eager_mode() - dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) - else - dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) + if tf.in_eager_mode() + dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) + else + dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) + end end - end end @@ -937,23 +937,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do - desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_threads_ = convert(Tensor{Int64}, num_threads_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_threads_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_threads_ = convert(Tensor{Int64}, num_threads_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_threads_) + if output_types !== nothing + desc["output_types"] = 
map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -973,13 +973,13 @@ begin return res[1] end end - function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -989,15 +989,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_serialize_state_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderSerializeState") do - desc = tf.NodeDescription("ReaderSerializeState") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_serialize_state_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeState") do + desc = tf.NodeDescription("ReaderSerializeState") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_serialize_state_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeState") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -1009,13 +1009,13 @@ begin return res[1] end end - function reader_serialize_state(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_serialize_state_eager(reader_handle_; name=name) - else - reader_serialize_state_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_serialize_state_eager(reader_handle_; name=name) + else + reader_serialize_state_graph(reader_handle_; name=name) + end end - end end @@ -1025,18 +1025,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function right_shift_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "RightShift") do - desc = tf.NodeDescription("RightShift") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function right_shift_graph(x_, y_; name=nothing) + local desc 
+ tf.with_op_name(name, "RightShift") do + desc = tf.NodeDescription("RightShift") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function right_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("RightShift") x_ = convert(tf.EagerTensor, x_) @@ -1052,44 +1052,44 @@ begin return res[1] end end - function right_shift(x_, y_; name=nothing) - if tf.in_eager_mode() - right_shift_eager(x_, y_; name=name) - else - right_shift_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function right_shift(x_, y_; name=nothing) + if tf.in_eager_mode() + right_shift_eager(x_, y_; name=name) + else + right_shift_graph(x_, y_; name=name) + end end - end end """ - avg_pool3d(input; data_format=NDHWC) + avg_pool3d(input; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool3D") do - desc = tf.NodeDescription("AvgPool3D") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3D") do + desc = tf.NodeDescription("AvgPool3D") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3D") input_ = convert(tf.EagerTensor, input_) @@ -1114,13 +1114,13 @@ begin return res[1] end end - function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -1130,19 +1130,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
encode_png_graph(image_; name=nothing, compression=nothing) - local desc - tf.with_op_name(name, "EncodePng") do - desc = tf.NodeDescription("EncodePng") - image_ = convert(Tensor{UInt8}, image_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - if compression !== nothing - desc["compression"] = Base.Int(compression) - end + function encode_png_graph(image_; name=nothing, compression=nothing) + local desc + tf.with_op_name(name, "EncodePng") do + desc = tf.NodeDescription("EncodePng") + image_ = convert(Tensor{UInt8}, image_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + if compression !== nothing + desc["compression"] = Base.Int(compression) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function encode_png_eager(image_; name=nothing, compression=nothing) desc = tf.EagerOp("EncodePng") image_ = convert(tf.EagerTensor, image_) @@ -1158,13 +1158,13 @@ begin return res[1] end end - function encode_png(image_; name=nothing, compression=nothing) - if tf.in_eager_mode() - encode_png_eager(image_; name=name, compression=compression) - else - encode_png_graph(image_; name=name, compression=compression) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_png(image_; name=nothing, compression=nothing) + if tf.in_eager_mode() + encode_png_eager(image_; name=name, compression=compression) + else + encode_png_graph(image_; name=name, compression=compression) + end end - end end @@ -1174,28 +1174,28 @@ end Debug Identity Op. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - local desc - tf.with_op_name(name, "DebugIdentity") do - desc = tf.NodeDescription("DebugIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - end - tf.Tensor(tf.Operation(desc)) + function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugIdentity") do + desc = tf.NodeDescription("DebugIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end end + tf.Tensor(tf.Operation(desc)) + end function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugIdentity") input_ = convert(tf.EagerTensor, input_) @@ -1220,13 +1220,13 @@ begin return res[1] end end - function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.in_eager_mode() - debug_identity_eager(input_; name=name, 
device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - else - debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + if tf.in_eager_mode() + debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + else + debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + end end - end end @@ -1236,16 +1236,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function imag_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Imag") do - desc = tf.NodeDescription("Imag") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function imag_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Imag") do + desc = tf.NodeDescription("Imag") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function imag_eager(input_; name=nothing) desc = tf.EagerOp("Imag") input_ = convert(tf.EagerTensor, input_) @@ -1258,13 +1258,13 @@ begin return res[1] end end - function imag(input_; name=nothing) - if tf.in_eager_mode() - imag_eager(input_; name=name) - else - imag_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function imag(input_; name=nothing) + if tf.in_eager_mode() + imag_eager(input_; name=name) + else + imag_graph(input_; name=name) + end end - end end @@ -1274,39 +1274,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do - desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, 
lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do + desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrlV2") var_ = convert(tf.EagerTensor, var_) @@ -1346,13 +1346,13 @@ begin return res[1] end end - function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -1362,28 +1362,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StageClear") do - desc = tf.NodeDescription("StageClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageClear") do + desc = 
tf.NodeDescription("StageClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stage_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StageClear") if capacity !== nothing @@ -1408,13 +1408,13 @@ begin return res[1] end end - function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -1424,16 +1424,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sign_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sign") do - desc = tf.NodeDescription("Sign") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function sign_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sign") do + desc = tf.NodeDescription("Sign") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function sign_eager(x_; name=nothing) desc = tf.EagerOp("Sign") x_ = convert(tf.EagerTensor, x_) @@ -1446,13 +1446,13 @@ begin return res[1] end end - function sign(x_; name=nothing) - if tf.in_eager_mode() - sign_eager(x_; name=name) - else - sign_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sign(x_; name=nothing) + if tf.in_eager_mode() + sign_eager(x_; name=name) + else + sign_graph(x_; name=name) + end end - end end @@ -1462,16 +1462,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function population_count_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "PopulationCount") do - desc = tf.NodeDescription("PopulationCount") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function population_count_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "PopulationCount") do + desc = tf.NodeDescription("PopulationCount") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function 
population_count_eager(x_; name=nothing) desc = tf.EagerOp("PopulationCount") x_ = convert(tf.EagerTensor, x_) @@ -1484,13 +1484,13 @@ begin return res[1] end end - function population_count(x_; name=nothing) - if tf.in_eager_mode() - population_count_eager(x_; name=name) - else - population_count_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function population_count(x_; name=nothing) + if tf.in_eager_mode() + population_count_eager(x_; name=name) + else + population_count_graph(x_; name=name) + end end - end end @@ -1500,16 +1500,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function neg_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Neg") do - desc = tf.NodeDescription("Neg") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function neg_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Neg") do + desc = tf.NodeDescription("Neg") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function neg_eager(x_; name=nothing) desc = tf.EagerOp("Neg") x_ = convert(tf.EagerTensor, x_) @@ -1522,13 +1522,13 @@ begin return res[1] end end - function neg(x_; name=nothing) - if tf.in_eager_mode() - neg_eager(x_; name=name) - else - neg_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg(x_; name=nothing) + if tf.in_eager_mode() + neg_eager(x_; name=name) + else + neg_graph(x_; name=name) + end end - end end @@ -1538,19 +1538,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "AnonymousIterator") do - desc = tf.NodeDescription("AnonymousIterator") - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "AnonymousIterator") do + desc = tf.NodeDescription("AnonymousIterator") + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function anonymous_iterator_eager(; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("AnonymousIterator") if output_types !== nothing @@ -1566,13 +1566,13 @@ begin return res[1] end end - function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes) - else - anonymous_iterator_graph(; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes) + else + anonymous_iterator_graph(; name=name, output_types=output_types, 
output_shapes=output_shapes) + end end - end end @@ -1582,25 +1582,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceSum") do - desc = tf.NodeDescription("SparseReduceSum") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSum") do + desc = tf.NodeDescription("SparseReduceSum") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSum") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -1622,34 +1622,34 @@ begin return res[1] end end - function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end end """ - string_length(input; unit=BYTE) + string_length(input; unit=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_length_graph(input_; name=nothing, unit=nothing) - local desc - tf.with_op_name(name, "StringLength") do - desc = tf.NodeDescription("StringLength") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end + function string_length_graph(input_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "StringLength") do + desc = tf.NodeDescription("StringLength") + input_ = convert(Tensor{String}, input_) + 
tf.add_input(desc, input_) + if unit !== nothing + desc["unit"] = Base.String(unit) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_length_eager(input_; name=nothing, unit=nothing) desc = tf.EagerOp("StringLength") input_ = convert(tf.EagerTensor, input_) @@ -1664,13 +1664,13 @@ begin return res[1] end end - function string_length(input_; name=nothing, unit=nothing) - if tf.in_eager_mode() - string_length_eager(input_; name=name, unit=unit) - else - string_length_graph(input_; name=name, unit=unit) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_length(input_; name=nothing, unit=nothing) + if tf.in_eager_mode() + string_length_eager(input_; name=name, unit=unit) + else + string_length_graph(input_; name=name, unit=unit) + end end - end end @@ -1680,29 +1680,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "FilterDataset") do - desc = tf.NodeDescription("FilterDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if predicate !== nothing - desc["predicate"] = Base.identity(predicate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterDataset") do + desc = tf.NodeDescription("FilterDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -1728,46 +1728,46 @@ begin return res[1] end end - function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end end """ - conv3d(input, filter; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d(input, filter; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3D") do - desc = tf.NodeDescription("Conv3D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3D") do + desc = tf.NodeDescription("Conv3D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3D") input_ = convert(tf.EagerTensor, input_) @@ -1795,13 +1795,13 @@ begin return res[1] end end - function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -1811,30 +1811,30 @@ end 
Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters") if table_id !== nothing @@ -1856,13 +1856,13 @@ begin return res end end - function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -1872,15 +1872,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_has_value_graph(optional_; name=nothing) - local desc - tf.with_op_name(name, "OptionalHasValue") do - desc = tf.NodeDescription("OptionalHasValue") - optional_ = convert(Tensor{Any}, optional_) - tf.add_input(desc, optional_) - end - tf.Tensor(tf.Operation(desc)) + function optional_has_value_graph(optional_; name=nothing) + local desc + tf.with_op_name(name, "OptionalHasValue") do + desc = tf.NodeDescription("OptionalHasValue") + optional_ = convert(Tensor{Any}, optional_) + tf.add_input(desc, 
optional_) end + tf.Tensor(tf.Operation(desc)) + end function optional_has_value_eager(optional_; name=nothing) desc = tf.EagerOp("OptionalHasValue") optional_ = convert(tf.EagerTensor, optional_) @@ -1892,13 +1892,13 @@ begin return res[1] end end - function optional_has_value(optional_; name=nothing) - if tf.in_eager_mode() - optional_has_value_eager(optional_; name=name) - else - optional_has_value_graph(optional_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_has_value(optional_; name=nothing) + if tf.in_eager_mode() + optional_has_value_eager(optional_; name=name) + else + optional_has_value_graph(optional_; name=name) + end end - end end @@ -1908,40 +1908,40 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ApplyAdam") do - desc = tf.NodeDescription("ApplyAdam") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end + function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ApplyAdam") do + desc = tf.NodeDescription("ApplyAdam") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end function apply_adam_eager(var_, m_, v_, 
beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyAdam") var_ = convert(tf.EagerTensor, var_) @@ -1987,64 +1987,64 @@ begin return res[1] end end - function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end """ - cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_to_canonical(num_layers, num_units, input_size, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNParamsToCanonical") do - desc = tf.NodeDescription("CudnnRNNParamsToCanonical") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - params_ = convert(Tensor{Any}, params_) - (params_,) = tf.tf_promote(params_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, params_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsToCanonical") do + desc = tf.NodeDescription("CudnnRNNParamsToCanonical") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + params_ = convert(Tensor{Any}, params_) + 
(params_,) = tf.tf_promote(params_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + tf.add_input(desc, params_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if direction !== nothing + desc["direction"] = Base.String(direction) end - out + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsToCanonical") num_layers_ = convert(tf.EagerTensor, num_layers_) @@ -2084,13 +2084,13 @@ begin return res end end - function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end end @@ -2100,17 +2100,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft3d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT3D") do - desc = tf.NodeDescription("IRFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function irfft3d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT3D") do + desc = tf.NodeDescription("IRFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + 
tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function irfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT3D") input_ = convert(tf.EagerTensor, input_) @@ -2124,13 +2124,13 @@ begin return res[1] end end - function irfft3d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft3d_eager(input_, fft_length_; name=name) - else - irfft3d_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft3d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft3d_eager(input_, fft_length_; name=name) + else + irfft3d_graph(input_, fft_length_; name=name) + end end - end end @@ -2140,16 +2140,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function angle_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Angle") do - desc = tf.NodeDescription("Angle") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function angle_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Angle") do + desc = tf.NodeDescription("Angle") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function angle_eager(input_; name=nothing) desc = tf.EagerOp("Angle") input_ = convert(tf.EagerTensor, input_) @@ -2162,13 +2162,13 @@ begin return res[1] end end - function angle(input_; name=nothing) - if tf.in_eager_mode() - angle_eager(input_; name=name) - else - angle_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function angle(input_; name=nothing) + if tf.in_eager_mode() + angle_eager(input_; name=name) + else + angle_graph(input_; name=name) + end end - end end @@ -2178,19 +2178,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do - desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do + desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TensorForestTreeResourceHandleOp") if container !== nothing @@ -2206,13 +2206,13 @@ begin return res[1] end end - function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -2222,38 +2222,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "LearnedUnigramCandidateSampler") do - desc = tf.NodeDescription("LearnedUnigramCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LearnedUnigramCandidateSampler") do + desc = tf.NodeDescription("LearnedUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LearnedUnigramCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -2283,13 +2283,13 @@ begin return res end end - function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, 
range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - end end @@ -2299,16 +2299,16 @@ end A graph node which represents an argument to a function. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _arg_graph(; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_Arg") do - desc = tf.NodeDescription("_Arg") - if index !== nothing - desc["index"] = Base.Int(index) - end + function _arg_graph(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Arg") do + desc = tf.NodeDescription("_Arg") + if index !== nothing + desc["index"] = Base.Int(index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _arg_eager(; name=nothing, index=nothing) desc = tf.EagerOp("_Arg") if index !== nothing @@ -2321,13 +2321,13 @@ begin return res[1] end end - function _arg(; name=nothing, index=nothing) - if tf.in_eager_mode() - _arg_eager(; name=name, index=index) - else - _arg_graph(; name=name, index=index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _arg(; name=nothing, index=nothing) + if tf.in_eager_mode() + _arg_eager(; name=name, index=index) + else + _arg_graph(; name=name, index=index) + end end - end end @@ -2337,16 +2337,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_square_root_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixSquareRoot") do - desc = tf.NodeDescription("MatrixSquareRoot") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_square_root_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSquareRoot") do + desc = tf.NodeDescription("MatrixSquareRoot") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_square_root_eager(input_; name=nothing) desc = tf.EagerOp("MatrixSquareRoot") input_ = convert(tf.EagerTensor, input_) @@ -2359,13 +2359,13 @@ begin return res[1] end end - function matrix_square_root(input_; name=nothing) - if tf.in_eager_mode() - matrix_square_root_eager(input_; name=name) - else - matrix_square_root_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_square_root(input_; name=nothing) + if tf.in_eager_mode() + matrix_square_root_eager(input_; name=name) + else + matrix_square_root_graph(input_; name=name) + end end - end end @@ -2375,22 +2375,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseMul") do - desc = tf.NodeDescription("SparseDenseCwiseMul") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, 
dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseMul") do + desc = tf.NodeDescription("SparseDenseCwiseMul") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseMul") sp_indices_ = convert(tf.EagerTensor, sp_indices_) @@ -2410,13 +2410,13 @@ begin return res[1] end end - function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end end @@ -2426,28 +2426,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcatV3") do - desc = tf.NodeDescription("TensorArrayConcatV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end + function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV3") do + desc = tf.NodeDescription("TensorArrayConcatV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV3") handle_ = convert(tf.EagerTensor, handle_) @@ -2467,13 +2467,13 @@ begin return res end end - function tensor_array_concat_v3(handle_, flow_in_; 
name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - end end @@ -2483,15 +2483,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_script_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "UnicodeScript") do - desc = tf.NodeDescription("UnicodeScript") - input_ = convert(Tensor{Int32}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function unicode_script_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "UnicodeScript") do + desc = tf.NodeDescription("UnicodeScript") + input_ = convert(Tensor{Int32}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function unicode_script_eager(input_; name=nothing) desc = tf.EagerOp("UnicodeScript") input_ = convert(tf.EagerTensor, input_) @@ -2503,13 +2503,13 @@ begin return res[1] end end - function unicode_script(input_; name=nothing) - if tf.in_eager_mode() - unicode_script_eager(input_; name=name) - else - unicode_script_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_script(input_; name=nothing) + if tf.in_eager_mode() + unicode_script_eager(input_; name=name) + else + unicode_script_graph(input_; name=name) + end end - end end @@ -2519,18 +2519,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_cholesky_grad_graph(l_, grad_; name=nothing) - local desc - tf.with_op_name(name, "BatchCholeskyGrad") do - desc = tf.NodeDescription("BatchCholeskyGrad") - l_ = convert(Tensor{Any}, l_) - grad_ = convert(Tensor{Any}, grad_) - (l_, grad_) = tf.tf_promote(l_, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) - end - tf.Tensor(tf.Operation(desc)) + function batch_cholesky_grad_graph(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholeskyGrad") do + desc = tf.NodeDescription("BatchCholeskyGrad") + l_ = convert(Tensor{Any}, l_) + grad_ = convert(Tensor{Any}, grad_) + (l_, grad_) = tf.tf_promote(l_, grad_) + tf.add_input(desc, l_) + tf.add_input(desc, grad_) end + tf.Tensor(tf.Operation(desc)) + end function batch_cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("BatchCholeskyGrad") l_ = convert(tf.EagerTensor, l_) @@ -2546,13 +2546,13 @@ begin return res[1] end end - function batch_cholesky_grad(l_, grad_; name=nothing) - if tf.in_eager_mode() - batch_cholesky_grad_eager(l_, grad_; name=name) - else - batch_cholesky_grad_graph(l_, grad_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing) + if tf.in_eager_mode() + batch_cholesky_grad_eager(l_, grad_; name=name) + else + batch_cholesky_grad_graph(l_, 
grad_; name=name) + end end - end end @@ -2562,23 +2562,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Mean") do - desc = tf.NodeDescription("Mean") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Mean") do + desc = tf.NodeDescription("Mean") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Mean") input_ = convert(tf.EagerTensor, input_) @@ -2597,13 +2597,13 @@ begin return res[1] end end - function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -2613,15 +2613,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT") do - desc = tf.NodeDescription("BatchFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_fft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT") do + desc = tf.NodeDescription("BatchFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_fft_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT") input_ = convert(tf.EagerTensor, input_) @@ -2633,13 +2633,13 @@ begin return res[1] end end - function batch_fft(input_; name=nothing) - if tf.in_eager_mode() - batch_fft_eager(input_; name=name) - else - batch_fft_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft(input_; name=nothing) + if tf.in_eager_mode() + batch_fft_eager(input_; name=name) + else + batch_fft_graph(input_; name=name) + end end - end end @@ -2649,16 +2649,16 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sin_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sin") do - desc = tf.NodeDescription("Sin") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function sin_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sin") do + desc = tf.NodeDescription("Sin") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function sin_eager(x_; name=nothing) desc = tf.EagerOp("Sin") x_ = convert(tf.EagerTensor, x_) @@ -2671,13 +2671,13 @@ begin return res[1] end end - function sin(x_; name=nothing) - if tf.in_eager_mode() - sin_eager(x_; name=name) - else - sin_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sin(x_; name=nothing) + if tf.in_eager_mode() + sin_eager(x_; name=name) + else + sin_graph(x_; name=name) + end end - end end @@ -2687,19 +2687,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do - desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_ensemble_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp") if container !== nothing @@ -2715,13 +2715,13 @@ begin return res[1] end end - function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -2731,34 +2731,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "QuantizedMaxPool") do - desc = tf.NodeDescription("QuantizedMaxPool") - input_ = convert(Tensor{Any}, input_) - min_input_ 
= convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "QuantizedMaxPool") do + desc = tf.NodeDescription("QuantizedMaxPool") + input_ = convert(Tensor{Any}, input_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedMaxPool") input_ = convert(tf.EagerTensor, input_) @@ -2784,13 +2784,13 @@ begin return res end end - function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end end @@ -2800,37 +2800,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapStage") do - desc = tf.NodeDescription("OrderedMapStage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing 
- desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapStage") do + desc = tf.NodeDescription("OrderedMapStage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapStage") key_ = convert(tf.EagerTensor, key_) @@ -2864,13 +2864,13 @@ begin return res[1] end end - function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - else - ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + else + ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + end end - end end @@ -2880,33 +2880,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - local desc - tf.with_op_name(name, "PartitionedCall") do - desc = tf.NodeDescription("PartitionedCall") - args_ = [convert(Tensor{Any}, x) for x = args_] - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) 
- end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) - end - end - tf.Tensor(tf.Operation(desc)) + function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "PartitionedCall") do + desc = tf.NodeDescription("PartitionedCall") + args_ = [convert(Tensor{Any}, x) for x = args_] + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end end + tf.Tensor(tf.Operation(desc)) + end function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("PartitionedCall") args_ = convert(tf.EagerTensor, args_) @@ -2936,13 +2936,13 @@ begin return res[1] end end - function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - if tf.in_eager_mode() - partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - else - partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + if tf.in_eager_mode() + partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + else + partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + end end - end end @@ -2952,32 +2952,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "SparseApplyAdagrad") do - desc = tf.NodeDescription("SparseApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + 
tf.with_op_name(name, "SparseApplyAdagrad") do + desc = tf.NodeDescription("SparseApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("SparseApplyAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -3008,54 +3008,54 @@ begin return res[1] end end - function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - else - sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end end """ - decode_proto_v2(bytes; descriptor_source=local://, message_format=binary, sanitize=false) + decode_proto_v2(bytes; descriptor_source=, message_format=, sanitize=false) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) - local desc - tf.with_op_name(name, "DecodeProtoV2") do - desc = tf.NodeDescription("DecodeProtoV2") - bytes_ = convert(Tensor{String}, bytes_) - tf.add_input(desc, bytes_) - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if message_format !== nothing - desc["message_format"] = Base.String(message_format) - end - if sanitize !== nothing - desc["sanitize"] = Base.Bool(sanitize) - end + function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + local desc + tf.with_op_name(name, "DecodeProtoV2") do + desc = tf.NodeDescription("DecodeProtoV2") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if message_type !== nothing + 
desc["message_type"] = Base.String(message_type) + end + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) end - out + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + if message_format !== nothing + desc["message_format"] = Base.String(message_format) + end + if sanitize !== nothing + desc["sanitize"] = Base.Bool(sanitize) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) desc = tf.EagerOp("DecodeProtoV2") bytes_ = convert(tf.EagerTensor, bytes_) @@ -3085,13 +3085,13 @@ begin return res end end - function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) - if tf.in_eager_mode() - decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) - else - decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + if tf.in_eager_mode() + decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + else + decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + end end - end end @@ -3101,20 +3101,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function betainc_graph(a_, b_, x_; name=nothing) - local desc - tf.with_op_name(name, "Betainc") do - desc = tf.NodeDescription("Betainc") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - x_ = convert(Tensor{Any}, x_) - (a_, b_, x_) = tf.tf_promote(a_, b_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function betainc_graph(a_, b_, x_; name=nothing) + local desc + tf.with_op_name(name, "Betainc") do + desc = tf.NodeDescription("Betainc") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + x_ = convert(Tensor{Any}, x_) + (a_, b_, x_) = tf.tf_promote(a_, b_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function betainc_eager(a_, b_, x_; name=nothing) desc = tf.EagerOp("Betainc") a_ = convert(tf.EagerTensor, a_) @@ -3133,13 +3133,13 @@ begin return res[1] end end - function betainc(a_, b_, x_; name=nothing) - if tf.in_eager_mode() - 
betainc_eager(a_, b_, x_; name=name) - else - betainc_graph(a_, b_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function betainc(a_, b_, x_; name=nothing) + if tf.in_eager_mode() + betainc_eager(a_, b_, x_; name=name) + else + betainc_graph(a_, b_, x_; name=name) + end end - end end @@ -3149,16 +3149,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function guarantee_const_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "GuaranteeConst") do - desc = tf.NodeDescription("GuaranteeConst") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function guarantee_const_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "GuaranteeConst") do + desc = tf.NodeDescription("GuaranteeConst") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function guarantee_const_eager(input_; name=nothing) desc = tf.EagerOp("GuaranteeConst") input_ = convert(tf.EagerTensor, input_) @@ -3171,13 +3171,13 @@ begin return res[1] end end - function guarantee_const(input_; name=nothing) - if tf.in_eager_mode() - guarantee_const_eager(input_; name=name) - else - guarantee_const_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function guarantee_const(input_; name=nothing) + if tf.in_eager_mode() + guarantee_const_eager(input_; name=name) + else + guarantee_const_graph(input_; name=name) + end end - end end @@ -3187,18 +3187,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_bmp_graph(contents_; name=nothing, channels=nothing) - local desc - tf.with_op_name(name, "DecodeBmp") do - desc = tf.NodeDescription("DecodeBmp") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end + function decode_bmp_graph(contents_; name=nothing, channels=nothing) + local desc + tf.with_op_name(name, "DecodeBmp") do + desc = tf.NodeDescription("DecodeBmp") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function decode_bmp_eager(contents_; name=nothing, channels=nothing) desc = tf.EagerOp("DecodeBmp") contents_ = convert(tf.EagerTensor, contents_) @@ -3213,13 +3213,13 @@ begin return res[1] end end - function decode_bmp(contents_; name=nothing, channels=nothing) - if tf.in_eager_mode() - decode_bmp_eager(contents_; name=name, channels=channels) - else - decode_bmp_graph(contents_; name=name, channels=channels) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing) + if tf.in_eager_mode() + decode_bmp_eager(contents_; name=name, channels=channels) + else + decode_bmp_graph(contents_; name=name, channels=channels) + end end - end end @@ -3229,25 +3229,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesBucketize") do - desc = 
tf.NodeDescription("BoostedTreesBucketize") - float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] - bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] - tf.add_input(desc, float_values_) - tf.add_input(desc, bucket_boundaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) + function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesBucketize") do + desc = tf.NodeDescription("BoostedTreesBucketize") + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + tf.add_input(desc, float_values_) + tf.add_input(desc, bucket_boundaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesBucketize") float_values_ = convert(tf.EagerTensor, float_values_) @@ -3264,13 +3264,13 @@ begin return res end end - function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features) - else - boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features) + else + boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features) + end end - end end @@ -3280,14 +3280,14 @@ end An op that shuts down a running distributed TPU system. 
The Op returns """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shutdown_distributed_tpu_graph(; name=nothing) - local desc - tf.with_op_name(name, "ShutdownDistributedTPU") do - desc = tf.NodeDescription("ShutdownDistributedTPU") - end - tf.Tensor(tf.Operation(desc)) + function shutdown_distributed_tpu_graph(; name=nothing) + local desc + tf.with_op_name(name, "ShutdownDistributedTPU") do + desc = tf.NodeDescription("ShutdownDistributedTPU") end + tf.Tensor(tf.Operation(desc)) + end function shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("ShutdownDistributedTPU") res = tf.execute(desc) if length(res) >= 1 return res[1] end end - function shutdown_distributed_tpu(; name=nothing) - if tf.in_eager_mode() - shutdown_distributed_tpu_eager(; name=name) - else - shutdown_distributed_tpu_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shutdown_distributed_tpu(; name=nothing) + if tf.in_eager_mode() + shutdown_distributed_tpu_eager(; name=name) + else + shutdown_distributed_tpu_graph(; name=name) + end end - end end @@ -3313,15 +3313,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do - desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") iterator_ = convert(tf.EagerTensor, iterator_) tf.add_input(desc, iterator_) res = tf.execute(desc) if length(res) >= 1 return res[1] end end - function experimental_stats_aggregator_summary(iterator_; name=nothing) - if tf.in_eager_mode() - experimental_stats_aggregator_summary_eager(iterator_; name=name) - else - experimental_stats_aggregator_summary_graph(iterator_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing) + if tf.in_eager_mode() + experimental_stats_aggregator_summary_eager(iterator_; name=name) + else + experimental_stats_aggregator_summary_graph(iterator_; name=name) + end end - end end @@ -3349,14 +3349,14 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function timestamp_graph(; name=nothing) - local desc - tf.with_op_name(name, "Timestamp") do - desc = tf.NodeDescription("Timestamp") - end - tf.Tensor(tf.Operation(desc)) + function timestamp_graph(; name=nothing) + local desc + tf.with_op_name(name, "Timestamp") do + desc = tf.NodeDescription("Timestamp") end + tf.Tensor(tf.Operation(desc)) + end function timestamp_eager(; name=nothing) desc = tf.EagerOp("Timestamp") res = tf.execute(desc) if length(res) >= 1 return res[1] end end - function timestamp(; name=nothing) - if tf.in_eager_mode() - timestamp_eager(; name=name) - else - timestamp_graph(; name=name) + #=
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function timestamp(; name=nothing) + if tf.in_eager_mode() + timestamp_eager(; name=name) + else + timestamp_graph(; name=name) + end end - end end @@ -3382,16 +3382,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_exponential_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixExponential") do - desc = tf.NodeDescription("MatrixExponential") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_exponential_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixExponential") do + desc = tf.NodeDescription("MatrixExponential") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_exponential_eager(input_; name=nothing) desc = tf.EagerOp("MatrixExponential") input_ = convert(tf.EagerTensor, input_) @@ -3404,13 +3404,13 @@ begin return res[1] end end - function matrix_exponential(input_; name=nothing) - if tf.in_eager_mode() - matrix_exponential_eager(input_; name=name) - else - matrix_exponential_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_exponential(input_; name=nothing) + if tf.in_eager_mode() + matrix_exponential_eager(input_; name=name) + else + matrix_exponential_graph(input_; name=name) + end end - end end @@ -3420,19 +3420,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function size_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Size") do - desc = tf.NodeDescription("Size") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function size_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Size") do + desc = tf.NodeDescription("Size") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function size_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Size") input_ = convert(tf.EagerTensor, input_) @@ -3448,13 +3448,13 @@ begin return res[1] end end - function size(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - size_eager(input_; name=name, out_type=out_type) - else - size_graph(input_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function size(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + size_eager(input_; name=name, out_type=out_type) + else + size_graph(input_; name=name, out_type=out_type) + end end - end end @@ -3464,19 +3464,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_n_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "AddN") do - desc = tf.NodeDescription("AddN") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function 
add_n_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "AddN") do + desc = tf.NodeDescription("AddN") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function add_n_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("AddN") inputs_ = convert(tf.EagerTensor, inputs_) @@ -3492,13 +3492,13 @@ begin return res[1] end end - function add_n(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - add_n_eager(inputs_; name=name, N=N) - else - add_n_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_n(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + add_n_eager(inputs_; name=name, N=N) + else + add_n_graph(inputs_; name=name, N=N) + end end - end end @@ -3508,22 +3508,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSum") do - desc = tf.NodeDescription("SparseSegmentSum") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSum") do + desc = tf.NodeDescription("SparseSegmentSum") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSum") data_ = convert(tf.EagerTensor, data_) @@ -3541,13 +3541,13 @@ begin return res[1] end end - function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name) + end end - end end @@ -3557,23 +3557,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "BatchDataset") do - desc = tf.NodeDescription("BatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - tf.add_input(desc, 
input_dataset_) - tf.add_input(desc, batch_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDataset") do + desc = tf.NodeDescription("BatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -3593,13 +3593,13 @@ begin return res[1] end end - function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) - else - batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -3609,34 +3609,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "RecordInput") do - desc = tf.NodeDescription("RecordInput") - if file_pattern !== nothing - desc["file_pattern"] = Base.String(file_pattern) - end - if file_random_seed !== nothing - desc["file_random_seed"] = Base.Int(file_random_seed) - end - if file_shuffle_shift_ratio !== nothing - desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) - end - if file_buffer_size !== nothing - desc["file_buffer_size"] = Base.Int(file_buffer_size) - end - if file_parallelism !== nothing - desc["file_parallelism"] = Base.Int(file_parallelism) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - end - tf.Tensor(tf.Operation(desc)) + function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "RecordInput") do + desc = tf.NodeDescription("RecordInput") + if file_pattern !== nothing + desc["file_pattern"] = 
Base.String(file_pattern) + end + if file_random_seed !== nothing + desc["file_random_seed"] = Base.Int(file_random_seed) + end + if file_shuffle_shift_ratio !== nothing + desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) + end + if file_buffer_size !== nothing + desc["file_buffer_size"] = Base.Int(file_buffer_size) + end + if file_parallelism !== nothing + desc["file_parallelism"] = Base.Int(file_parallelism) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end end + tf.Tensor(tf.Operation(desc)) + end function record_input_eager(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) desc = tf.EagerOp("RecordInput") if file_pattern !== nothing @@ -3667,13 +3667,13 @@ begin return res[1] end end - function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) - if tf.in_eager_mode() - record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) - else - record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + if tf.in_eager_mode() + record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + else + record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + end end - end end @@ -3683,23 +3683,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueUpToV2") do - desc = tf.NodeDescription("QueueDequeueUpToV2") - handle_ = convert(Tensor{Any}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpToV2") do + desc = 
tf.NodeDescription("QueueDequeueUpToV2") + handle_ = convert(Tensor{Any}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpToV2") handle_ = convert(tf.EagerTensor, handle_) @@ -3719,13 +3719,13 @@ begin return res[1] end end - function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -3735,30 +3735,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") if table_id !== nothing @@ -3780,13 +3780,13 @@ 
begin return res end end - function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -3796,33 +3796,33 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function 
load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -3852,13 +3852,13 @@ begin return res[1] end end - function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -3868,16 +3868,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_tensor_graph(tensor_; name=nothing) - local desc - tf.with_op_name(name, "SerializeTensor") do - desc = tf.NodeDescription("SerializeTensor") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - end - tf.Tensor(tf.Operation(desc)) + function serialize_tensor_graph(tensor_; name=nothing) + local desc + tf.with_op_name(name, "SerializeTensor") do + desc = tf.NodeDescription("SerializeTensor") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) end + tf.Tensor(tf.Operation(desc)) + end function serialize_tensor_eager(tensor_; name=nothing) desc = tf.EagerOp("SerializeTensor") tensor_ = convert(tf.EagerTensor, tensor_) @@ -3890,13 +3890,13 @@ begin return res[1] end end - function serialize_tensor(tensor_; name=nothing) - if tf.in_eager_mode() - serialize_tensor_eager(tensor_; name=name) - else - serialize_tensor_graph(tensor_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_tensor(tensor_; name=nothing) + if tf.in_eager_mode() + serialize_tensor_eager(tensor_; name=name) + else + serialize_tensor_graph(tensor_; name=name) + end end - end end @@ -3906,18 +3906,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mul_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Mul") do - desc = tf.NodeDescription("Mul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - 
tf.Tensor(tf.Operation(desc)) + function mul_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mul") do + desc = tf.NodeDescription("Mul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function mul_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mul") x_ = convert(tf.EagerTensor, x_) @@ -3933,13 +3933,13 @@ begin return res[1] end end - function mul(x_, y_; name=nothing) - if tf.in_eager_mode() - mul_eager(x_, y_; name=name) - else - mul_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mul(x_, y_; name=nothing) + if tf.in_eager_mode() + mul_eager(x_, y_; name=name) + else + mul_graph(x_, y_; name=name) + end end - end end @@ -3949,23 +3949,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) - local desc - tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do - desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") - features_ = convert(Tensor{Any}, features_) - labels_ = convert(Tensor{Any}, labels_) - (features_, labels_) = tf.tf_promote(features_, labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + local desc + tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do + desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") + features_ = convert(Tensor{Any}, features_) + labels_ = convert(Tensor{Any}, labels_) + (features_, labels_) = tf.tf_promote(features_, labels_) + tf.add_input(desc, features_) + tf.add_input(desc, labels_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") features_ = convert(tf.EagerTensor, features_) @@ -3981,13 +3981,13 @@ begin return res end end - function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - if tf.in_eager_mode() - softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) - else - softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.in_eager_mode() + softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end end - end end @@ -3997,25 +3997,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterDiv") do - desc = tf.NodeDescription("ResourceScatterDiv") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - 
tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterDiv") do + desc = tf.NodeDescription("ResourceScatterDiv") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterDiv") resource_ = convert(tf.EagerTensor, resource_) @@ -4036,13 +4036,13 @@ begin return res[1] end end - function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -4052,25 +4052,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordDatasetV2") do - desc = tf.NodeDescription("FixedLengthRecordDatasetV2") - filenames_ = convert(Tensor{String}, filenames_) - header_bytes_ = convert(Tensor{Int64}, header_bytes_) - record_bytes_ = convert(Tensor{Int64}, record_bytes_) - footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - compression_type_ = convert(Tensor{String}, compression_type_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, compression_type_) - end - tf.Tensor(tf.Operation(desc)) + function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDatasetV2") do + desc = tf.NodeDescription("FixedLengthRecordDatasetV2") + filenames_ = convert(Tensor{String}, filenames_) + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + compression_type_ = convert(Tensor{String}, compression_type_) + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) + 
tf.add_input(desc, compression_type_) end + tf.Tensor(tf.Operation(desc)) + end function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDatasetV2") filenames_ = convert(tf.EagerTensor, filenames_) @@ -4092,13 +4092,13 @@ begin return res[1] end end - function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) - if tf.in_eager_mode() - fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) - else - fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + if tf.in_eager_mode() + fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + else + fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + end end - end end @@ -4108,23 +4108,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "SkipDataset") do - desc = tf.NodeDescription("SkipDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "SkipDataset") do + desc = tf.NodeDescription("SkipDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("SkipDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -4144,13 +4144,13 @@ begin return res[1] end end - function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + 
skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -4160,16 +4160,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cosh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Cosh") do - desc = tf.NodeDescription("Cosh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function cosh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Cosh") do + desc = tf.NodeDescription("Cosh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function cosh_eager(x_; name=nothing) desc = tf.EagerOp("Cosh") x_ = convert(tf.EagerTensor, x_) @@ -4182,58 +4182,58 @@ begin return res[1] end end - function cosh(x_; name=nothing) - if tf.in_eager_mode() - cosh_eager(x_; name=name) - else - cosh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cosh(x_; name=nothing) + if tf.in_eager_mode() + cosh_eager(x_; name=name) + else + cosh_graph(x_; name=name) + end end - end end """ - fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm_v2(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNormV2") do - desc = tf.NodeDescription("FusedBatchNormV2") - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - offset_ = convert(Tensor{Any}, offset_) - mean_ = convert(Tensor{Any}, mean_) - variance_ = convert(Tensor{Any}, variance_) - (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if U !== nothing - desc["U"] = Base.identity(U) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end + function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormV2") do + desc = tf.NodeDescription("FusedBatchNormV2") + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + offset_ = convert(Tensor{Any}, offset_) + mean_ = convert(Tensor{Any}, mean_) + variance_ = convert(Tensor{Any}, variance_) + (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if U !== nothing + desc["U"] = Base.identity(U) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) end - out = tf.Tensor[] - op = 
tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - out + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormV2") x_ = convert(tf.EagerTensor, x_) @@ -4270,13 +4270,13 @@ begin return res end end - function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - end end @@ -4286,22 +4286,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplit") do - desc = tf.NodeDescription("TensorArraySplit") - handle_ = convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplit") do + desc = tf.NodeDescription("TensorArraySplit") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplit") handle_ = convert(tf.EagerTensor, handle_) @@ -4320,13 +4320,13 @@ begin return res[1] end end - function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end end @@ -4336,35 +4336,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) - local desc - tf.with_op_name(name, "CTCLoss") do - desc = tf.NodeDescription("CTCLoss") - inputs_ = convert(Tensor{Float32}, inputs_) - labels_indices_ = convert(Tensor{Int64}, labels_indices_) - labels_values_ = convert(Tensor{Int32}, labels_values_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, labels_indices_) - tf.add_input(desc, labels_values_) - tf.add_input(desc, sequence_length_) - if preprocess_collapse_repeated !== nothing - desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) - end - if ctc_merge_repeated !== nothing - desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) - end - if ignore_longer_outputs_than_inputs !== nothing - desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + local desc + tf.with_op_name(name, "CTCLoss") do + desc = tf.NodeDescription("CTCLoss") + inputs_ = convert(Tensor{Float32}, inputs_) + labels_indices_ = convert(Tensor{Int64}, labels_indices_) + labels_values_ = convert(Tensor{Int32}, labels_values_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, labels_indices_) + tf.add_input(desc, labels_values_) + tf.add_input(desc, sequence_length_) + if preprocess_collapse_repeated !== nothing + desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) + end + if ctc_merge_repeated !== nothing + desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + end + if ignore_longer_outputs_than_inputs !== nothing + desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) desc = tf.EagerOp("CTCLoss") inputs_ = convert(tf.EagerTensor, inputs_) @@ -4391,13 +4391,13 @@ begin return res end end - function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) - if tf.in_eager_mode() - ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) - 
else - ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + if tf.in_eager_mode() + ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) + else + ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) + end end - end end @@ -4407,28 +4407,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedReshape") do - desc = tf.NodeDescription("QuantizedReshape") - tensor_ = convert(Tensor{Any}, tensor_) - shape_ = convert(Tensor{Int32}, shape_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (tensor_,) = tf.tf_promote(tensor_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedReshape") do + desc = tf.NodeDescription("QuantizedReshape") + tensor_ = convert(Tensor{Any}, tensor_) + shape_ = convert(Tensor{Int32}, shape_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (tensor_,) = tf.tf_promote(tensor_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("QuantizedReshape") tensor_ = convert(tf.EagerTensor, tensor_) @@ -4448,13 +4448,13 @@ begin return res end end - function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) - if tf.in_eager_mode() - quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name) - else - quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) + if tf.in_eager_mode() + quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name) + else + quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=name) + end end - end end @@ -4464,18 +4464,18 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "FloorDiv") do - desc = tf.NodeDescription("FloorDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function floor_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "FloorDiv") do + desc = tf.NodeDescription("FloorDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function floor_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorDiv") x_ = convert(tf.EagerTensor, x_) @@ -4491,13 +4491,13 @@ begin return res[1] end end - function floor_div(x_, y_; name=nothing) - if tf.in_eager_mode() - floor_div_eager(x_, y_; name=name) - else - floor_div_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_div(x_, y_; name=nothing) + if tf.in_eager_mode() + floor_div_eager(x_, y_; name=name) + else + floor_div_graph(x_, y_; name=name) + end end - end end @@ -4507,30 +4507,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) - local desc - tf.with_op_name(name, "TensorArrayV2") do - desc = tf.NodeDescription("TensorArrayV2") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV2") do + desc = tf.NodeDescription("TensorArrayV2") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV2") size_ = convert(tf.EagerTensor, size_) @@ -4557,13 +4557,13 @@ begin return res[1] end end - function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) - if tf.in_eager_mode() - tensor_array_v2_eager(size_; 
name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) - else - tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + if tf.in_eager_mode() + tensor_array_v2_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) + else + tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) + end end - end end @@ -4573,18 +4573,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "BarrierClose") do - desc = tf.NodeDescription("BarrierClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end + function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "BarrierClose") do + desc = tf.NodeDescription("BarrierClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("BarrierClose") handle_ = convert(tf.EagerTensor, handle_) @@ -4599,13 +4599,13 @@ begin return res[1] end end - function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end end @@ -4615,18 +4615,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function read_variable_op_graph(resource_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ReadVariableOp") do - desc = tf.NodeDescription("ReadVariableOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ReadVariableOp") do + desc = tf.NodeDescription("ReadVariableOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if dtype 
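Optional attributes such as `dtype`, `dynamic_size`, and `clear_after_read` above are ordinary keyword arguments defaulting to `nothing`; only non-`nothing` values are written into the op description, so anything omitted falls back to TensorFlow's registered default for that attr. A hypothetical call (values chosen purely for illustration):

    # element_shape and tensor_array_name stay at `nothing`, so the
    # kernel's own defaults apply to them.
    ta_handle = tensor_array_v2(10; dtype=Float32, dynamic_size=true,
                                clear_after_read=false)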
!== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function read_variable_op_eager(resource_; name=nothing, dtype=nothing) desc = tf.EagerOp("ReadVariableOp") resource_ = convert(tf.EagerTensor, resource_) @@ -4641,13 +4641,13 @@ begin return res[1] end end - function read_variable_op(resource_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - read_variable_op_eager(resource_; name=name, dtype=dtype) - else - read_variable_op_graph(resource_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + read_variable_op_eager(resource_; name=name, dtype=dtype) + else + read_variable_op_graph(resource_; name=name, dtype=dtype) + end end - end end @@ -4657,32 +4657,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedMul") do - desc = tf.NodeDescription("QuantizedMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - min_x_ = convert(Tensor{Float32}, min_x_) - max_x_ = convert(Tensor{Float32}, max_x_) - min_y_ = convert(Tensor{Float32}, min_y_) - max_y_ = convert(Tensor{Float32}, max_y_) - (x_,) = tf.tf_promote(x_) - (y_,) = tf.tf_promote(y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedMul") do + desc = tf.NodeDescription("QuantizedMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + min_x_ = convert(Tensor{Float32}, min_x_) + max_x_ = convert(Tensor{Float32}, max_x_) + min_y_ = convert(Tensor{Float32}, min_y_) + max_y_ = convert(Tensor{Float32}, max_y_) + (x_,) = tf.tf_promote(x_) + (y_,) = tf.tf_promote(y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedMul") x_ = convert(tf.EagerTensor, x_) @@ -4706,13 +4706,13 @@ begin return res end end - function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - if tf.in_eager_mode() - quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) - else - quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + if tf.in_eager_mode() + quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + else + quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + end end - end end @@ -4722,16 +4722,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function selu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Selu") do - desc 
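Multi-output ops such as QuantizedMul hand back all of their outputs together: the graph path collects one `tf.Tensor` per output index, and the eager path returns everything `tf.execute` produced. A hypothetical call site destructures the three results:

    # QuantizedMul yields the quantized product plus its output range.
    z, min_z, max_z = quantized_mul(x, y, min_x, max_x, min_y, max_y)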
= tf.NodeDescription("Selu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function selu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Selu") do + desc = tf.NodeDescription("Selu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function selu_eager(features_; name=nothing) desc = tf.EagerOp("Selu") features_ = convert(tf.EagerTensor, features_) @@ -4744,13 +4744,13 @@ begin return res[1] end end - function selu(features_; name=nothing) - if tf.in_eager_mode() - selu_eager(features_; name=name) - else - selu_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu(features_; name=nothing) + if tf.in_eager_mode() + selu_eager(features_; name=name) + else + selu_graph(features_; name=name) + end end - end end @@ -4760,21 +4760,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableInsert") do - desc = tf.NodeDescription("LookupTableInsert") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsert") do + desc = tf.NodeDescription("LookupTableInsert") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsert") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -4792,13 +4792,13 @@ begin return res[1] end end - function lookup_table_insert(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_insert_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_insert_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_insert_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_insert_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -4808,16 +4808,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function complex_abs_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "ComplexAbs") do - desc = tf.NodeDescription("ComplexAbs") - x_ = convert(Tensor{Complex{Float32}}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function complex_abs_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "ComplexAbs") do + desc = 
tf.NodeDescription("ComplexAbs") + x_ = convert(Tensor{Complex{Float32}}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function complex_abs_eager(x_; name=nothing) desc = tf.EagerOp("ComplexAbs") x_ = convert(tf.EagerTensor, x_) @@ -4830,13 +4830,13 @@ begin return res[1] end end - function complex_abs(x_; name=nothing) - if tf.in_eager_mode() - complex_abs_eager(x_; name=name) - else - complex_abs_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex_abs(x_; name=nothing) + if tf.in_eager_mode() + complex_abs_eager(x_; name=name) + else + complex_abs_graph(x_; name=name) + end end - end end @@ -4846,16 +4846,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function abs_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Abs") do - desc = tf.NodeDescription("Abs") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function abs_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Abs") do + desc = tf.NodeDescription("Abs") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function abs_eager(x_; name=nothing) desc = tf.EagerOp("Abs") x_ = convert(tf.EagerTensor, x_) @@ -4868,13 +4868,13 @@ begin return res[1] end end - function abs(x_; name=nothing) - if tf.in_eager_mode() - abs_eager(x_; name=name) - else - abs_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abs(x_; name=nothing) + if tf.in_eager_mode() + abs_eager(x_; name=name) + else + abs_graph(x_; name=name) + end end - end end @@ -4884,21 +4884,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableImport") do - desc = tf.NodeDescription("LookupTableImport") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImport") do + desc = tf.NodeDescription("LookupTableImport") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImport") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -4916,13 +4916,13 @@ begin return res[1] end end - function lookup_table_import(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_import_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_import_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
lookup_table_import(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_import_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_import_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -4932,40 +4932,40 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdam") do - desc = tf.NodeDescription("ResourceApplyAdam") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdam") do + desc = tf.NodeDescription("ResourceApplyAdam") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyAdam") var_ = convert(tf.EagerTensor, var_) @@ -5008,13 +5008,13 @@ begin return res[1] end end - function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - 
resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -5024,22 +5024,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) - local desc - tf.with_op_name(name, "WriteHistogramSummary") do - desc = tf.NodeDescription("WriteHistogramSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - values_ = convert(Tensor{Float32}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "WriteHistogramSummary") do + desc = tf.NodeDescription("WriteHistogramSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + values_ = convert(Tensor{Float32}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing) desc = tf.EagerOp("WriteHistogramSummary") writer_ = convert(tf.EagerTensor, writer_) @@ -5058,13 +5058,13 @@ begin return res[1] end end - function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) - if tf.in_eager_mode() - write_histogram_summary_eager(writer_, step_, tag_, values_; name=name) - else - write_histogram_summary_graph(writer_, step_, tag_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) + if tf.in_eager_mode() + write_histogram_summary_eager(writer_, step_, tag_, values_; name=name) + else + write_histogram_summary_graph(writer_, step_, tag_, values_; name=name) + end end - end end @@ -5074,31 +5074,31 @@ end Sends the named tensor from send_device to recv_device. 
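For a stateful training op like ResourceApplyAdam, the eager path applies the update the moment the wrapper is called, with no `Session.run` round trip. A hypothetical single optimizer step (all arguments are handles or tensors created elsewhere):

    resource_apply_adam(var, m, v, beta1_power, beta2_power,
                        lr, beta1, beta2, epsilon, grad;
                        use_locking=false, use_nesterov=false)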
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_HostSend") do - desc = tf.NodeDescription("_HostSend") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) + function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostSend") do + desc = tf.NodeDescription("_HostSend") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end end + tf.Tensor(tf.Operation(desc)) + end function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_HostSend") tensor_ = convert(tf.EagerTensor, tensor_) @@ -5126,13 +5126,13 @@ begin return res[1] end end - function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - end end @@ -5142,17 +5142,17 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do - desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(Tensor{Any}, dataset_) - materialized_ = convert(Tensor{Any}, materialized_) - tf.add_input(desc, dataset_) - tf.add_input(desc, materialized_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(Tensor{Any}, dataset_) + materialized_ = convert(Tensor{Any}, materialized_) + tf.add_input(desc, dataset_) + tf.add_input(desc, materialized_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") dataset_ = convert(tf.EagerTensor, dataset_) @@ -5166,13 +5166,13 @@ begin return res[1] end end - function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) - if tf.in_eager_mode() - experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) - else - experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) + if tf.in_eager_mode() + experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) + else + experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) + end end - end end @@ -5182,18 +5182,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function greater_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Greater") do - desc = tf.NodeDescription("Greater") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function greater_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Greater") do + desc = tf.NodeDescription("Greater") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function greater_eager(x_, y_; name=nothing) desc = tf.EagerOp("Greater") x_ = convert(tf.EagerTensor, x_) @@ -5209,13 +5209,13 @@ begin return res[1] end end - function greater(x_, y_; name=nothing) - if tf.in_eager_mode() - greater_eager(x_, y_; name=name) - else - greater_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater(x_, y_; name=nothing) + if tf.in_eager_mode() + greater_eager(x_, y_; name=name) + else + greater_graph(x_, y_; name=name) + end end - end end @@ -5225,19 +5225,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_broadcast_graph(input_; name=nothing, shape=nothing) - local desc - tf.with_op_name(name, "NcclBroadcast") do - desc = tf.NodeDescription("NcclBroadcast") - input_ = convert(Tensor{Any}, input_) - 
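For binary ops like Greater, `tf.tf_promote` unifies the two operand dtypes before the op is built, which is what lets a tensor be compared against a plain Julia literal in graph mode. A hypothetical example:

    mask = greater(x, 0)   # the literal 0 is converted and promoted to x's dtype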
(input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "NcclBroadcast") do + desc = tf.NodeDescription("NcclBroadcast") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function nccl_broadcast_eager(input_; name=nothing, shape=nothing) desc = tf.EagerOp("NcclBroadcast") input_ = convert(tf.EagerTensor, input_) @@ -5253,13 +5253,13 @@ begin return res[1] end end - function nccl_broadcast(input_; name=nothing, shape=nothing) - if tf.in_eager_mode() - nccl_broadcast_eager(input_; name=name, shape=shape) - else - nccl_broadcast_graph(input_; name=name, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing) + if tf.in_eager_mode() + nccl_broadcast_eager(input_; name=name, shape=shape) + else + nccl_broadcast_graph(input_; name=name, shape=shape) + end end - end end @@ -5269,21 +5269,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPushBackBatch") do - desc = tf.NodeDescription("TensorListPushBackBatch") - input_handles_ = convert(Tensor{Any}, input_handles_) - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, input_handles_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPushBackBatch") do + desc = tf.NodeDescription("TensorListPushBackBatch") + input_handles_ = convert(Tensor{Any}, input_handles_) + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, input_handles_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBackBatch") input_handles_ = convert(tf.EagerTensor, input_handles_) @@ -5301,13 +5301,13 @@ begin return res[1] end end - function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype) - else - tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype) + else + tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype) + end end - end end @@ -5317,25 +5317,25 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMin") do - desc = tf.NodeDescription("ResourceScatterMin") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMin") do + desc = tf.NodeDescription("ResourceScatterMin") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMin") resource_ = convert(tf.EagerTensor, resource_) @@ -5356,13 +5356,13 @@ begin return res[1] end end - function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -5372,25 +5372,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) - local desc - tf.with_op_name(name, "Slice") do - desc = tf.NodeDescription("Slice") - input_ = convert(Tensor{Any}, input_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - size_ = convert(Tensor{Any}, size_) - (input_,) = tf.tf_promote(input_) - (begin_, size_) = tf.tf_promote(begin_, size_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, size_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end + function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + local desc + tf.with_op_name(name, "Slice") do + desc = tf.NodeDescription("Slice") + input_ = convert(Tensor{Any}, input_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + size_ = convert(Tensor{Any}, size_) + (input_,) = tf.tf_promote(input_) + (begin_, size_) = tf.tf_promote(begin_, size_) + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, 
size_) + if Index !== nothing + desc["Index"] = Base.identity(Index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function slice_eager(input_, begin_, size_; name=nothing, Index=nothing) desc = tf.EagerOp("Slice") input_ = convert(tf.EagerTensor, input_) @@ -5412,48 +5412,48 @@ begin return res[1] end end - function slice(input_, begin_, size_; name=nothing, Index=nothing) - if tf.in_eager_mode() - slice_eager(input_, begin_, size_; name=name, Index=Index) - else - slice_graph(input_, begin_, size_; name=name, Index=Index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing) + if tf.in_eager_mode() + slice_eager(input_, begin_, size_; name=name, Index=Index) + else + slice_graph(input_, begin_, size_; name=name, Index=Index) + end end - end end """ - unicode_decode(input; errors=replace, replacement_char=65533, replace_control_characters=false) + unicode_decode(input; errors=, replacement_char=65533, replace_control_characters=false) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - local desc - tf.with_op_name(name, "UnicodeDecode") do - desc = tf.NodeDescription("UnicodeDecode") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end + function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeDecode") do + desc = tf.NodeDescription("UnicodeDecode") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeDecode") input_ = convert(tf.EagerTensor, input_) @@ -5477,13 +5477,13 @@ begin return res end end - function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.in_eager_mode() - unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - else - unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, 
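Note the 1-based index handling in the Slice hunk above: `slice_graph` subtracts one from `begin_` (and `resource_scatter_min_graph` does the same for `indices_`), so callers pass ordinary 1-based Julia indices while TensorFlow sees 0-based ones. A hypothetical example:

    # Takes a 2x2 block starting at row 1, column 2 in Julia terms;
    # TensorFlow receives begin = [0, 1].
    y = slice(x, [1, 2], [2, 2])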
replacement_char=replacement_char, replace_control_characters=replace_control_characters) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.in_eager_mode() + unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end end - end end @@ -5493,23 +5493,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TakeDataset") do - desc = tf.NodeDescription("TakeDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TakeDataset") do + desc = tf.NodeDescription("TakeDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TakeDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -5529,13 +5529,13 @@ begin return res[1] end end - function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -5545,30 +5545,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do - desc = 
tf.NodeDescription("BoostedTreesMakeStatsSummary") - node_ids_ = convert(Tensor{Int32}, node_ids_) - gradients_ = convert(Tensor{Float32}, gradients_) - hessians_ = convert(Tensor{Float32}, hessians_) - bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_] - tf.add_input(desc, node_ids_) - tf.add_input(desc, gradients_) - tf.add_input(desc, hessians_) - tf.add_input(desc, bucketized_features_list_) - if max_splits !== nothing - desc["max_splits"] = Base.Int(max_splits) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - end - tf.Tensor(tf.Operation(desc)) + function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do + desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") + node_ids_ = convert(Tensor{Int32}, node_ids_) + gradients_ = convert(Tensor{Float32}, gradients_) + hessians_ = convert(Tensor{Float32}, hessians_) + bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_] + tf.add_input(desc, node_ids_) + tf.add_input(desc, gradients_) + tf.add_input(desc, hessians_) + tf.add_input(desc, bucketized_features_list_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeStatsSummary") node_ids_ = convert(tf.EagerTensor, node_ids_) @@ -5595,13 +5595,13 @@ begin return res[1] end end - function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) - else - boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) + else + boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) + end end - end end @@ -5611,35 +5611,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, 
unique=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "AllCandidateSampler") do - desc = tf.NodeDescription("AllCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "AllCandidateSampler") do + desc = tf.NodeDescription("AllCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("AllCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -5666,51 +5666,51 @@ begin return res end end - function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) - else - all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) + else + all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) + end end - end end """ - conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d_backprop_input(input_sizes, filter, out_backprop; use_cudnn_on_gpu=true, data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2DBackpropInput") do - desc = tf.NodeDescription("Conv2DBackpropInput") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = 
convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2DBackpropInput") do + desc = tf.NodeDescription("Conv2DBackpropInput") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropInput") input_sizes_ = convert(tf.EagerTensor, input_sizes_) @@ -5743,13 +5743,13 @@ begin return res[1] end end - function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -5759,21 +5759,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
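Conv2DBackpropInput is a representative attr-heavy op: in practice `strides` and `padding` are always supplied (the kernel registers no default for them), while `use_cudnn_on_gpu`, `data_format`, and `dilations` may be omitted to take the kernel defaults. A hypothetical gradient-of-input call:

    # input_sizes is an Int32 vector holding the shape of the original input.
    dx = conv2d_backprop_input(input_sizes, filter, dy;
                               strides=[1, 1, 1, 1], padding="SAME")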
dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "DatasetToSingleElement") do - desc = tf.NodeDescription("DatasetToSingleElement") - dataset_ = convert(Tensor{Any}, dataset_) - tf.add_input(desc, dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "DatasetToSingleElement") do + desc = tf.NodeDescription("DatasetToSingleElement") + dataset_ = convert(Tensor{Any}, dataset_) + tf.add_input(desc, dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("DatasetToSingleElement") dataset_ = convert(tf.EagerTensor, dataset_) @@ -5791,13 +5791,13 @@ begin return res[1] end end - function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -5807,23 +5807,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "CacheDataset") do - desc = tf.NodeDescription("CacheDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "CacheDataset") do + desc = tf.NodeDescription("CacheDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cache_dataset_eager(input_dataset_, filename_; name=nothing, 
output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("CacheDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -5843,13 +5843,13 @@ begin return res[1] end end - function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) - else - cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) + else + cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -5859,32 +5859,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") + gradients_ = convert(Tensor{Float32}, gradients_) + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") gradients_ = convert(tf.EagerTensor, gradients_) @@ -5908,13 +5908,13 @@ begin return res end end - function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, 
narrow_range=narrow_range) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - end end @@ -5924,34 +5924,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "FusedResizeAndPadConv2D") do - desc = tf.NodeDescription("FusedResizeAndPadConv2D") - input_ = convert(Tensor{Any}, input_) - size_ = convert(Tensor{Int32}, size_) - paddings_ = convert(Tensor{Int32}, paddings_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if resize_align_corners !== nothing - desc["resize_align_corners"] = Base.Bool(resize_align_corners) - end - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) + function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedResizeAndPadConv2D") do + desc = tf.NodeDescription("FusedResizeAndPadConv2D") + input_ = convert(Tensor{Any}, input_) + size_ = convert(Tensor{Int32}, size_) + paddings_ = convert(Tensor{Int32}, paddings_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if resize_align_corners !== nothing + desc["resize_align_corners"] = Base.Bool(resize_align_corners) + end + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end end + tf.Tensor(tf.Operation(desc)) + end function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("FusedResizeAndPadConv2D") input_ = convert(tf.EagerTensor, input_) @@ -5983,13 +5983,13 @@ begin return res[1] end end - function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) - else - fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) + else + fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) + end end - end end @@ -5999,50 +5999,50 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) - local desc - tf.with_op_name(name, "Batch") do - desc = tf.NodeDescription("Batch") - in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] - tf.add_input(desc, in_tensors_) - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end - if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if grad_timeout_micros !== nothing - desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if T !== nothing - desc["T"] = map(Base.identity, T) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + local desc + tf.with_op_name(name, "Batch") do + desc = tf.NodeDescription("Batch") + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + tf.add_input(desc, in_tensors_) + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if grad_timeout_micros !== nothing + desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if 
batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if T !== nothing + desc["T"] = map(Base.identity, T) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) desc = tf.EagerOp("Batch") in_tensors_ = convert(tf.EagerTensor, in_tensors_) @@ -6084,13 +6084,13 @@ begin return res end end - function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) - if tf.in_eager_mode() - batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) - else - batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + if tf.in_eager_mode() + batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) + else + batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) + end end - end end @@ -6100,25 +6100,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - local desc - tf.with_op_name(name, "CollectiveBcastRecv") do - desc = tf.NodeDescription("CollectiveBcastRecv") - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function 
collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastRecv") do + desc = tf.NodeDescription("CollectiveBcastRecv") + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function collective_bcast_recv_eager(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) desc = tf.EagerOp("CollectiveBcastRecv") if group_size !== nothing @@ -6140,13 +6140,13 @@ begin return res[1] end end - function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - if tf.in_eager_mode() - collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - else - collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.in_eager_mode() + collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end end - end end @@ -6156,22 +6156,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) - local desc - tf.with_op_name(name, "BatchToSpaceND") do - desc = tf.NodeDescription("BatchToSpaceND") - input_ = convert(Tensor{Any}, input_) - block_shape_ = convert(Tensor{Int32}, block_shape_) - crops_ = convert(Tensor{Int32}, crops_) - (crops_,) = tf.tf_promote(crops_) - (input_,) = tf.tf_promote(input_) - (block_shape_,) = tf.tf_promote(block_shape_) - tf.add_input(desc, input_) - tf.add_input(desc, block_shape_) - tf.add_input(desc, crops_) - end - tf.Tensor(tf.Operation(desc)) + function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + local desc + tf.with_op_name(name, "BatchToSpaceND") do + desc = tf.NodeDescription("BatchToSpaceND") + input_ = convert(Tensor{Any}, input_) + block_shape_ = convert(Tensor{Int32}, block_shape_) + crops_ = convert(Tensor{Int32}, crops_) + (crops_,) = tf.tf_promote(crops_) + (input_,) = tf.tf_promote(input_) + (block_shape_,) = tf.tf_promote(block_shape_) + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, crops_) end + tf.Tensor(tf.Operation(desc)) + end function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing) desc = tf.EagerOp("BatchToSpaceND") input_ = convert(tf.EagerTensor, input_) @@ -6190,13 +6190,13 @@ begin return res[1] end end - function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) - if tf.in_eager_mode() - batch_to_space_nd_eager(input_, block_shape_, crops_; name=name) - else - batch_to_space_nd_graph(input_, block_shape_, crops_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) + if tf.in_eager_mode() + batch_to_space_nd_eager(input_, block_shape_, crops_; name=name) + else + batch_to_space_nd_graph(input_, block_shape_, crops_; name=name) + end end - end end @@ -6206,15 +6206,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function loop_cond_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "LoopCond") do - desc = tf.NodeDescription("LoopCond") - input_ = convert(Tensor{Bool}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function loop_cond_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "LoopCond") do + desc = tf.NodeDescription("LoopCond") + input_ = convert(Tensor{Bool}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function loop_cond_eager(input_; name=nothing) desc = tf.EagerOp("LoopCond") input_ = convert(tf.EagerTensor, input_) @@ -6226,38 +6226,38 @@ begin return res[1] end end - function loop_cond(input_; name=nothing) - if tf.in_eager_mode() - loop_cond_eager(input_; name=name) - else - loop_cond_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function loop_cond(input_; name=nothing) + if tf.in_eager_mode() + loop_cond_eager(input_; name=name) + else + loop_cond_graph(input_; name=name) + end end - end end """ - depth_to_space(input; data_format=NHWC) + depth_to_space(input; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "DepthToSpace") do - desc = tf.NodeDescription("DepthToSpace") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "DepthToSpace") do + desc = tf.NodeDescription("DepthToSpace") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("DepthToSpace") input_ = convert(tf.EagerTensor, input_) @@ -6276,13 +6276,13 @@ begin return res[1] end end - function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) - if tf.in_eager_mode() - depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format) - else - depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) + if tf.in_eager_mode() + depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format) + else + depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format) + end end - end end @@ -6292,19 +6292,19 @@ 
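[Every hunk in this patch applies the same mechanical rewrite: each generated `_graph` builder and its `tf.@op` wrapper are re-emitted (now tagged generate_ops.jl:262 rather than :221), and each op gains an `_eager` twin plus a public dispatcher that selects between them with `tf.in_eager_mode()`. A minimal sketch of the resulting triple, using a hypothetical single-input op `my_op` / "MyOp"; the `tf` helpers (`with_op_name`, `NodeDescription`, `add_input`, `EagerOp`, `execute`, `@op`) and the unqualified `Tensor{Any}` follow the conventions these generated files already use, so this is an illustration of the pattern, not part of the patch:

    # Graph mode: describe a node symbolically and return a graph tensor.
    function my_op_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "MyOp") do
            desc = tf.NodeDescription("MyOp")
            x_ = convert(Tensor{Any}, x_)
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))
    end

    # Eager mode: build the op directly and execute it immediately.
    function my_op_eager(x_; name=nothing)
        desc = tf.EagerOp("MyOp")
        x_ = convert(tf.EagerTensor, x_)
        tf.add_input(desc, x_)
        res = tf.execute(desc)
        return res[1]          # single-output ops unwrap the result vector
    end

    # Public entry point: dispatch on the current execution mode.
    tf.@op function my_op(x_; name=nothing)
        if tf.in_eager_mode()
            my_op_eager(x_; name=name)
        else
            my_op_graph(x_; name=name)
        end
    end
]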
end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) - local desc - tf.with_op_name(name, "DestroyTemporaryVariable") do - desc = tf.NodeDescription("DestroyTemporaryVariable") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if var_name !== nothing - desc["var_name"] = Base.String(var_name) - end + function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "DestroyTemporaryVariable") do + desc = tf.NodeDescription("DestroyTemporaryVariable") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if var_name !== nothing + desc["var_name"] = Base.String(var_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing) desc = tf.EagerOp("DestroyTemporaryVariable") ref_ = convert(tf.EagerTensor, ref_) @@ -6320,64 +6320,64 @@ begin return res[1] end end - function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) - if tf.in_eager_mode() - destroy_temporary_variable_eager(ref_; name=name, var_name=var_name) - else - destroy_temporary_variable_graph(ref_; name=name, var_name=var_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) + if tf.in_eager_mode() + destroy_temporary_variable_eager(ref_; name=name, var_name=var_name) + else + destroy_temporary_variable_graph(ref_; name=name, var_name=var_name) + end end - end end """ - cudnn_rnn(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnn(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "CudnnRNN") do - desc = tf.NodeDescription("CudnnRNN") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end + function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNN") do + desc 
= tf.NodeDescription("CudnnRNN") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) + if seed !== nothing + desc["seed"] = Base.Int(seed) end - out + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) end + out + end function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNN") input_ = convert(tf.EagerTensor, input_) @@ -6420,13 +6420,13 @@ begin return res end end - function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.in_eager_mode() - cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - else - cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.in_eager_mode() + cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end end - end end @@ -6436,16 +6436,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "RefIdentity") do - desc = tf.NodeDescription("RefIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function ref_identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "RefIdentity") do + desc = tf.NodeDescription("RefIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function ref_identity_eager(input_; 
name=nothing) desc = tf.EagerOp("RefIdentity") input_ = convert(tf.EagerTensor, input_) @@ -6458,49 +6458,49 @@ begin return res[1] end end - function ref_identity(input_; name=nothing) - if tf.in_eager_mode() - ref_identity_eager(input_; name=name) - else - ref_identity_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_identity(input_; name=nothing) + if tf.in_eager_mode() + ref_identity_eager(input_; name=name) + else + ref_identity_graph(input_; name=name) + end end - end end """ - max_pool3d_grad(orig_input, orig_output, grad; data_format=NDHWC) + max_pool3d_grad(orig_input, orig_output, grad; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3DGrad") do - desc = tf.NodeDescription("MaxPool3DGrad") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - (grad_,) = tf.tf_promote(grad_) - (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGrad") do + desc = tf.NodeDescription("MaxPool3DGrad") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + (grad_,) = tf.tf_promote(grad_) + (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGrad") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -6531,13 +6531,13 @@ begin return res[1] end end - function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad(orig_input_, 
orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -6547,31 +6547,31 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -6599,13 +6599,13 @@ begin return res[1] end end - function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, 
gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -6615,29 +6615,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropInput") do - desc = tf.NodeDescription("Conv3DBackpropInput") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInput") do + desc = tf.NodeDescription("Conv3DBackpropInput") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInput") input_ = convert(tf.EagerTensor, input_) @@ -6665,13 +6665,13 @@ begin return res[1] end end - function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - else - conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) 
+ if tf.in_eager_mode() + conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + else + conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + end end - end end @@ -6681,28 +6681,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PaddingFIFOQueueV2") do - desc = tf.NodeDescription("PaddingFIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueueV2") do + desc = tf.NodeDescription("PaddingFIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PaddingFIFOQueueV2") if component_types !== nothing @@ -6727,13 +6727,13 @@ begin return res[1] end end - function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -6743,16 +6743,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_exit_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "RefExit") do - desc = tf.NodeDescription("RefExit") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, 
data_) - end - tf.Tensor(tf.Operation(desc)) + function ref_exit_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "RefExit") do + desc = tf.NodeDescription("RefExit") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) end + tf.Tensor(tf.Operation(desc)) + end function ref_exit_eager(data_; name=nothing) desc = tf.EagerOp("RefExit") data_ = convert(tf.EagerTensor, data_) @@ -6765,13 +6765,13 @@ begin return res[1] end end - function ref_exit(data_; name=nothing) - if tf.in_eager_mode() - ref_exit_eager(data_; name=name) - else - ref_exit_graph(data_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_exit(data_; name=nothing) + if tf.in_eager_mode() + ref_exit_eager(data_; name=name) + else + ref_exit_graph(data_; name=name) + end end - end end @@ -6781,28 +6781,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapClear") do - desc = tf.NodeDescription("MapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapClear") do + desc = tf.NodeDescription("MapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapClear") if capacity !== nothing @@ -6827,13 +6827,13 @@ begin return res[1] end end - function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -6843,17 +6843,17 @@ 
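[The multi-output ops touched in nearby hunks (FakeQuantWithMinMaxVarsGradient, Batch, CudnnRNN, ShapeN) get the same treatment, except that the graph builder collects one symbolic tensor per output instead of wrapping a single `tf.Operation`, and the eager wrapper returns the whole `res` vector rather than `res[1]`. A minimal sketch of that graph-side collection loop for a hypothetical three-output op, assuming `desc` has been built as in the hunks above:

    out = tf.Tensor[]                  # one entry per declared output
    op = tf.Operation(desc)
    for out_idx = 1:3                  # 3 = this op's output arity
        push!(out, tf.Tensor(op, out_idx))
    end
    out                                # returned in place of tf.Tensor(tf.Operation(desc))
]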
end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_wav_graph(audio_, sample_rate_; name=nothing) - local desc - tf.with_op_name(name, "EncodeWav") do - desc = tf.NodeDescription("EncodeWav") - audio_ = convert(Tensor{Float32}, audio_) - sample_rate_ = convert(Tensor{Int32}, sample_rate_) - tf.add_input(desc, audio_) - tf.add_input(desc, sample_rate_) - end - tf.Tensor(tf.Operation(desc)) + function encode_wav_graph(audio_, sample_rate_; name=nothing) + local desc + tf.with_op_name(name, "EncodeWav") do + desc = tf.NodeDescription("EncodeWav") + audio_ = convert(Tensor{Float32}, audio_) + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + tf.add_input(desc, audio_) + tf.add_input(desc, sample_rate_) end + tf.Tensor(tf.Operation(desc)) + end function encode_wav_eager(audio_, sample_rate_; name=nothing) desc = tf.EagerOp("EncodeWav") audio_ = convert(tf.EagerTensor, audio_) @@ -6867,13 +6867,13 @@ begin return res[1] end end - function encode_wav(audio_, sample_rate_; name=nothing) - if tf.in_eager_mode() - encode_wav_eager(audio_, sample_rate_; name=name) - else - encode_wav_graph(audio_, sample_rate_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing) + if tf.in_eager_mode() + encode_wav_eager(audio_, sample_rate_; name=name) + else + encode_wav_graph(audio_, sample_rate_; name=name) + end end - end end @@ -6883,20 +6883,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) - local desc - tf.with_op_name(name, "TensorSummaryV2") do - desc = tf.NodeDescription("TensorSummaryV2") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Any}, tensor_) - serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, serialized_summary_metadata_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + local desc + tf.with_op_name(name, "TensorSummaryV2") do + desc = tf.NodeDescription("TensorSummaryV2") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Any}, tensor_) + serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, serialized_summary_metadata_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing) desc = tf.EagerOp("TensorSummaryV2") tag_ = convert(tf.EagerTensor, tag_) @@ -6913,13 +6913,13 @@ begin return res[1] end end - function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) - if tf.in_eager_mode() - tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name) - else - tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) + if tf.in_eager_mode() + tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name) + else + tensor_summary_v2_graph(tag_, tensor_, 
serialized_summary_metadata_; name=name) + end end - end end @@ -6929,23 +6929,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueUpTo") do - desc = tf.NodeDescription("QueueDequeueUpTo") - handle_ = convert(Tensor{String}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpTo") do + desc = tf.NodeDescription("QueueDequeueUpTo") + handle_ = convert(Tensor{String}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueUpTo") handle_ = convert(tf.EagerTensor, handle_) @@ -6965,13 +6965,13 @@ begin return res[1] end end - function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -6981,21 +6981,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) - local desc - tf.with_op_name(name, "MatrixBandPart") do - desc = tf.NodeDescription("MatrixBandPart") - input_ = convert(Tensor{Any}, input_) - num_lower_ = convert(Tensor{Int64}, num_lower_) - num_upper_ = convert(Tensor{Int64}, num_upper_) - (input_,) = tf.tf_promote(input_) - (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "MatrixBandPart") do + desc = tf.NodeDescription("MatrixBandPart") + input_ = convert(Tensor{Any}, input_) + num_lower_ = convert(Tensor{Int64}, num_lower_) + num_upper_ = convert(Tensor{Int64}, num_upper_) + (input_,) = tf.tf_promote(input_) + (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_) + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) end + 
tf.Tensor(tf.Operation(desc)) + end function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("MatrixBandPart") input_ = convert(tf.EagerTensor, input_) @@ -7014,13 +7014,13 @@ begin return res[1] end end - function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) - if tf.in_eager_mode() - matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) - else - matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + if tf.in_eager_mode() + matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) + else + matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + end end - end end @@ -7030,22 +7030,22 @@ end Copy Op. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - local desc - tf.with_op_name(name, "Copy") do - desc = tf.NodeDescription("Copy") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) - end + function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "Copy") do + desc = tf.NodeDescription("Copy") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("Copy") input_ = convert(tf.EagerTensor, input_) @@ -7064,13 +7064,13 @@ begin return res[1] end end - function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - if tf.in_eager_mode() - copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - else - copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + if tf.in_eager_mode() + copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + else + copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + end end - end end @@ -7080,27 +7080,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "ShapeN") do - desc = tf.NodeDescription("ShapeN") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "ShapeN") do + desc = tf.NodeDescription("ShapeN") + input_ = [convert(Tensor{Any}, x) 
for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing) desc = tf.EagerOp("ShapeN") input_ = convert(tf.EagerTensor, input_) @@ -7119,13 +7119,13 @@ begin return res end end - function shape_n(input_; name=nothing, N=nothing, out_type=nothing) - if tf.in_eager_mode() - shape_n_eager(input_; name=name, N=N, out_type=out_type) - else - shape_n_graph(input_; name=name, N=N, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing) + if tf.in_eager_mode() + shape_n_eager(input_; name=name, N=N, out_type=out_type) + else + shape_n_graph(input_; name=name, N=N, out_type=out_type) + end end - end end @@ -7135,43 +7135,43 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - local desc - tf.with_op_name(name, "ExperimentalParseExampleDataset") do - desc = tf.NodeDescription("ExperimentalParseExampleDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, dense_defaults_) - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ExperimentalParseExampleDataset") do + desc = tf.NodeDescription("ExperimentalParseExampleDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, dense_defaults_) + if sparse_keys !== nothing + desc["sparse_keys"] = 
map(Base.identity, sparse_keys) + end + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ExperimentalParseExampleDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -7211,13 +7211,13 @@ begin return res[1] end end - function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - if tf.in_eager_mode() - experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - else - experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.in_eager_mode() + experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end end - end end @@ -7227,21 +7227,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_graph(concat_dim_, values_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "Concat") do - desc = tf.NodeDescription("Concat") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = 
Base.Int(N) - end + function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Concat") do + desc = tf.NodeDescription("Concat") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function concat_eager(concat_dim_, values_; name=nothing, N=nothing) desc = tf.EagerOp("Concat") concat_dim_ = convert(tf.EagerTensor, concat_dim_) @@ -7259,38 +7259,38 @@ begin return res[1] end end - function concat(concat_dim_, values_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_eager(concat_dim_, values_; name=name, N=N) - else - concat_graph(concat_dim_, values_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_eager(concat_dim_, values_; name=name, N=N) + else + concat_graph(concat_dim_, values_; name=name, N=N) + end end - end end """ - data_format_dim_map(x; src_format=NHWC, dst_format=NCHW) + data_format_dim_map(x; src_format=, dst_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) - local desc - tf.with_op_name(name, "DataFormatDimMap") do - desc = tf.NodeDescription("DataFormatDimMap") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) - end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end + function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatDimMap") do + desc = tf.NodeDescription("DataFormatDimMap") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatDimMap") x_ = convert(tf.EagerTensor, x_) @@ -7309,13 +7309,13 @@ begin return res[1] end end - function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) - if tf.in_eager_mode() - data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format) - else - data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) + if tf.in_eager_mode() + data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end end - end end @@ -7325,19 +7325,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, 
"IdentityReader") do - desc = tf.NodeDescription("IdentityReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReader") do + desc = tf.NodeDescription("IdentityReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function identity_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("IdentityReader") if container !== nothing @@ -7353,13 +7353,13 @@ begin return res[1] end end - function identity_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - identity_reader_eager(; name=name, container=container, shared_name=shared_name) - else - identity_reader_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + identity_reader_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -7369,16 +7369,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softplus_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Softplus") do - desc = tf.NodeDescription("Softplus") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function softplus_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Softplus") do + desc = tf.NodeDescription("Softplus") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function softplus_eager(features_; name=nothing) desc = tf.EagerOp("Softplus") features_ = convert(tf.EagerTensor, features_) @@ -7391,13 +7391,13 @@ begin return res[1] end end - function softplus(features_; name=nothing) - if tf.in_eager_mode() - softplus_eager(features_; name=name) - else - softplus_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus(features_; name=nothing) + if tf.in_eager_mode() + softplus_eager(features_; name=name) + else + softplus_graph(features_; name=name) + end end - end end @@ -7407,33 +7407,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do - desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, l1_, 
l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -7465,13 +7465,13 @@ begin return res[1] end end - function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -7481,64 +7481,64 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSingleSequenceExample") do - desc = tf.NodeDescription("ParseSingleSequenceExample") - serialized_ = convert(Tensor{String}, serialized_) - feature_list_dense_missing_assumed_empty_ = 
convert(Tensor{String}, feature_list_dense_missing_assumed_empty_) - context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_] - context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_] - feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_] - feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_] - context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] - debug_name_ = convert(Tensor{String}, debug_name_) - tf.add_input(desc, serialized_) - tf.add_input(desc, feature_list_dense_missing_assumed_empty_) - tf.add_input(desc, context_sparse_keys_) - tf.add_input(desc, context_dense_keys_) - tf.add_input(desc, feature_list_sparse_keys_) - tf.add_input(desc, feature_list_dense_keys_) - tf.add_input(desc, context_dense_defaults_) - tf.add_input(desc, debug_name_) - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) - end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) - end - if Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:8 - push!(out, tf.Tensor(op, out_idx)) - end - out + function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleSequenceExample") do + desc = tf.NodeDescription("ParseSingleSequenceExample") + serialized_ = convert(Tensor{String}, serialized_) + feature_list_dense_missing_assumed_empty_ = convert(Tensor{String}, feature_list_dense_missing_assumed_empty_) + context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_] + context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_] + feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_] + feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_] + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + debug_name_ = convert(Tensor{String}, debug_name_) + tf.add_input(desc, serialized_) + tf.add_input(desc, 
feature_list_dense_missing_assumed_empty_) + tf.add_input(desc, context_sparse_keys_) + tf.add_input(desc, context_dense_keys_) + tf.add_input(desc, feature_list_sparse_keys_) + tf.add_input(desc, feature_list_dense_keys_) + tf.add_input(desc, context_dense_defaults_) + tf.add_input(desc, debug_name_) + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:8 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSingleSequenceExample") serialized_ = convert(tf.EagerTensor, serialized_) @@ -7594,13 +7594,13 @@ begin return res end end - function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - if tf.in_eager_mode() - parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - else - parse_single_sequence_example_graph(serialized_, 
feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.in_eager_mode() + parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + end end - end end @@ -7610,16 +7610,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDiag") do - desc = tf.NodeDescription("MatrixDiag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiag") do + desc = tf.NodeDescription("MatrixDiag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("MatrixDiag") diagonal_ = convert(tf.EagerTensor, diagonal_) @@ -7632,13 +7632,13 @@ begin return res[1] end end - function matrix_diag(diagonal_; 
name=nothing)
-        if tf.in_eager_mode()
-            matrix_diag_eager(diagonal_; name=name)
-        else
-            matrix_diag_graph(diagonal_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag(diagonal_; name=nothing)
+        if tf.in_eager_mode()
+            matrix_diag_eager(diagonal_; name=name)
+        else
+            matrix_diag_graph(diagonal_; name=name)
+        end
     end
-    end
 end
@@ -7648,14 +7648,13 @@ end
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fact_graph(; name=nothing)
-        local desc
-        tf.with_op_name(name, "Fact") do
-            desc
-            tf.NodeDescription("Fact")
-        end
-        tf.Tensor(tf.Operation(desc))
+    function fact_graph(; name=nothing)
+        local desc
+        tf.with_op_name(name, "Fact") do
+            desc = tf.NodeDescription("Fact")
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function fact_eager(; name=nothing)
         desc = tf.EagerOp("Fact")
         res = tf.execute(desc)
@@ -7665,48 +7665,48 @@ begin
             return res[1]
         end
     end
-    function fact(; name=nothing)
-        if tf.in_eager_mode()
-            fact_eager(; name=name)
-        else
-            fact_graph(; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fact(; name=nothing)
+        if tf.in_eager_mode()
+            fact_eager(; name=name)
+        else
+            fact_graph(; name=name)
+        end
     end
-    end
 end
 """
-    max_pool_grad_grad(orig_input, orig_output, grad; data_format=NHWC)
+    max_pool_grad_grad(orig_input, orig_output, grad; data_format=)
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
-        local desc
-        tf.with_op_name(name, "MaxPoolGradGrad") do
-            desc = tf.NodeDescription("MaxPoolGradGrad")
-            orig_input_ = convert(Tensor{Any}, orig_input_)
-            orig_output_ = convert(Tensor{Any}, orig_output_)
-            grad_ = convert(Tensor{Any}, grad_)
-            (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
-            tf.add_input(desc, orig_input_)
-            tf.add_input(desc, orig_output_)
-            tf.add_input(desc, grad_)
-            if ksize !== nothing
-                desc["ksize"] = map(Base.identity, ksize)
-            end
-            if strides !== nothing
-                desc["strides"] = map(Base.identity, strides)
-            end
-            if padding !== nothing
-                desc["padding"] = Base.String(padding)
-            end
-            if data_format !== nothing
-                desc["data_format"] = Base.String(data_format)
-            end
+    function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
+        local desc
+        tf.with_op_name(name, "MaxPoolGradGrad") do
+            desc = tf.NodeDescription("MaxPoolGradGrad")
+            orig_input_ = convert(Tensor{Any}, orig_input_)
+            orig_output_ = convert(Tensor{Any}, orig_output_)
+            grad_ = convert(Tensor{Any}, grad_)
+            (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_)
+            tf.add_input(desc, orig_input_)
+            tf.add_input(desc, orig_output_)
+            tf.add_input(desc, grad_)
+            if ksize !== nothing
+                desc["ksize"] = map(Base.identity, ksize)
+            end
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
+            end
+            if data_format !== nothing
+                desc["data_format"] = Base.String(data_format)
            end
-        end
-        tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing)
         desc =
tf.EagerOp("MaxPoolGradGrad") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -7737,13 +7737,13 @@ begin return res[1] end end - function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -7753,21 +7753,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBilinearGrad") do - desc = tf.NodeDescription("ResizeBilinearGrad") - grads_ = convert(Tensor{Float32}, grads_) - original_image_ = convert(Tensor{Any}, original_image_) - (original_image_,) = tf.tf_promote(original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinearGrad") do + desc = tf.NodeDescription("ResizeBilinearGrad") + grads_ = convert(Tensor{Float32}, grads_) + original_image_ = convert(Tensor{Any}, original_image_) + (original_image_,) = tf.tf_promote(original_image_) + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinearGrad") grads_ = convert(tf.EagerTensor, grads_) @@ -7785,13 +7785,13 @@ begin return res[1] end end - function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) - else - resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end end - end end @@ -7801,23 +7801,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) - 
local desc - tf.with_op_name(name, "BatchToSpace") do - desc = tf.NodeDescription("BatchToSpace") - input_ = convert(Tensor{Any}, input_) - crops_ = convert(Tensor{Int32}, crops_) - crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1) - (input_,) = tf.tf_promote(input_) - (crops_,) = tf.tf_promote(crops_) - tf.add_input(desc, input_) - tf.add_input(desc, crops_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end + function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "BatchToSpace") do + desc = tf.NodeDescription("BatchToSpace") + input_ = convert(Tensor{Any}, input_) + crops_ = convert(Tensor{Int32}, crops_) + crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1) + (input_,) = tf.tf_promote(input_) + (crops_,) = tf.tf_promote(crops_) + tf.add_input(desc, input_) + tf.add_input(desc, crops_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) desc = tf.EagerOp("BatchToSpace") input_ = convert(tf.EagerTensor, input_) @@ -7836,13 +7836,13 @@ begin return res[1] end end - function batch_to_space(input_, crops_; name=nothing, block_size=nothing) - if tf.in_eager_mode() - batch_to_space_eager(input_, crops_; name=name, block_size=block_size) - else - batch_to_space_graph(input_, crops_; name=name, block_size=block_size) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing) + if tf.in_eager_mode() + batch_to_space_eager(input_, crops_; name=name, block_size=block_size) + else + batch_to_space_graph(input_, crops_; name=name, block_size=block_size) + end end - end end @@ -7852,18 +7852,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) - local desc - tf.with_op_name(name, "OptionalFromValue") do - desc = tf.NodeDescription("OptionalFromValue") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end + function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) + local desc + tf.with_op_name(name, "OptionalFromValue") do + desc = tf.NodeDescription("OptionalFromValue") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) desc = tf.EagerOp("OptionalFromValue") components_ = convert(tf.EagerTensor, components_) @@ -7878,13 +7878,13 @@ begin return res[1] end end - function optional_from_value(components_; name=nothing, Toutput_types=nothing) - if tf.in_eager_mode() - optional_from_value_eager(components_; name=name, Toutput_types=Toutput_types) - else - optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing) + if tf.in_eager_mode() + optional_from_value_eager(components_; 
name=name, Toutput_types=Toutput_types) + else + optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types) + end end - end end @@ -7894,18 +7894,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function xlogy_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Xlogy") do - desc = tf.NodeDescription("Xlogy") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function xlogy_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xlogy") do + desc = tf.NodeDescription("Xlogy") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function xlogy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xlogy") x_ = convert(tf.EagerTensor, x_) @@ -7921,13 +7921,13 @@ begin return res[1] end end - function xlogy(x_, y_; name=nothing) - if tf.in_eager_mode() - xlogy_eager(x_, y_; name=name) - else - xlogy_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xlogy(x_, y_; name=nothing) + if tf.in_eager_mode() + xlogy_eager(x_, y_; name=name) + else + xlogy_graph(x_, y_; name=name) + end end - end end @@ -7937,18 +7937,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cross_graph(a_, b_; name=nothing) - local desc - tf.with_op_name(name, "Cross") do - desc = tf.NodeDescription("Cross") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - end - tf.Tensor(tf.Operation(desc)) + function cross_graph(a_, b_; name=nothing) + local desc + tf.with_op_name(name, "Cross") do + desc = tf.NodeDescription("Cross") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + (a_, b_) = tf.tf_promote(a_, b_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) end + tf.Tensor(tf.Operation(desc)) + end function cross_eager(a_, b_; name=nothing) desc = tf.EagerOp("Cross") a_ = convert(tf.EagerTensor, a_) @@ -7964,13 +7964,13 @@ begin return res[1] end end - function cross(a_, b_; name=nothing) - if tf.in_eager_mode() - cross_eager(a_, b_; name=name) - else - cross_graph(a_, b_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross(a_, b_; name=nothing) + if tf.in_eager_mode() + cross_eager(a_, b_; name=name) + else + cross_graph(a_, b_; name=name) + end end - end end @@ -7980,18 +7980,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_and_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseAnd") do - desc = tf.NodeDescription("BitwiseAnd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function bitwise_and_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseAnd") do + desc = tf.NodeDescription("BitwiseAnd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function bitwise_and_eager(x_, y_; name=nothing) desc = 
tf.EagerOp("BitwiseAnd") x_ = convert(tf.EagerTensor, x_) @@ -8007,13 +8007,13 @@ begin return res[1] end end - function bitwise_and(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_and_eager(x_, y_; name=name) - else - bitwise_and_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_and(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_and_eager(x_, y_; name=name) + else + bitwise_and_graph(x_, y_; name=name) + end end - end end @@ -8023,19 +8023,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_to_graph(input_, shape_; name=nothing) - local desc - tf.with_op_name(name, "BroadcastTo") do - desc = tf.NodeDescription("BroadcastTo") - input_ = convert(Tensor{Any}, input_) - shape_ = convert(Tensor{Int32}, shape_) - (input_,) = tf.tf_promote(input_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, input_) - tf.add_input(desc, shape_) - end - tf.Tensor(tf.Operation(desc)) + function broadcast_to_graph(input_, shape_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastTo") do + desc = tf.NodeDescription("BroadcastTo") + input_ = convert(Tensor{Any}, input_) + shape_ = convert(Tensor{Int32}, shape_) + (input_,) = tf.tf_promote(input_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, input_) + tf.add_input(desc, shape_) end + tf.Tensor(tf.Operation(desc)) + end function broadcast_to_eager(input_, shape_; name=nothing) desc = tf.EagerOp("BroadcastTo") input_ = convert(tf.EagerTensor, input_) @@ -8051,13 +8051,13 @@ begin return res[1] end end - function broadcast_to(input_, shape_; name=nothing) - if tf.in_eager_mode() - broadcast_to_eager(input_, shape_; name=name) - else - broadcast_to_graph(input_, shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_to(input_, shape_; name=nothing) + if tf.in_eager_mode() + broadcast_to_eager(input_, shape_; name=name) + else + broadcast_to_graph(input_, shape_; name=name) + end end - end end @@ -8067,18 +8067,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function elu_grad_graph(gradients_, outputs_; name=nothing) - local desc - tf.with_op_name(name, "EluGrad") do - desc = tf.NodeDescription("EluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - outputs_ = convert(Tensor{Any}, outputs_) - (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) - end - tf.Tensor(tf.Operation(desc)) + function elu_grad_graph(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "EluGrad") do + desc = tf.NodeDescription("EluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + outputs_ = convert(Tensor{Any}, outputs_) + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) end + tf.Tensor(tf.Operation(desc)) + end function elu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("EluGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -8094,75 +8094,75 @@ begin return res[1] end end - function elu_grad(gradients_, outputs_; name=nothing) - if tf.in_eager_mode() - elu_grad_eager(gradients_, outputs_; name=name) - else - elu_grad_graph(gradients_, outputs_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing) + if 
tf.in_eager_mode() + elu_grad_eager(gradients_, outputs_; name=name) + else + elu_grad_graph(gradients_, outputs_; name=name) + end end - end end """ - cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_backprop(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNBackprop") do - desc = tf.NodeDescription("CudnnRNNBackprop") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - output_ = convert(Tensor{Any}, output_) - output_h_ = convert(Tensor{Any}, output_h_) - output_c_ = convert(Tensor{Any}, output_c_) - output_backprop_ = convert(Tensor{Any}, output_backprop_) - output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) - output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) - reserve_space_ = convert(Tensor{Any}, reserve_space_) - (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackprop") do + desc = tf.NodeDescription("CudnnRNNBackprop") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + output_ = convert(Tensor{Any}, output_) + output_h_ = convert(Tensor{Any}, output_h_) + output_c_ = convert(Tensor{Any}, output_c_) + output_backprop_ = convert(Tensor{Any}, output_backprop_) + output_h_backprop_ = 
convert(Tensor{Any}, output_h_backprop_) + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + reserve_space_ = convert(Tensor{Any}, reserve_space_) + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackprop") input_ = convert(tf.EagerTensor, input_) @@ -8223,13 +8223,13 @@ begin return res end end - function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, 
output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end end @@ -8239,18 +8239,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) - local desc - tf.with_op_name(name, "StringToHashBucketFast") do - desc = tf.NodeDescription("StringToHashBucketFast") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end + function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketFast") do + desc = tf.NodeDescription("StringToHashBucketFast") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucketFast") input_ = convert(tf.EagerTensor, input_) @@ -8265,13 +8265,13 @@ begin return res[1] end end - function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets) - else - string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets) + end end - end end @@ -8281,28 +8281,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "MutableHashTable") do - desc = tf.NodeDescription("MutableHashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end + function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTable") do + desc = tf.NodeDescription("MutableHashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) end - 
tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mutable_hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("MutableHashTable") if container !== nothing @@ -8327,13 +8327,13 @@ begin return res[1] end end - function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - end end @@ -8343,16 +8343,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Relu") do - desc = tf.NodeDescription("Relu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function relu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu") do + desc = tf.NodeDescription("Relu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function relu_eager(features_; name=nothing) desc = tf.EagerOp("Relu") features_ = convert(tf.EagerTensor, features_) @@ -8365,13 +8365,13 @@ begin return res[1] end end - function relu(features_; name=nothing) - if tf.in_eager_mode() - relu_eager(features_; name=name) - else - relu_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu(features_; name=nothing) + if tf.in_eager_mode() + relu_eager(features_; name=name) + else + relu_graph(features_; name=name) + end end - end end @@ -8381,21 +8381,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nth_element_graph(input_, n_; name=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "NthElement") do - desc = tf.NodeDescription("NthElement") - input_ = convert(Tensor{Any}, input_) - n_ = convert(Tensor{Int32}, n_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, n_) - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end + function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "NthElement") do + desc = tf.NodeDescription("NthElement") + input_ = convert(Tensor{Any}, input_) + n_ = convert(Tensor{Int32}, n_) + (input_,) = 
tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, n_) + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function nth_element_eager(input_, n_; name=nothing, reverse=nothing) desc = tf.EagerOp("NthElement") input_ = convert(tf.EagerTensor, input_) @@ -8413,13 +8413,13 @@ begin return res[1] end end - function nth_element(input_, n_; name=nothing, reverse=nothing) - if tf.in_eager_mode() - nth_element_eager(input_, n_; name=name, reverse=reverse) - else - nth_element_graph(input_, n_; name=name, reverse=reverse) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing) + if tf.in_eager_mode() + nth_element_eager(input_, n_; name=name, reverse=reverse) + else + nth_element_graph(input_, n_; name=name, reverse=reverse) + end end - end end @@ -8429,16 +8429,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softsign_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Softsign") do - desc = tf.NodeDescription("Softsign") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function softsign_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Softsign") do + desc = tf.NodeDescription("Softsign") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function softsign_eager(features_; name=nothing) desc = tf.EagerOp("Softsign") features_ = convert(tf.EagerTensor, features_) @@ -8451,13 +8451,13 @@ begin return res[1] end end - function softsign(features_; name=nothing) - if tf.in_eager_mode() - softsign_eager(features_; name=name) - else - softsign_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign(features_; name=nothing) + if tf.in_eager_mode() + softsign_eager(features_; name=name) + else + softsign_graph(features_; name=name) + end end - end end @@ -8467,40 +8467,40 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - local desc - tf.with_op_name(name, "MutableDenseHashTable") do - desc = tf.NodeDescription("MutableDenseHashTable") - empty_key_ = convert(Tensor{Any}, empty_key_) - (empty_key_,) = tf.tf_promote(empty_key_) - tf.add_input(desc, empty_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) - end - if max_load_factor !== nothing - desc["max_load_factor"] 
= Base.identity(max_load_factor) - end - end - tf.Tensor(tf.Operation(desc)) + function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTable") do + desc = tf.NodeDescription("MutableDenseHashTable") + empty_key_ = convert(Tensor{Any}, empty_key_) + (empty_key_,) = tf.tf_promote(empty_key_) + tf.add_input(desc, empty_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end end + tf.Tensor(tf.Operation(desc)) + end function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTable") empty_key_ = convert(tf.EagerTensor, empty_key_) @@ -8537,13 +8537,13 @@ begin return res[1] end end - function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - if tf.in_eager_mode() - mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) - else - mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + if tf.in_eager_mode() + mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end end 
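# [Editor's sketch] Every section of this generated file repeats one pattern: a
# *_graph builder, a *_eager executor, and a tf.@op dispatcher that chooses
# between them with tf.in_eager_mode(). Condensed below for a hypothetical
# unary op "Foo"; the tf.* helpers are the ones this patch itself uses, while
# "Foo"/foo_* are illustrative names, not a real op, and the exact attr line
# (desc["T"] = ...) is inferred from the surrounding generated code:
#
#   function foo_graph(x_; name=nothing)
#       local desc
#       tf.with_op_name(name, "Foo") do
#           desc = tf.NodeDescription("Foo")   # graph mode: describe a node...
#           x_ = convert(Tensor{Any}, x_)
#           (x_,) = tf.tf_promote(x_)          # promote inputs to a common dtype
#           tf.add_input(desc, x_)
#       end
#       tf.Tensor(tf.Operation(desc))          # ...then add it to the graph
#   end
#
#   function foo_eager(x_; name=nothing)
#       desc = tf.EagerOp("Foo")               # eager mode: build the op directly
#       x_ = convert(tf.EagerTensor, x_)
#       tf.add_input(desc, x_)
#       desc["T"] = tf.data_type(x_)           # dtype attrs come from the inputs
#       res = tf.execute(desc)                 # run the kernel immediately
#       return res[1]                          # single-output op
#   end
#
#   tf.@op function foo(x_; name=nothing)      # public entry point
#       if tf.in_eager_mode()
#           foo_eager(x_; name=name)
#       else
#           foo_graph(x_; name=name)
#       end
#   end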
- end end @@ -8553,14 +8553,14 @@ end An op that shuts down a running distributed TPU system. The Op returns """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _shutdown_distributed_tpu_graph(; name=nothing) - local desc - tf.with_op_name(name, "_ShutdownDistributedTPU") do - desc - tf.NodeDescription("_ShutdownDistributedTPU") - end - tf.Tensor(tf.Operation(desc)) + function _shutdown_distributed_tpu_graph(; name=nothing) + local desc + tf.with_op_name(name, "_ShutdownDistributedTPU") do + desc + tf.NodeDescription("_ShutdownDistributedTPU") end + tf.Tensor(tf.Operation(desc)) + end function _shutdown_distributed_tpu_eager(; name=nothing) desc = tf.EagerOp("_ShutdownDistributedTPU") res = tf.execute(desc) @@ -8570,13 +8570,13 @@ begin return res[1] end end - function _shutdown_distributed_tpu(; name=nothing) - if tf.in_eager_mode() - _shutdown_distributed_tpu_eager(; name=name) - else - _shutdown_distributed_tpu_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _shutdown_distributed_tpu(; name=nothing) + if tf.in_eager_mode() + _shutdown_distributed_tpu_eager(; name=name) + else + _shutdown_distributed_tpu_graph(; name=name) + end end - end end @@ -8586,18 +8586,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function polygamma_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Polygamma") do - desc = tf.NodeDescription("Polygamma") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function polygamma_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Polygamma") do + desc = tf.NodeDescription("Polygamma") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function polygamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Polygamma") a_ = convert(tf.EagerTensor, a_) @@ -8613,13 +8613,13 @@ begin return res[1] end end - function polygamma(a_, x_; name=nothing) - if tf.in_eager_mode() - polygamma_eager(a_, x_; name=name) - else - polygamma_graph(a_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function polygamma(a_, x_; name=nothing) + if tf.in_eager_mode() + polygamma_eager(a_, x_; name=name) + else + polygamma_graph(a_, x_; name=name) + end end - end end @@ -8629,22 +8629,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) - local desc - tf.with_op_name(name, "NcclReduce") do - desc = tf.NodeDescription("NcclReduce") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end + function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + local desc + tf.with_op_name(name, "NcclReduce") do + desc = tf.NodeDescription("NcclReduce") + input_ = [convert(Tensor{Any}, x) for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = 
Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) desc = tf.EagerOp("NcclReduce") input_ = convert(tf.EagerTensor, input_) @@ -8663,13 +8663,13 @@ begin return res[1] end end - function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) - if tf.in_eager_mode() - nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices) - else - nccl_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) + if tf.in_eager_mode() + nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices) + else + nccl_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices) + end end - end end @@ -8679,23 +8679,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ArgMax") do - desc = tf.NodeDescription("ArgMax") - input_ = convert(Tensor{Any}, input_) - dimension_ = convert(Tensor{Int32}, dimension_) - dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end + function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMax") do + desc = tf.NodeDescription("ArgMax") + input_ = convert(Tensor{Any}, input_) + dimension_ = convert(Tensor{Int32}, dimension_) + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + (input_,) = tf.tf_promote(input_) + (dimension_,) = tf.tf_promote(dimension_) + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMax") input_ = convert(tf.EagerTensor, input_) @@ -8714,13 +8714,13 @@ begin return res[1] end end - function arg_max(input_, dimension_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - arg_max_eager(input_, dimension_; name=name, output_type=output_type) - else - arg_max_graph(input_, dimension_; name=name, output_type=output_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + arg_max_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_max_graph(input_, dimension_; name=name, output_type=output_type) + end end - end end @@ -8730,18 +8730,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_set_diag_graph(input_, diagonal_; name=nothing) - local desc - tf.with_op_name(name, "MatrixSetDiag") do - desc = tf.NodeDescription("MatrixSetDiag") - input_ = convert(Tensor{Any}, input_) - diagonal_ = convert(Tensor{Any}, diagonal_) - (input_, diagonal_) = 
tf.tf_promote(input_, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_set_diag_graph(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSetDiag") do + desc = tf.NodeDescription("MatrixSetDiag") + input_ = convert(Tensor{Any}, input_) + diagonal_ = convert(Tensor{Any}, diagonal_) + (input_, diagonal_) = tf.tf_promote(input_, diagonal_) + tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("MatrixSetDiag") input_ = convert(tf.EagerTensor, input_) @@ -8757,13 +8757,13 @@ begin return res[1] end end - function matrix_set_diag(input_, diagonal_; name=nothing) - if tf.in_eager_mode() - matrix_set_diag_eager(input_, diagonal_; name=name) - else - matrix_set_diag_graph(input_, diagonal_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing) + if tf.in_eager_mode() + matrix_set_diag_eager(input_, diagonal_; name=name) + else + matrix_set_diag_graph(input_, diagonal_; name=name) + end end - end end @@ -8773,22 +8773,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) - local desc - tf.with_op_name(name, "SpaceToBatchND") do - desc = tf.NodeDescription("SpaceToBatchND") - input_ = convert(Tensor{Any}, input_) - block_shape_ = convert(Tensor{Int32}, block_shape_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - (block_shape_,) = tf.tf_promote(block_shape_) - tf.add_input(desc, input_) - tf.add_input(desc, block_shape_) - tf.add_input(desc, paddings_) - end - tf.Tensor(tf.Operation(desc)) + function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "SpaceToBatchND") do + desc = tf.NodeDescription("SpaceToBatchND") + input_ = convert(Tensor{Any}, input_) + block_shape_ = convert(Tensor{Int32}, block_shape_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + (block_shape_,) = tf.tf_promote(block_shape_) + tf.add_input(desc, input_) + tf.add_input(desc, block_shape_) + tf.add_input(desc, paddings_) end + tf.Tensor(tf.Operation(desc)) + end function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) desc = tf.EagerOp("SpaceToBatchND") input_ = convert(tf.EagerTensor, input_) @@ -8807,13 +8807,13 @@ begin return res[1] end end - function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) - if tf.in_eager_mode() - space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name) - else - space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) + if tf.in_eager_mode() + space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name) + else + space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name) + end end - end end @@ -8823,24 +8823,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) - local desc - 
tf.with_op_name(name, "SparseReshape") do - desc = tf.NodeDescription("SparseReshape") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - new_shape_ = convert(Tensor{Int64}, new_shape_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, new_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReshape") do + desc = tf.NodeDescription("SparseReshape") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + new_shape_ = convert(Tensor{Int64}, new_shape_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, new_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) desc = tf.EagerOp("SparseReshape") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -8856,13 +8856,13 @@ begin return res end end - function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) - if tf.in_eager_mode() - sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name) - else - sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) + if tf.in_eager_mode() + sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name) + else + sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name) + end end - end end @@ -8872,23 +8872,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "OptimizeDataset") do - desc = tf.NodeDescription("OptimizeDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - optimizations_ = convert(Tensor{String}, optimizations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, optimizations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptimizeDataset") do + desc = tf.NodeDescription("OptimizeDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + optimizations_ = convert(Tensor{String}, optimizations_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, optimizations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptimizeDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -8908,13 
+8908,13 @@ begin return res[1] end end - function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) - else - optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -8924,23 +8924,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_v2_graph(values_, axis_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ConcatV2") do - desc = tf.NodeDescription("ConcatV2") - values_ = [convert(Tensor{Any}, x) for x = values_] - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (values_,) = tf.tf_promote(values_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, values_) - tf.add_input(desc, axis_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function concat_v2_graph(values_, axis_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatV2") do + desc = tf.NodeDescription("ConcatV2") + values_ = [convert(Tensor{Any}, x) for x = values_] + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (values_,) = tf.tf_promote(values_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, values_) + tf.add_input(desc, axis_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function concat_v2_eager(values_, axis_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatV2") values_ = convert(tf.EagerTensor, values_) @@ -8959,13 +8959,13 @@ begin return res[1] end end - function concat_v2(values_, axis_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_v2_eager(values_, axis_; name=name, N=N) - else - concat_v2_graph(values_, axis_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_v2_eager(values_, axis_; name=name, N=N) + else + concat_v2_graph(values_, axis_; name=name, N=N) + end end - end end @@ -8975,35 +8975,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdadelta") do - desc = tf.NodeDescription("ResourceSparseApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - 
convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdadelta") do + desc = tf.NodeDescription("ResourceSparseApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdadelta") var_ = convert(tf.EagerTensor, var_) @@ -9037,13 +9037,13 @@ begin return res[1] end end - function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -9053,19 +9053,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tile_graph(input_, multiples_; name=nothing) - local desc - tf.with_op_name(name, "Tile") do - desc = tf.NodeDescription("Tile") - input_ = convert(Tensor{Any}, input_) - multiples_ = convert(Tensor{Int32}, multiples_) - (input_,) = tf.tf_promote(input_) - (multiples_,) = tf.tf_promote(multiples_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) - end - tf.Tensor(tf.Operation(desc)) + function tile_graph(input_, multiples_; name=nothing) + local 
desc + tf.with_op_name(name, "Tile") do + desc = tf.NodeDescription("Tile") + input_ = convert(Tensor{Any}, input_) + multiples_ = convert(Tensor{Int32}, multiples_) + (input_,) = tf.tf_promote(input_) + (multiples_,) = tf.tf_promote(multiples_) + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) end + tf.Tensor(tf.Operation(desc)) + end function tile_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("Tile") input_ = convert(tf.EagerTensor, input_) @@ -9081,13 +9081,13 @@ begin return res[1] end end - function tile(input_, multiples_; name=nothing) - if tf.in_eager_mode() - tile_eager(input_, multiples_; name=name) - else - tile_graph(input_, multiples_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile(input_, multiples_; name=nothing) + if tf.in_eager_mode() + tile_eager(input_, multiples_; name=name) + else + tile_graph(input_, multiples_; name=name) + end end - end end @@ -9097,19 +9097,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MutexV2") do - desc = tf.NodeDescription("MutexV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MutexV2") do + desc = tf.NodeDescription("MutexV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mutex_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MutexV2") if container !== nothing @@ -9125,13 +9125,13 @@ begin return res[1] end end - function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - mutex_v2_eager(; name=name, container=container, shared_name=shared_name) - else - mutex_v2_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + mutex_v2_eager(; name=name, container=container, shared_name=shared_name) + else + mutex_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -9141,23 +9141,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "SerializeManySparse") do - desc = tf.NodeDescription("SerializeManySparse") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, 
"SerializeManySparse") do + desc = tf.NodeDescription("SerializeManySparse") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeManySparse") sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) @@ -9177,13 +9177,13 @@ begin return res[1] end end - function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - else - serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end end - end end @@ -9193,23 +9193,23 @@ end An op enabling differentiation of TPU Embeddings. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) - local desc - tf.with_op_name(name, "TPUEmbeddingActivations") do - desc = tf.NodeDescription("TPUEmbeddingActivations") - embedding_variable_ = convert(Tensor{Float32}, embedding_variable_) - sliced_activations_ = convert(Tensor{Float32}, sliced_activations_) - tf.add_input(desc, embedding_variable_) - tf.add_input(desc, sliced_activations_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if lookup_id !== nothing - desc["lookup_id"] = Base.Int(lookup_id) - end + function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + local desc + tf.with_op_name(name, "TPUEmbeddingActivations") do + desc = tf.NodeDescription("TPUEmbeddingActivations") + embedding_variable_ = convert(Tensor{Float32}, embedding_variable_) + sliced_activations_ = convert(Tensor{Float32}, sliced_activations_) + tf.add_input(desc, embedding_variable_) + tf.add_input(desc, sliced_activations_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if lookup_id !== nothing + desc["lookup_id"] = Base.Int(lookup_id) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) desc = tf.EagerOp("TPUEmbeddingActivations") embedding_variable_ = convert(tf.EagerTensor, embedding_variable_) @@ -9229,13 +9229,13 @@ begin return res[1] end end - function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, 
lookup_id=nothing) - if tf.in_eager_mode() - tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) - else - tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + if tf.in_eager_mode() + tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + else + tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + end end - end end @@ -9245,23 +9245,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - local desc - tf.with_op_name(name, "BatchMatrixSolveLs") do - desc = tf.NodeDescription("BatchMatrixSolveLs") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) - end + function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolveLs") do + desc = tf.NodeDescription("BatchMatrixSolveLs") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("BatchMatrixSolveLs") matrix_ = convert(tf.EagerTensor, matrix_) @@ -9282,13 +9282,13 @@ begin return res[1] end end - function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - if tf.in_eager_mode() - batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - else - batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.in_eager_mode() + batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end end - end end @@ -9298,18 +9298,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function not_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "NotEqual") do - desc = tf.NodeDescription("NotEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function not_equal_graph(x_, y_; name=nothing) + local desc + 
tf.with_op_name(name, "NotEqual") do + desc = tf.NodeDescription("NotEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function not_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("NotEqual") x_ = convert(tf.EagerTensor, x_) @@ -9325,13 +9325,13 @@ begin return res[1] end end - function not_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - not_equal_eager(x_, y_; name=name) - else - not_equal_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function not_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + not_equal_eager(x_, y_; name=name) + else + not_equal_graph(x_, y_; name=name) + end end - end end @@ -9341,16 +9341,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lgamma_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Lgamma") do - desc = tf.NodeDescription("Lgamma") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function lgamma_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Lgamma") do + desc = tf.NodeDescription("Lgamma") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function lgamma_eager(x_; name=nothing) desc = tf.EagerOp("Lgamma") x_ = convert(tf.EagerTensor, x_) @@ -9363,13 +9363,13 @@ begin return res[1] end end - function lgamma(x_; name=nothing) - if tf.in_eager_mode() - lgamma_eager(x_; name=name) - else - lgamma_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lgamma(x_; name=nothing) + if tf.in_eager_mode() + lgamma_eager(x_; name=name) + else + lgamma_graph(x_; name=name) + end end - end end @@ -9379,34 +9379,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) - local desc - tf.with_op_name(name, "TPUReplicateMetadata") do - desc = tf.NodeDescription("TPUReplicateMetadata") - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) - end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - desc["device_assignment"] = map(Base.identity, device_assignment) - end - if computation_shape !== nothing - desc["computation_shape"] = map(Base.identity, computation_shape) - end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - end - tf.Tensor(tf.Operation(desc)) + function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) + local desc + tf.with_op_name(name, "TPUReplicateMetadata") do + desc = tf.NodeDescription("TPUReplicateMetadata") + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if 
num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if computation_shape !== nothing + desc["computation_shape"] = map(Base.identity, computation_shape) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end end + tf.Tensor(tf.Operation(desc)) + end function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) desc = tf.EagerOp("TPUReplicateMetadata") if num_replicas !== nothing @@ -9437,13 +9437,13 @@ begin return res[1] end end - function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) - if tf.in_eager_mode() - tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) - else - tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) + if tf.in_eager_mode() + tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) + else + tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) + end end - end end @@ -9453,28 +9453,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "ExperimentalThreadPoolHandle") do - desc = tf.NodeDescription("ExperimentalThreadPoolHandle") - if num_threads !== nothing - desc["num_threads"] = Base.Int(num_threads) - end - if max_intra_op_parallelism !== nothing - desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) - end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function experimental_thread_pool_handle_graph(; 
name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolHandle") do + desc = tf.NodeDescription("ExperimentalThreadPoolHandle") + if num_threads !== nothing + desc["num_threads"] = Base.Int(num_threads) + end + if max_intra_op_parallelism !== nothing + desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_thread_pool_handle_eager(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("ExperimentalThreadPoolHandle") if num_threads !== nothing @@ -9499,13 +9499,13 @@ begin return res[1] end end - function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) - else - experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + else + experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + end end - end end @@ -9515,16 +9515,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function self_adjoint_eig_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "SelfAdjointEig") do - desc = tf.NodeDescription("SelfAdjointEig") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function self_adjoint_eig_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEig") do + desc = tf.NodeDescription("SelfAdjointEig") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("SelfAdjointEig") input_ = convert(tf.EagerTensor, input_) @@ -9537,13 +9537,13 @@ begin return res[1] end end - function self_adjoint_eig(input_; name=nothing) - if tf.in_eager_mode() - self_adjoint_eig_eager(input_; name=name) - else - self_adjoint_eig_graph(input_; name=name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig(input_; name=nothing) + if tf.in_eager_mode() + self_adjoint_eig_eager(input_; name=name) + else + self_adjoint_eig_graph(input_; name=name) + end end - end end @@ -9553,23 +9553,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) + function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + tf.add_input(desc, quantile_stream_resource_handle_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -9584,13 +9584,13 @@ begin return res end end - function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features) - else - boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features) + end end - end end @@ -9600,22 +9600,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseDiv") do - desc = tf.NodeDescription("SparseDenseCwiseDiv") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, 
sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseDiv") do + desc = tf.NodeDescription("SparseDenseCwiseDiv") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) desc = tf.EagerOp("SparseDenseCwiseDiv") sp_indices_ = convert(tf.EagerTensor, sp_indices_) @@ -9635,13 +9635,13 @@ begin return res[1] end end - function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end end @@ -9651,16 +9651,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function acos_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Acos") do - desc = tf.NodeDescription("Acos") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function acos_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Acos") do + desc = tf.NodeDescription("Acos") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function acos_eager(x_; name=nothing) desc = tf.EagerOp("Acos") x_ = convert(tf.EagerTensor, x_) @@ -9673,13 +9673,13 @@ begin return res[1] end end - function acos(x_; name=nothing) - if tf.in_eager_mode() - acos_eager(x_; name=name) - else - acos_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acos(x_; name=nothing) + if tf.in_eager_mode() + acos_eager(x_; name=name) + else + acos_graph(x_; name=name) + end end - end end @@ -9689,22 +9689,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "All") do - desc = tf.NodeDescription("All") - input_ = convert(Tensor{Bool}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) 
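# [Editor's note] The `reduction_indices_ - convert(tf.Tensor{eltype(...)}, 1)`
# line above is the generator's 1-based to 0-based translation: Julia callers
# pass 1-based axes or indices (All here, ArgMax, ConcatV2, and the sparse
# `indices_` earlier in this patch), and the wrapper subtracts one before the
# value reaches the TensorFlow kernel. An illustrative call, assuming the
# generated `all` wrapper below:
#
#   x = [true true; true false]
#   all(x, 2)   # reduce over Julia dim 2; the kernel sees reduction_indices == 1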
- tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "All") do + desc = tf.NodeDescription("All") + input_ = convert(Tensor{Bool}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("All") input_ = convert(tf.EagerTensor, input_) @@ -9722,13 +9722,13 @@ begin return res[1] end end - function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -9738,18 +9738,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function compare_and_bitpack_graph(input_, threshold_; name=nothing) - local desc - tf.with_op_name(name, "CompareAndBitpack") do - desc = tf.NodeDescription("CompareAndBitpack") - input_ = convert(Tensor{Any}, input_) - threshold_ = convert(Tensor{Any}, threshold_) - (input_, threshold_) = tf.tf_promote(input_, threshold_) - tf.add_input(desc, input_) - tf.add_input(desc, threshold_) - end - tf.Tensor(tf.Operation(desc)) + function compare_and_bitpack_graph(input_, threshold_; name=nothing) + local desc + tf.with_op_name(name, "CompareAndBitpack") do + desc = tf.NodeDescription("CompareAndBitpack") + input_ = convert(Tensor{Any}, input_) + threshold_ = convert(Tensor{Any}, threshold_) + (input_, threshold_) = tf.tf_promote(input_, threshold_) + tf.add_input(desc, input_) + tf.add_input(desc, threshold_) end + tf.Tensor(tf.Operation(desc)) + end function compare_and_bitpack_eager(input_, threshold_; name=nothing) desc = tf.EagerOp("CompareAndBitpack") input_ = convert(tf.EagerTensor, input_) @@ -9765,13 +9765,13 @@ begin return res[1] end end - function compare_and_bitpack(input_, threshold_; name=nothing) - if tf.in_eager_mode() - compare_and_bitpack_eager(input_, threshold_; name=name) - else - compare_and_bitpack_graph(input_, threshold_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing) + if tf.in_eager_mode() + compare_and_bitpack_eager(input_, threshold_; name=name) + else + compare_and_bitpack_graph(input_, threshold_; name=name) + end end - end end @@ -9781,25 +9781,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "VarHandleOp") do - 
desc = tf.NodeDescription("VarHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "VarHandleOp") do + desc = tf.NodeDescription("VarHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function var_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("VarHandleOp") if container !== nothing @@ -9821,13 +9821,13 @@ begin return res[1] end end - function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) - else - var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + else + var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + end end - end end @@ -9837,21 +9837,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalUniqueDataset") do - desc = tf.NodeDescription("ExperimentalUniqueDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUniqueDataset") do + desc = tf.NodeDescription("ExperimentalUniqueDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUniqueDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -9869,13 +9869,13 @@ begin return res[1] end end - function experimental_unique_dataset(input_dataset_; name=nothing, 
output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_unique_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unique_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -9885,26 +9885,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "ListDiff") do - desc = tf.NodeDescription("ListDiff") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "ListDiff") do + desc = tf.NodeDescription("ListDiff") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) desc = tf.EagerOp("ListDiff") x_ = convert(tf.EagerTensor, x_) @@ -9923,13 +9923,13 @@ begin return res end end - function list_diff(x_, y_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - list_diff_eager(x_, y_; name=name, out_idx=out_idx) - else - list_diff_graph(x_, y_; name=name, out_idx=out_idx) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + list_diff_eager(x_, y_; name=name, out_idx=out_idx) + else + list_diff_graph(x_, y_; name=name, out_idx=out_idx) + end end - end end @@ -9939,23 +9939,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) - local desc - tf.with_op_name(name, "CreateSummaryFileWriter") do - desc = tf.NodeDescription("CreateSummaryFileWriter") - writer_ = convert(Tensor{Any}, writer_) - logdir_ = convert(Tensor{String}, logdir_) - max_queue_ = convert(Tensor{Int32}, max_queue_) - flush_millis_ = convert(Tensor{Int32}, flush_millis_) - filename_suffix_ = convert(Tensor{String}, filename_suffix_) - tf.add_input(desc, writer_) - tf.add_input(desc, logdir_) - tf.add_input(desc, max_queue_) - tf.add_input(desc, flush_millis_) - tf.add_input(desc, filename_suffix_) - end - tf.Tensor(tf.Operation(desc)) + function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + local desc + 
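
`ListDiff` is the first two-output op in this stretch: instead of wrapping the operation in a single `tf.Tensor`, the generated body indexes each output port (`tf.Tensor(op, out_idx)` for `out_idx = 1:2`) and returns a vector, which callers can destructure. A usage sketch, assuming the wrapper is in scope as `list_diff`:

    x = tf.constant([1, 2, 3, 4])
    y = tf.constant([2, 4])
    out, idx = list_diff(x, y)   # out: elements of x not in y; idx: their positions in x
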
tf.with_op_name(name, "CreateSummaryFileWriter") do + desc = tf.NodeDescription("CreateSummaryFileWriter") + writer_ = convert(Tensor{Any}, writer_) + logdir_ = convert(Tensor{String}, logdir_) + max_queue_ = convert(Tensor{Int32}, max_queue_) + flush_millis_ = convert(Tensor{Int32}, flush_millis_) + filename_suffix_ = convert(Tensor{String}, filename_suffix_) + tf.add_input(desc, writer_) + tf.add_input(desc, logdir_) + tf.add_input(desc, max_queue_) + tf.add_input(desc, flush_millis_) + tf.add_input(desc, filename_suffix_) end + tf.Tensor(tf.Operation(desc)) + end function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) desc = tf.EagerOp("CreateSummaryFileWriter") writer_ = convert(tf.EagerTensor, writer_) @@ -9975,13 +9975,13 @@ begin return res[1] end end - function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) - if tf.in_eager_mode() - create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) - else - create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + if tf.in_eager_mode() + create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + else + create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + end end - end end @@ -9991,31 +9991,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) - local desc - tf.with_op_name(name, "GenerateVocabRemapping") do - desc = tf.NodeDescription("GenerateVocabRemapping") - new_vocab_file_ = convert(Tensor{String}, new_vocab_file_) - old_vocab_file_ = convert(Tensor{String}, old_vocab_file_) - tf.add_input(desc, new_vocab_file_) - tf.add_input(desc, old_vocab_file_) - if new_vocab_offset !== nothing - desc["new_vocab_offset"] = Base.Int(new_vocab_offset) - end - if num_new_vocab !== nothing - desc["num_new_vocab"] = Base.Int(num_new_vocab) - end - if old_vocab_size !== nothing - desc["old_vocab_size"] = Base.Int(old_vocab_size) - end + function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + local desc + tf.with_op_name(name, "GenerateVocabRemapping") do + desc = tf.NodeDescription("GenerateVocabRemapping") + new_vocab_file_ = convert(Tensor{String}, new_vocab_file_) + old_vocab_file_ = convert(Tensor{String}, old_vocab_file_) + tf.add_input(desc, new_vocab_file_) + tf.add_input(desc, old_vocab_file_) + if new_vocab_offset !== nothing + desc["new_vocab_offset"] = Base.Int(new_vocab_offset) + end + if num_new_vocab !== nothing + desc["num_new_vocab"] = Base.Int(num_new_vocab) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if old_vocab_size !== nothing + desc["old_vocab_size"] = Base.Int(old_vocab_size) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function 
generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) desc = tf.EagerOp("GenerateVocabRemapping") new_vocab_file_ = convert(tf.EagerTensor, new_vocab_file_) @@ -10038,13 +10038,13 @@ begin return res end end - function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) - if tf.in_eager_mode() - generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) - else - generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + if tf.in_eager_mode() + generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + else + generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + end end - end end @@ -10054,19 +10054,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "BatchMatrixInverse") do - desc = tf.NodeDescription("BatchMatrixInverse") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixInverse") do + desc = tf.NodeDescription("BatchMatrixInverse") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixInverse") input_ = convert(tf.EagerTensor, input_) @@ -10082,13 +10082,13 @@ begin return res[1] end end - function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint) - else - batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end end - end end @@ -10098,14 +10098,14 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function control_trigger_graph(; name=nothing) - local desc - tf.with_op_name(name, "ControlTrigger") do - desc - tf.NodeDescription("ControlTrigger") - end - tf.Tensor(tf.Operation(desc)) + function control_trigger_graph(; name=nothing) + local desc + 
tf.with_op_name(name, "ControlTrigger") do + desc + tf.NodeDescription("ControlTrigger") end + tf.Tensor(tf.Operation(desc)) + end function control_trigger_eager(; name=nothing) desc = tf.EagerOp("ControlTrigger") res = tf.execute(desc) @@ -10115,13 +10115,13 @@ begin return res[1] end end - function control_trigger(; name=nothing) - if tf.in_eager_mode() - control_trigger_eager(; name=name) - else - control_trigger_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function control_trigger(; name=nothing) + if tf.in_eager_mode() + control_trigger_eager(; name=name) + else + control_trigger_graph(; name=name) + end end - end end @@ -10131,16 +10131,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stop_gradient_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "StopGradient") do - desc = tf.NodeDescription("StopGradient") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function stop_gradient_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "StopGradient") do + desc = tf.NodeDescription("StopGradient") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function stop_gradient_eager(input_; name=nothing) desc = tf.EagerOp("StopGradient") input_ = convert(tf.EagerTensor, input_) @@ -10153,13 +10153,13 @@ begin return res[1] end end - function stop_gradient(input_; name=nothing) - if tf.in_eager_mode() - stop_gradient_eager(input_; name=name) - else - stop_gradient_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stop_gradient(input_; name=nothing) + if tf.in_eager_mode() + stop_gradient_eager(input_; name=name) + else + stop_gradient_graph(input_; name=name) + end end - end end @@ -10169,27 +10169,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function split_graph(split_dim_, value_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "Split") do - desc = tf.NodeDescription("Split") - split_dim_ = convert(Tensor{Int32}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, value_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_split - push!(out, tf.Tensor(op, out_idx)) + function split_graph(split_dim_, value_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "Split") do + desc = tf.NodeDescription("Split") + split_dim_ = convert(Tensor{Int32}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, split_dim_) + tf.add_input(desc, value_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end function split_eager(split_dim_, value_; name=nothing, num_split=nothing) desc = tf.EagerOp("Split") split_dim_ = convert(tf.EagerTensor, split_dim_) @@ -10207,13 +10207,13 @@ begin return res 
end end - function split(split_dim_, value_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - split_eager(split_dim_, value_; name=name, num_split=num_split) - else - split_graph(split_dim_, value_; name=name, num_split=num_split) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + split_eager(split_dim_, value_; name=name, num_split=num_split) + else + split_graph(split_dim_, value_; name=name, num_split=num_split) + end end - end end @@ -10223,30 +10223,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) - local desc - tf.with_op_name(name, "Unpack") do - desc = tf.NodeDescription("Unpack") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if num !== nothing - desc["num"] = Base.Int(num) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end + function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Unpack") do + desc = tf.NodeDescription("Unpack") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if num !== nothing + desc["num"] = Base.Int(num) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num - push!(out, tf.Tensor(op, out_idx)) + if axis !== nothing + axis = Base.Int(axis) - 1 end - out + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num + push!(out, tf.Tensor(op, out_idx)) end + out + end function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) desc = tf.EagerOp("Unpack") value_ = convert(tf.EagerTensor, value_) @@ -10268,13 +10268,13 @@ begin return res end end - function unpack(value_; name=nothing, num=nothing, axis=nothing) - if tf.in_eager_mode() - unpack_eager(value_; name=name, num=num, axis=axis) - else - unpack_graph(value_; name=name, num=num, axis=axis) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) + if tf.in_eager_mode() + unpack_eager(value_; name=name, num=num, axis=axis) + else + unpack_graph(value_; name=name, num=num, axis=axis) + end end - end end @@ -10284,25 +10284,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMax") do - desc = tf.NodeDescription("ResourceScatterMax") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMax") do + desc = tf.NodeDescription("ResourceScatterMax") + resource_ = convert(Tensor{Any}, resource_) + indices_ = 
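
`Unpack` performs the same 1-based-to-0-based shift, but for an attribute rather than an input: `axis` is decremented as a plain integer before being stored in the description. A usage sketch:

    m = tf.constant([1 2 3; 4 5 6])
    rows = unpack(m; num=2, axis=1)   # two length-3 tensors; axis=1 becomes TF attr axis=0
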
convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMax") resource_ = convert(tf.EagerTensor, resource_) @@ -10323,13 +10323,13 @@ begin return res[1] end end - function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -10339,22 +10339,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWrite") do - desc = tf.NodeDescription("TensorArrayWrite") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWrite") do + desc = tf.NodeDescription("TensorArrayWrite") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWrite") handle_ = convert(tf.EagerTensor, handle_) @@ -10373,13 +10373,13 @@ begin return res[1] end end - function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) + end end - end end @@ -10389,22 +10389,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# 
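
Nearly every wrapper routes its numeric inputs through `tf.tf_promote` before `add_input`, as `ResourceScatterMax` does with `indices_` and `updates_`. As used here, the point is to bring heterogeneous Julia arguments to a common element type so kernels with a single type attribute accept them. A sketch of the intended effect, assuming promotion follows Julia's usual `promote_type` rules:

    x = tf.constant([1, 2, 3])     # Int64 tensor
    y = 2.5                        # plain Julia Float64
    # (x2, y2) = tf.tf_promote(x, y) should yield two Float64-typed tensors,
    # making them legal inputs to an op whose inputs must share one dtype.
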
tf.@op function fill_graph(dims_, value_; name=nothing, index_type=nothing) - local desc - tf.with_op_name(name, "Fill") do - desc = tf.NodeDescription("Fill") - dims_ = convert(Tensor{Int32}, dims_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - (dims_,) = tf.tf_promote(dims_) - tf.add_input(desc, dims_) - tf.add_input(desc, value_) - if index_type !== nothing - desc["index_type"] = Base.identity(index_type) - end + function fill_graph(dims_, value_; name=nothing, index_type=nothing) + local desc + tf.with_op_name(name, "Fill") do + desc = tf.NodeDescription("Fill") + dims_ = convert(Tensor{Int32}, dims_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + (dims_,) = tf.tf_promote(dims_) + tf.add_input(desc, dims_) + tf.add_input(desc, value_) + if index_type !== nothing + desc["index_type"] = Base.identity(index_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fill_eager(dims_, value_; name=nothing, index_type=nothing) desc = tf.EagerOp("Fill") dims_ = convert(tf.EagerTensor, dims_) @@ -10423,13 +10423,13 @@ begin return res[1] end end - function fill(dims_, value_; name=nothing, index_type=nothing) - if tf.in_eager_mode() - fill_eager(dims_, value_; name=name, index_type=index_type) - else - fill_graph(dims_, value_; name=name, index_type=index_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) + if tf.in_eager_mode() + fill_eager(dims_, value_; name=name, index_type=index_type) + else + fill_graph(dims_, value_; name=name, index_type=index_type) + end end - end end @@ -10439,16 +10439,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softmax_graph(logits_; name=nothing) - local desc - tf.with_op_name(name, "Softmax") do - desc = tf.NodeDescription("Softmax") - logits_ = convert(Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - end - tf.Tensor(tf.Operation(desc)) + function softmax_graph(logits_; name=nothing) + local desc + tf.with_op_name(name, "Softmax") do + desc = tf.NodeDescription("Softmax") + logits_ = convert(Tensor{Any}, logits_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) end + tf.Tensor(tf.Operation(desc)) + end function softmax_eager(logits_; name=nothing) desc = tf.EagerOp("Softmax") logits_ = convert(tf.EagerTensor, logits_) @@ -10461,13 +10461,13 @@ begin return res[1] end end - function softmax(logits_; name=nothing) - if tf.in_eager_mode() - softmax_eager(logits_; name=name) - else - softmax_graph(logits_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax(logits_; name=nothing) + if tf.in_eager_mode() + softmax_eager(logits_; name=name) + else + softmax_graph(logits_; name=name) + end end - end end @@ -10477,21 +10477,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBicubic") do - desc = tf.NodeDescription("ResizeBicubic") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function 
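
`Fill` and `Softmax` are convenient for smoke-testing the graph path end to end, since both are pure and take simple inputs. A graph-mode usage sketch (note that the generated `fill` takes `dims` first, unlike `Base.fill`):

    sess = tf.Session(tf.Graph())
    a = fill([2, 3], 7)                            # 2x3 tensor of sevens, via the Fill kernel
    p = softmax(tf.constant([1.0, 2.0, 3.0]))
    run(sess, [a, p])
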
resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubic") do + desc = tf.NodeDescription("ResizeBicubic") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubic") images_ = convert(tf.EagerTensor, images_) @@ -10509,13 +10509,13 @@ begin return res[1] end end - function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners) + end end - end end @@ -10525,19 +10525,19 @@ end A placeholder op for multiple values that will be fed into the computation """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "InfeedDequeueTuple") do - desc = tf.NodeDescription("InfeedDequeueTuple") - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end + function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "InfeedDequeueTuple") do + desc = tf.NodeDescription("InfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function infeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing) desc = tf.EagerOp("InfeedDequeueTuple") if dtypes !== nothing @@ -10553,13 +10553,13 @@ begin return res[1] end end - function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) - if tf.in_eager_mode() - infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) - else - infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + if tf.in_eager_mode() + infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) + else + infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) + end end - end end @@ -10569,28 +10569,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIterator") do - desc = tf.NodeDescription("MultiDeviceIterator") - if devices !== nothing - 
desc["devices"] = map(Base.identity, devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIterator") do + desc = tf.NodeDescription("MultiDeviceIterator") + if devices !== nothing + desc["devices"] = map(Base.identity, devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function multi_device_iterator_eager(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIterator") if devices !== nothing @@ -10615,48 +10615,48 @@ begin return res[1] end end - function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - end end """ - decode_csv(records, record_defaults; field_delim=,, use_quote_delim=true, na_value=, select_cols=Int64[]) + decode_csv(records, record_defaults; field_delim=, use_quote_delim=true, na_value=, select_cols=Int64[]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - local desc - tf.with_op_name(name, "DecodeCSV") do - desc = tf.NodeDescription("DecodeCSV") - records_ = convert(Tensor{String}, records_) - record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] - tf.add_input(desc, records_) - tf.add_input(desc, record_defaults_) - if OUT_TYPE !== nothing - desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) - end - if field_delim !== nothing - desc["field_delim"] = Base.String(field_delim) - end - if use_quote_delim !== nothing - desc["use_quote_delim"] = 
Base.Bool(use_quote_delim) - end - if na_value !== nothing - desc["na_value"] = Base.String(na_value) - end - if select_cols !== nothing - desc["select_cols"] = map(Base.identity, select_cols) - end + function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + local desc + tf.with_op_name(name, "DecodeCSV") do + desc = tf.NodeDescription("DecodeCSV") + records_ = convert(Tensor{String}, records_) + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + tf.add_input(desc, records_) + tf.add_input(desc, record_defaults_) + if OUT_TYPE !== nothing + desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) + end + if field_delim !== nothing + desc["field_delim"] = Base.String(field_delim) + end + if use_quote_delim !== nothing + desc["use_quote_delim"] = Base.Bool(use_quote_delim) + end + if na_value !== nothing + desc["na_value"] = Base.String(na_value) + end + if select_cols !== nothing + desc["select_cols"] = map(Base.identity, select_cols) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) desc = tf.EagerOp("DecodeCSV") records_ = convert(tf.EagerTensor, records_) @@ -10685,13 +10685,13 @@ begin return res[1] end end - function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - if tf.in_eager_mode() - decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) - else - decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + if tf.in_eager_mode() + decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + else + decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + end end - end end @@ -10701,21 +10701,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableFind") do - desc = tf.NodeDescription("LookupTableFind") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - default_value_ = convert(Tensor{Any}, default_value_) - (keys_,) = tf.tf_promote(keys_) - (default_value_,) = tf.tf_promote(default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFind") do + desc = tf.NodeDescription("LookupTableFind") + 
table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + default_value_ = convert(Tensor{Any}, default_value_) + (keys_,) = tf.tf_promote(keys_) + (default_value_,) = tf.tf_promote(default_value_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFind") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -10733,13 +10733,13 @@ begin return res[1] end end - function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) - if tf.in_eager_mode() - lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) - else - lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) + if tf.in_eager_mode() + lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) + else + lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) + end end - end end @@ -10749,29 +10749,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ShuffleAndRepeatDataset") do - desc = tf.NodeDescription("ShuffleAndRepeatDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleAndRepeatDataset") do + desc = tf.NodeDescription("ShuffleAndRepeatDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleAndRepeatDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -10797,13 +10797,13 @@ begin return res[1] end end - function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; 
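
`LookupTableFind` is a minimal example of the three-input pattern just above: the handle is a string ref tensor, and `keys`/`default_value` are promoted before being attached. A usage sketch, with `table` standing for a handle produced by one of the table-creation ops elsewhere in this file:

    default = tf.constant(-1)
    vals = lookup_table_find(table, tf.constant(["a", "b"]), default)
    # keys missing from the table come back as `default`
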
name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -10813,21 +10813,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalUnbatchDataset") do - desc = tf.NodeDescription("ExperimentalUnbatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUnbatchDataset") do + desc = tf.NodeDescription("ExperimentalUnbatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalUnbatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -10845,46 +10845,46 @@ begin return res[1] end end - function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end """ - avg_pool3d_grad(orig_input_shape, grad; data_format=NDHWC) + avg_pool3d_grad(orig_input_shape, grad; data_format=) """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool3DGrad") do - desc = tf.NodeDescription("AvgPool3DGrad") - orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) - grad_ = convert(Tensor{Any}, grad_) - (grad_,) = tf.tf_promote(grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3DGrad") do + desc = tf.NodeDescription("AvgPool3DGrad") + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + grad_ = convert(Tensor{Any}, grad_) + (grad_,) = tf.tf_promote(grad_) + tf.add_input(desc, orig_input_shape_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool3DGrad") orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) @@ -10911,13 +10911,13 @@ begin return res[1] end end - function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -10927,22 +10927,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "PlaceholderWithDefault") do - desc = tf.NodeDescription("PlaceholderWithDefault") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function placeholder_with_default_graph(input_; name=nothing, 
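
`AvgPool3DGrad` shows the list-attribute convention for the conv/pool family: `ksize` and `strides` are copied through `map(Base.identity, ...)` into list attrs, `padding` and `data_format` are plain strings, and the forward pass's input shape arrives as a regular `Int32` tensor. A usage sketch with hypothetical shapes, where `g` is the incoming gradient from downstream:

    # g :: 1x2x2x2x1 gradient; orig input was 1x4x4x4x1
    grad_wrt_input = avg_pool3d_grad([1, 4, 4, 4, 1], g;
                                     ksize=[1, 2, 2, 2, 1],
                                     strides=[1, 2, 2, 2, 1],
                                     padding="VALID")
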
dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderWithDefault") do + desc = tf.NodeDescription("PlaceholderWithDefault") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("PlaceholderWithDefault") input_ = convert(tf.EagerTensor, input_) @@ -10961,13 +10961,13 @@ begin return res[1] end end - function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) - else - placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) + else + placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) + end end - end end @@ -10977,21 +10977,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "InitializeTableV2") do - desc = tf.NodeDescription("InitializeTableV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - (keys_,) = tf.tf_promote(keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "InitializeTableV2") do + desc = tf.NodeDescription("InitializeTableV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTableV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -11009,13 +11009,13 @@ begin return res[1] end end - function initialize_table_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - initialize_table_v2_eager(table_handle_, keys_, values_; name=name) - else - initialize_table_v2_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + initialize_table_v2_eager(table_handle_, keys_, values_; name=name) + else + initialize_table_v2_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -11025,23 +11025,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, 
validate_indices=nothing) - local desc - tf.with_op_name(name, "SetSize") do - desc = tf.NodeDescription("SetSize") - set_indices_ = convert(Tensor{Int64}, set_indices_) - set_values_ = convert(Tensor{Any}, set_values_) - set_shape_ = convert(Tensor{Int64}, set_shape_) - (set_values_,) = tf.tf_promote(set_values_) - tf.add_input(desc, set_indices_) - tf.add_input(desc, set_values_) - tf.add_input(desc, set_shape_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end + function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SetSize") do + desc = tf.NodeDescription("SetSize") + set_indices_ = convert(Tensor{Int64}, set_indices_) + set_values_ = convert(Tensor{Any}, set_values_) + set_shape_ = convert(Tensor{Int64}, set_shape_) + (set_values_,) = tf.tf_promote(set_values_) + tf.add_input(desc, set_indices_) + tf.add_input(desc, set_values_) + tf.add_input(desc, set_shape_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SetSize") set_indices_ = convert(tf.EagerTensor, set_indices_) @@ -11061,13 +11061,13 @@ begin return res[1] end end - function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) - if tf.in_eager_mode() - set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) - else - set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + if tf.in_eager_mode() + set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + else + set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + end end - end end @@ -11077,23 +11077,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "Assert") do - desc = tf.NodeDescription("Assert") - condition_ = convert(Tensor{Bool}, condition_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, condition_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end + function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Assert") do + desc = tf.NodeDescription("Assert") + condition_ = convert(Tensor{Bool}, condition_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, condition_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) desc = tf.EagerOp("Assert") condition_ = convert(tf.EagerTensor, condition_) @@ -11113,13 +11113,13 @@ begin return res[1] end end - function 
assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) - if tf.in_eager_mode() - assert_eager(condition_, data_; name=name, T=T, summarize=summarize) - else - assert_graph(condition_, data_; name=name, T=T, summarize=summarize) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) + if tf.in_eager_mode() + assert_eager(condition_, data_; name=name, T=T, summarize=summarize) + else + assert_graph(condition_, data_; name=name, T=T, summarize=summarize) + end end - end end @@ -11129,32 +11129,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionV4") do - desc = tf.NodeDescription("NonMaxSuppressionV4") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - if pad_to_max_output_size !== nothing - desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) - end + function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV4") do + desc = tf.NodeDescription("NonMaxSuppressionV4") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) + if pad_to_max_output_size !== nothing + desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) desc = tf.EagerOp("NonMaxSuppressionV4") boxes_ = convert(tf.EagerTensor, boxes_) @@ -11179,13 +11179,13 @@ begin return res end end - function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - if tf.in_eager_mode() - non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) - else - non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + if tf.in_eager_mode() + non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + else + non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + end end - end end @@ -11195,43 +11195,43 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - local desc - tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do - desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") - image_size_ = convert(Tensor{Any}, image_size_) - bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) - min_object_covered_ = convert(Tensor{Float32}, min_object_covered_) - (image_size_,) = tf.tf_promote(image_size_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - tf.add_input(desc, min_object_covered_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) - end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do + desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") + image_size_ = convert(Tensor{Any}, image_size_) + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + min_object_covered_ = convert(Tensor{Float32}, min_object_covered_) + (image_size_,) = tf.tf_promote(image_size_) + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + tf.add_input(desc, min_object_covered_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function 
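
`SampleDistortedBoundingBoxV2` rounds out the multi-output cases with three result ports, gathered exactly like the two-output ops earlier, so callers can destructure the returned vector. A usage sketch with hypothetical inputs:

    begin_, size_, bboxes = sample_distorted_bounding_box_v2(
        tf.constant(Int32[480, 640, 3]),                                 # image_size
        tf.constant(reshape([0.1f0, 0.1f0, 0.9f0, 0.9f0], (1, 1, 4))),   # bounding_boxes
        0.1;                                                             # min_object_covered
        use_image_if_no_bounding_boxes=true)
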
sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBoxV2") image_size_ = convert(tf.EagerTensor, image_size_) @@ -11266,45 +11266,45 @@ begin return res end end - function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - if tf.in_eager_mode() - sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) - else - sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.in_eager_mode() + sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end end - end end """ - initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter= ) + initialize_table_from_text_file(table_handle, filename; vocab_size=-1, delimiter=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - local desc - tf.with_op_name(name, "InitializeTableFromTextFile") do - desc = tf.NodeDescription("InitializeTableFromTextFile") - table_handle_ = convert(Tensor{String}, table_handle_) - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, filename_) - if key_index !== nothing - desc["key_index"] = Base.Int(key_index) - end - if value_index !== nothing - desc["value_index"] = Base.Int(value_index) - end - if vocab_size !== nothing - desc["vocab_size"] = Base.Int(vocab_size) - end - if delimiter !== nothing - desc["delimiter"] = Base.String(delimiter) - end + function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + local desc + tf.with_op_name(name, "InitializeTableFromTextFile") do + desc = tf.NodeDescription("InitializeTableFromTextFile") + table_handle_ = convert(Tensor{String}, 
table_handle_) + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFile") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -11330,13 +11330,13 @@ begin return res[1] end end - function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - if tf.in_eager_mode() - initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) - else - initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + if tf.in_eager_mode() + initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + else + initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + end end - end end @@ -11346,15 +11346,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_size_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableSize") do - desc = tf.NodeDescription("LookupTableSize") - table_handle_ = convert(Tensor{String}, table_handle_) - tf.add_input(desc, table_handle_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_size_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableSize") do + desc = tf.NodeDescription("LookupTableSize") + table_handle_ = convert(Tensor{String}, table_handle_) + tf.add_input(desc, table_handle_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_size_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSize") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -11366,13 +11366,13 @@ begin return res[1] end end - function lookup_table_size(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_size_eager(table_handle_; name=name) - else - lookup_table_size_graph(table_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_size_eager(table_handle_; name=name) + else + lookup_table_size_graph(table_handle_; name=name) + end end - end end @@ -11382,37 +11382,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyAdagradDA") do - desc = tf.NodeDescription("SparseApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdagradDA") do + desc = tf.NodeDescription("SparseApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdagradDA") var_ = convert(tf.EagerTensor, var_) @@ -11451,13 +11451,13 @@ begin return res[1] end end - function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else 
- sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end end @@ -11467,23 +11467,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_gradient_args_graph(s0_, s1_; name=nothing) - local desc - tf.with_op_name(name, "BroadcastGradientArgs") do - desc = tf.NodeDescription("BroadcastGradientArgs") - s0_ = convert(Tensor{Int32}, s0_) - s1_ = convert(Tensor{Int32}, s1_) - (s0_, s1_) = tf.tf_promote(s0_, s1_) - tf.add_input(desc, s0_) - tf.add_input(desc, s1_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function broadcast_gradient_args_graph(s0_, s1_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastGradientArgs") do + desc = tf.NodeDescription("BroadcastGradientArgs") + s0_ = convert(Tensor{Int32}, s0_) + s1_ = convert(Tensor{Int32}, s1_) + (s0_, s1_) = tf.tf_promote(s0_, s1_) + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function broadcast_gradient_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastGradientArgs") s0_ = convert(tf.EagerTensor, s0_) @@ -11499,13 +11499,13 @@ begin return res end end - function broadcast_gradient_args(s0_, s1_; name=nothing) - if tf.in_eager_mode() - broadcast_gradient_args_eager(s0_, s1_; name=name) - else - broadcast_gradient_args_graph(s0_, s1_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing) + if tf.in_eager_mode() + broadcast_gradient_args_eager(s0_, s1_; name=name) + else + broadcast_gradient_args_graph(s0_, s1_; name=name) + end end - end end @@ -11515,19 +11515,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) - local desc - tf.with_op_name(name, "SummaryWriter") do - desc = tf.NodeDescription("SummaryWriter") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end + function summary_writer_graph(; name=nothing, shared_name=nothing, container=nothing) + local desc + tf.with_op_name(name, "SummaryWriter") do + desc = tf.NodeDescription("SummaryWriter") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function summary_writer_eager(; name=nothing, shared_name=nothing, container=nothing) desc = 
tf.EagerOp("SummaryWriter") if shared_name !== nothing @@ -11543,13 +11543,13 @@ begin return res[1] end end - function summary_writer(; name=nothing, shared_name=nothing, container=nothing) - if tf.in_eager_mode() - summary_writer_eager(; name=name, shared_name=shared_name, container=container) - else - summary_writer_graph(; name=name, shared_name=shared_name, container=container) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + if tf.in_eager_mode() + summary_writer_eager(; name=name, shared_name=shared_name, container=container) + else + summary_writer_graph(; name=name, shared_name=shared_name, container=container) + end end - end end @@ -11559,24 +11559,24 @@ end output = input; While (Cond(output)) { output = Body(output) } """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - local desc - tf.with_op_name(name, "_While") do - desc = tf.NodeDescription("_While") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - end - tf.Tensor(tf.Operation(desc)) + function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "_While") do + desc = tf.NodeDescription("_While") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end end + tf.Tensor(tf.Operation(desc)) + end function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("_While") input_ = convert(tf.EagerTensor, input_) @@ -11597,13 +11597,13 @@ begin return res[1] end end - function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - if tf.in_eager_mode() - _while_eager(input_; name=name, T=T, cond=cond, body=body) - else - _while_graph(input_; name=name, T=T, cond=cond, body=body) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.in_eager_mode() + _while_eager(input_; name=name, T=T, cond=cond, body=body) + else + _while_graph(input_; name=name, T=T, cond=cond, body=body) + end end - end end @@ -11613,24 +11613,24 @@ end An op that receives embedding activations on the TPU. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) - local desc - tf.with_op_name(name, "RecvTPUEmbeddingActivations") do - desc = tf.NodeDescription("RecvTPUEmbeddingActivations") - if num_outputs !== nothing - desc["num_outputs"] = Base.Int(num_outputs) - end - if config !== nothing - desc["config"] = Base.String(config) - end + function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) + local desc + tf.with_op_name(name, "RecvTPUEmbeddingActivations") do + desc = tf.NodeDescription("RecvTPUEmbeddingActivations") + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_outputs - push!(out, tf.Tensor(op, out_idx)) + if config !== nothing + desc["config"] = Base.String(config) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_outputs + push!(out, tf.Tensor(op, out_idx)) + end + out + end function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing) desc = tf.EagerOp("RecvTPUEmbeddingActivations") if num_outputs !== nothing @@ -11646,13 +11646,13 @@ begin return res end end - function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) - if tf.in_eager_mode() - recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) - else - recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + if tf.in_eager_mode() + recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) + else + recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config) + end end - end end @@ -11662,21 +11662,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "InitializeTable") do - desc = tf.NodeDescription("InitializeTable") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - (keys_,) = tf.tf_promote(keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function initialize_table_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "InitializeTable") do + desc = tf.NodeDescription("InitializeTable") + table_handle_ = convert(Tensor{String}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function initialize_table_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("InitializeTable") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -11694,13 +11694,13 @@ begin return res[1] end end - function initialize_table(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - 
initialize_table_eager(table_handle_, keys_, values_; name=name) - else - initialize_table_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + initialize_table_eager(table_handle_, keys_, values_; name=name) + else + initialize_table_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -11710,37 +11710,37 @@ end Debug Numeric Summary Op. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) - local desc - tf.with_op_name(name, "DebugNumericSummary") do - desc = tf.NodeDescription("DebugNumericSummary") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if lower_bound !== nothing - desc["lower_bound"] = Base.identity(lower_bound) - end - if upper_bound !== nothing - desc["upper_bound"] = Base.identity(upper_bound) - end - if mute_if_healthy !== nothing - desc["mute_if_healthy"] = Base.Bool(mute_if_healthy) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - end - tf.Tensor(tf.Operation(desc)) + function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugNumericSummary") do + desc = tf.NodeDescription("DebugNumericSummary") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if lower_bound !== nothing + desc["lower_bound"] = Base.identity(lower_bound) + end + if upper_bound !== nothing + desc["upper_bound"] = Base.identity(upper_bound) + end + if mute_if_healthy !== nothing + desc["mute_if_healthy"] = Base.Bool(mute_if_healthy) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end end + tf.Tensor(tf.Operation(desc)) + end function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNumericSummary") input_ = convert(tf.EagerTensor, input_) @@ -11774,13 +11774,13 @@ begin return res[1] end end - function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) - if tf.in_eager_mode() - debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, 
gated_grpc=gated_grpc) - else - debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing) + if tf.in_eager_mode() + debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) + else + debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc) + end end - end end @@ -11790,30 +11790,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") if table_id !== nothing @@ -11835,13 +11835,13 @@ begin return res end end - function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, 
num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -11851,16 +11851,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tanh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Tanh") do - desc = tf.NodeDescription("Tanh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function tanh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Tanh") do + desc = tf.NodeDescription("Tanh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function tanh_eager(x_; name=nothing) desc = tf.EagerOp("Tanh") x_ = convert(tf.EagerTensor, x_) @@ -11873,13 +11873,13 @@ begin return res[1] end end - function tanh(x_; name=nothing) - if tf.in_eager_mode() - tanh_eager(x_; name=name) - else - tanh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh(x_; name=nothing) + if tf.in_eager_mode() + tanh_eager(x_; name=name) + else + tanh_graph(x_; name=name) + end end - end end @@ -11889,24 +11889,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - local desc - tf.with_op_name(name, "SymbolicGradient") do - desc = tf.NodeDescription("SymbolicGradient") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end + function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + local desc + tf.with_op_name(name, "SymbolicGradient") do + desc = tf.NodeDescription("SymbolicGradient") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) desc = tf.EagerOp("SymbolicGradient") input_ = convert(tf.EagerTensor, input_) @@ -11927,13 +11927,13 @@ begin return res[1] end end - function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.in_eager_mode() - symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f) - else - symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, 
f=nothing) + if tf.in_eager_mode() + symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f) + else + symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f) + end end - end end @@ -11943,37 +11943,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do - desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - feature_ids_ = convert(Tensor{Int32}, feature_ids_) - node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_] - gains_ = [convert(Tensor{Float32}, x) for x = gains_] - thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_] - left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_] - right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_] - max_depth_ = convert(Tensor{Int32}, max_depth_) - learning_rate_ = convert(Tensor{Float32}, learning_rate_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, feature_ids_) - tf.add_input(desc, node_ids_) - tf.add_input(desc, gains_) - tf.add_input(desc, thresholds_) - tf.add_input(desc, left_node_contribs_) - tf.add_input(desc, right_node_contribs_) - tf.add_input(desc, max_depth_) - tf.add_input(desc, learning_rate_) - if pruning_mode !== nothing - desc["pruning_mode"] = Base.Int(pruning_mode) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - end - tf.Tensor(tf.Operation(desc)) + function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do + desc = tf.NodeDescription("BoostedTreesUpdateEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + feature_ids_ = convert(Tensor{Int32}, feature_ids_) + node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_] + gains_ = [convert(Tensor{Float32}, x) for x = gains_] + thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_] + left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_] + right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_] + max_depth_ = convert(Tensor{Int32}, max_depth_) + learning_rate_ = convert(Tensor{Float32}, learning_rate_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, feature_ids_) + tf.add_input(desc, node_ids_) + tf.add_input(desc, gains_) + tf.add_input(desc, thresholds_) + tf.add_input(desc, left_node_contribs_) + tf.add_input(desc, right_node_contribs_) + tf.add_input(desc, max_depth_) + tf.add_input(desc, learning_rate_) + if pruning_mode !== nothing + desc["pruning_mode"] = Base.Int(pruning_mode) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) desc = 
tf.EagerOp("BoostedTreesUpdateEnsemble") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -12007,13 +12007,13 @@ begin return res[1] end end - function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) - else - boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) + else + boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features) + end end - end end @@ -12023,30 +12023,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ApplyMomentum") do - desc = tf.NodeDescription("ApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - momentum_ = convert(Tensor{Any}, momentum_) - (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ApplyMomentum") do + desc = tf.NodeDescription("ApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + momentum_ = convert(Tensor{Any}, momentum_) + (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = 
Base.Bool(use_nesterov) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ApplyMomentum") var_ = convert(tf.EagerTensor, var_) @@ -12077,13 +12077,13 @@ begin return res[1] end end - function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -12093,22 +12093,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_graph(reader_handle_, queue_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderRead") do - desc = tf.NodeDescription("ReaderRead") - reader_handle_ = convert(Tensor{String}, reader_handle_) - queue_handle_ = convert(Tensor{String}, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRead") do + desc = tf.NodeDescription("ReaderRead") + reader_handle_ = convert(Tensor{String}, reader_handle_) + queue_handle_ = convert(Tensor{String}, queue_handle_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function reader_read_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderRead") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -12122,13 +12122,13 @@ begin return res end end - function reader_read(reader_handle_, queue_handle_; name=nothing) - if tf.in_eager_mode() - reader_read_eager(reader_handle_, queue_handle_; name=name) - else - reader_read_graph(reader_handle_, queue_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing) + if tf.in_eager_mode() + reader_read_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_graph(reader_handle_, queue_handle_; name=name) + end end - end end @@ -12138,21 +12138,21 @@ end An op that blocks execution until a distributed TPU system has """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - local desc - tf.with_op_name(name, "_WaitForDistributedTPU") do - desc = tf.NodeDescription("_WaitForDistributedTPU") - inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if 
startup_timeout_sec !== nothing - desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) - end - if N !== nothing - desc["N"] = Base.Int(N) - end + function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + local desc + tf.with_op_name(name, "_WaitForDistributedTPU") do + desc = tf.NodeDescription("_WaitForDistributedTPU") + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if startup_timeout_sec !== nothing + desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) + end + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) desc = tf.EagerOp("_WaitForDistributedTPU") inputs_ = convert(tf.EagerTensor, inputs_) @@ -12170,13 +12170,13 @@ begin return res[1] end end - function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - if tf.in_eager_mode() - _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) - else - _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + if tf.in_eager_mode() + _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + else + _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + end end - end end @@ -12186,15 +12186,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutex_lock_graph(mutex_; name=nothing) - local desc - tf.with_op_name(name, "MutexLock") do - desc = tf.NodeDescription("MutexLock") - mutex_ = convert(Tensor{Any}, mutex_) - tf.add_input(desc, mutex_) - end - tf.Tensor(tf.Operation(desc)) + function mutex_lock_graph(mutex_; name=nothing) + local desc + tf.with_op_name(name, "MutexLock") do + desc = tf.NodeDescription("MutexLock") + mutex_ = convert(Tensor{Any}, mutex_) + tf.add_input(desc, mutex_) end + tf.Tensor(tf.Operation(desc)) + end function mutex_lock_eager(mutex_; name=nothing) desc = tf.EagerOp("MutexLock") mutex_ = convert(tf.EagerTensor, mutex_) @@ -12206,13 +12206,13 @@ begin return res[1] end end - function mutex_lock(mutex_; name=nothing) - if tf.in_eager_mode() - mutex_lock_eager(mutex_; name=name) - else - mutex_lock_graph(mutex_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_lock(mutex_; name=nothing) + if tf.in_eager_mode() + mutex_lock_eager(mutex_; name=name) + else + mutex_lock_graph(mutex_; name=name) + end end - end end @@ -12222,17 +12222,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) - local desc - tf.with_op_name(name, "AccumulatorSetGlobalStep") do - desc = tf.NodeDescription("AccumulatorSetGlobalStep") - handle_ = convert(Tensor{String}, handle_) - new_global_step_ = convert(Tensor{Int64}, new_global_step_) - tf.add_input(desc, handle_) - tf.add_input(desc, new_global_step_) - end - tf.Tensor(tf.Operation(desc)) + function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + local desc + 
tf.with_op_name(name, "AccumulatorSetGlobalStep") do + desc = tf.NodeDescription("AccumulatorSetGlobalStep") + handle_ = convert(Tensor{String}, handle_) + new_global_step_ = convert(Tensor{Int64}, new_global_step_) + tf.add_input(desc, handle_) + tf.add_input(desc, new_global_step_) end + tf.Tensor(tf.Operation(desc)) + end function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) desc = tf.EagerOp("AccumulatorSetGlobalStep") handle_ = convert(tf.EagerTensor, handle_) @@ -12246,13 +12246,13 @@ begin return res[1] end end - function accumulator_set_global_step(handle_, new_global_step_; name=nothing) - if tf.in_eager_mode() - accumulator_set_global_step_eager(handle_, new_global_step_; name=name) - else - accumulator_set_global_step_graph(handle_, new_global_step_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing) + if tf.in_eager_mode() + accumulator_set_global_step_eager(handle_, new_global_step_; name=name) + else + accumulator_set_global_step_graph(handle_, new_global_step_; name=name) + end end - end end @@ -12262,32 +12262,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedAdd") do - desc = tf.NodeDescription("QuantizedAdd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - min_x_ = convert(Tensor{Float32}, min_x_) - max_x_ = convert(Tensor{Float32}, max_x_) - min_y_ = convert(Tensor{Float32}, min_y_) - max_y_ = convert(Tensor{Float32}, max_y_) - (x_,) = tf.tf_promote(x_) - (y_,) = tf.tf_promote(y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedAdd") do + desc = tf.NodeDescription("QuantizedAdd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + min_x_ = convert(Tensor{Float32}, min_x_) + max_x_ = convert(Tensor{Float32}, max_x_) + min_y_ = convert(Tensor{Float32}, min_y_) + max_y_ = convert(Tensor{Float32}, max_y_) + (x_,) = tf.tf_promote(x_) + (y_,) = tf.tf_promote(y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, min_x_) + tf.add_input(desc, max_x_) + tf.add_input(desc, min_y_) + tf.add_input(desc, max_y_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) desc = tf.EagerOp("QuantizedAdd") x_ = convert(tf.EagerTensor, x_) @@ -12311,13 +12311,13 @@ begin return res end end - function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - if tf.in_eager_mode() - quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) - else - quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + if tf.in_eager_mode() + quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + else + 
quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + end end - end end @@ -12327,19 +12327,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) - local desc - tf.with_op_name(name, "Squeeze") do - desc = tf.NodeDescription("Squeeze") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if squeeze_dims !== nothing - desc["squeeze_dims"] = map(Base.identity, squeeze_dims) - end + function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + local desc + tf.with_op_name(name, "Squeeze") do + desc = tf.NodeDescription("Squeeze") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if squeeze_dims !== nothing + desc["squeeze_dims"] = map(Base.identity, squeeze_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) desc = tf.EagerOp("Squeeze") input_ = convert(tf.EagerTensor, input_) @@ -12355,13 +12355,13 @@ begin return res[1] end end - function squeeze(input_; name=nothing, squeeze_dims=nothing) - if tf.in_eager_mode() - squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) - else - squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) + if tf.in_eager_mode() + squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) + else + squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) + end end - end end @@ -12371,15 +12371,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_matching_files_dataset_graph(patterns_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do - desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") - patterns_ = convert(Tensor{String}, patterns_) - tf.add_input(desc, patterns_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_matching_files_dataset_graph(patterns_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do + desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") + patterns_ = convert(Tensor{String}, patterns_) + tf.add_input(desc, patterns_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_matching_files_dataset_eager(patterns_; name=nothing) desc = tf.EagerOp("ExperimentalMatchingFilesDataset") patterns_ = convert(tf.EagerTensor, patterns_) @@ -12391,13 +12391,13 @@ begin return res[1] end end - function experimental_matching_files_dataset(patterns_; name=nothing) - if tf.in_eager_mode() - experimental_matching_files_dataset_eager(patterns_; name=name) - else - experimental_matching_files_dataset_graph(patterns_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing) + if tf.in_eager_mode() + experimental_matching_files_dataset_eager(patterns_; name=name) + else + experimental_matching_files_dataset_graph(patterns_; name=name) + end end - end end @@ -12407,19 +12407,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) - local 
desc - tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do - desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - filename_ = convert(Tensor{String}, filename_) - compression_type_ = convert(Tensor{String}, compression_type_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - tf.add_input(desc, compression_type_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do + desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + filename_ = convert(Tensor{String}, filename_) + compression_type_ = convert(Tensor{String}, compression_type_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, filename_) + tf.add_input(desc, compression_type_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetToTFRecord") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -12435,13 +12435,13 @@ begin return res[1] end end - function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) - if tf.in_eager_mode() - experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) - else - experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) + if tf.in_eager_mode() + experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) + else + experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) + end end - end end @@ -12451,14 +12451,14 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function no_op_graph(; name=nothing) - local desc - tf.with_op_name(name, "NoOp") do - desc - tf.NodeDescription("NoOp") - end - tf.Tensor(tf.Operation(desc)) + function no_op_graph(; name=nothing) + local desc + tf.with_op_name(name, "NoOp") do + desc + tf.NodeDescription("NoOp") end + tf.Tensor(tf.Operation(desc)) + end function no_op_eager(; name=nothing) desc = tf.EagerOp("NoOp") res = tf.execute(desc) @@ -12468,13 +12468,13 @@ begin return res[1] end end - function no_op(; name=nothing) - if tf.in_eager_mode() - no_op_eager(; name=name) - else - no_op_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function no_op(; name=nothing) + if tf.in_eager_mode() + no_op_eager(; name=name) + else + no_op_graph(; name=name) + end end - end end @@ -12484,24 +12484,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "ZipDataset") do - desc = tf.NodeDescription("ZipDataset") - input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] - tf.add_input(desc, input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = 
map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end + function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ZipDataset") do + desc = tf.NodeDescription("ZipDataset") + input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] + tf.add_input(desc, input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ZipDataset") input_datasets_ = convert(tf.EagerTensor, input_datasets_) @@ -12522,13 +12522,13 @@ begin return res[1] end end - function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - else - zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end end - end end @@ -12538,27 +12538,27 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - tf.add_input(desc, parameters_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + tf.add_input(desc, parameters_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -12582,13 +12582,13 @@ begin return res[1] end end - function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -12598,19 +12598,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "IdentityReaderV2") do - desc = tf.NodeDescription("IdentityReaderV2") - if container !== 
nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReaderV2") do + desc = tf.NodeDescription("IdentityReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function identity_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("IdentityReaderV2") if container !== nothing @@ -12626,13 +12626,13 @@ begin return res[1] end end - function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) - else - identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -12642,19 +12642,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "LMDBReader") do - desc = tf.NodeDescription("LMDBReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "LMDBReader") do + desc = tf.NodeDescription("LMDBReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function lmdb_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("LMDBReader") if container !== nothing @@ -12670,13 +12670,13 @@ begin return res[1] end end - function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - lmdb_reader_eager(; name=name, container=container, shared_name=shared_name) - else - lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + lmdb_reader_eager(; name=name, container=container, shared_name=shared_name) + else + lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -12686,25 +12686,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "NcclAllReduce") do - desc = tf.NodeDescription("NcclAllReduce") - input_ = convert(Tensor{Any}, input_) - (input_,) = 
tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "NcclAllReduce") do + desc = tf.NodeDescription("NcclAllReduce") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("NcclAllReduce") input_ = convert(tf.EagerTensor, input_) @@ -12726,13 +12726,13 @@ begin return res[1] end end - function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end end @@ -12742,19 +12742,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "TextLineDataset") do - desc = tf.NodeDescription("TextLineDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) + function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TextLineDataset") do + desc = tf.NodeDescription("TextLineDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) end + tf.Tensor(tf.Operation(desc)) + end function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TextLineDataset") filenames_ = convert(tf.EagerTensor, filenames_) @@ -12770,13 +12770,13 @@ begin return res[1] end end - function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.in_eager_mode() - text_line_dataset_eager(filenames_, 
compression_type_, buffer_size_; name=name) - else - text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.in_eager_mode() + text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end end - end end @@ -12786,24 +12786,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - local desc - tf.with_op_name(name, "SdcaShrinkL1") do - desc = tf.NodeDescription("SdcaShrinkL1") - weights_ = [convert(Tensor{Float32}, x) for x = weights_] - tf.add_input(desc, weights_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end + function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + local desc + tf.with_op_name(name, "SdcaShrinkL1") do + desc = tf.NodeDescription("SdcaShrinkL1") + weights_ = [convert(Tensor{Float32}, x) for x = weights_] + tf.add_input(desc, weights_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) desc = tf.EagerOp("SdcaShrinkL1") weights_ = convert(tf.EagerTensor, weights_) @@ -12824,13 +12824,13 @@ begin return res[1] end end - function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - if tf.in_eager_mode() - sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) - else - sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + if tf.in_eager_mode() + sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + else + sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + end end - end end @@ -12840,22 +12840,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "TFRecordReaderV2") do - desc = tf.NodeDescription("TFRecordReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end + function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "TFRecordReaderV2") do + desc = tf.NodeDescription("TFRecordReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== 
nothing + desc["shared_name"] = Base.String(shared_name) + end + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tf_record_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) desc = tf.EagerOp("TFRecordReaderV2") if container !== nothing @@ -12874,13 +12874,13 @@ begin return res[1] end end - function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - if tf.in_eager_mode() - tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) - else - tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.in_eager_mode() + tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end end - end end @@ -12890,32 +12890,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "PaddedBatchDatasetV2") do - desc = tf.NodeDescription("PaddedBatchDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] - padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - tf.add_input(desc, drop_remainder_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) + function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDatasetV2") do + desc = tf.NodeDescription("PaddedBatchDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + tf.add_input(desc, drop_remainder_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + 
desc["N"] = Base.Int(N) + end end + tf.Tensor(tf.Operation(desc)) + end function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDatasetV2") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -12944,13 +12944,13 @@ begin return res[1] end end - function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - else - padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end end - end end @@ -12960,21 +12960,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do - desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") string_handle_ = convert(tf.EagerTensor, string_handle_) @@ -12992,13 +12992,13 @@ begin return res[1] end end - function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_from_string_handle_eager(string_handle_; name=name, 
output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -13008,29 +13008,29 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -13056,13 +13056,13 @@ begin return res[1] end end - function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -13072,17 +13072,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySize") do - desc = tf.NodeDescription("TensorArraySize") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_size_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySize") do + desc = tf.NodeDescription("TensorArraySize") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_size_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySize") handle_ = convert(tf.EagerTensor, handle_) @@ -13096,13 +13096,13 @@ begin return res[1] end end - function tensor_array_size(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_eager(handle_, flow_in_; name=name) - else - tensor_array_size_graph(handle_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_eager(handle_, flow_in_; name=name) + else + tensor_array_size_graph(handle_, flow_in_; name=name) + end end - end end @@ -13112,28 +13112,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapSize") do - desc = tf.NodeDescription("OrderedMapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapSize") do + desc = tf.NodeDescription("OrderedMapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if 
shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapSize") if capacity !== nothing @@ -13158,13 +13158,13 @@ begin return res[1] end end - function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -13174,22 +13174,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomUniform") do - desc = tf.NodeDescription("StatelessRandomUniform") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniform") do + desc = tf.NodeDescription("StatelessRandomUniform") + shape_ = convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniform") shape_ = convert(tf.EagerTensor, shape_) @@ -13208,13 +13208,13 @@ begin return res[1] end end - function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) + end end - end end @@ -13224,37 +13224,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "SparseToSparseSetOperation") do - desc = tf.NodeDescription("SparseToSparseSetOperation") - set1_indices_ = convert(Tensor{Int64}, set1_indices_) - set1_values_ = convert(Tensor{Any}, set1_values_) - set1_shape_ = convert(Tensor{Int64}, set1_shape_) - set2_indices_ = convert(Tensor{Int64}, set2_indices_) - set2_values_ = convert(Tensor{Any}, set2_values_) - set2_shape_ = convert(Tensor{Int64}, set2_shape_) - (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_) - tf.add_input(desc, set1_indices_) - tf.add_input(desc, set1_values_) - tf.add_input(desc, set1_shape_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SparseToSparseSetOperation") do + desc = tf.NodeDescription("SparseToSparseSetOperation") + set1_indices_ = convert(Tensor{Int64}, set1_indices_) + set1_values_ = convert(Tensor{Any}, set1_values_) + set1_shape_ = convert(Tensor{Int64}, set1_shape_) + set2_indices_ = convert(Tensor{Int64}, set2_indices_) + set2_values_ = convert(Tensor{Any}, set2_values_) + set2_shape_ = convert(Tensor{Int64}, set2_shape_) + (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_) + tf.add_input(desc, set1_indices_) + tf.add_input(desc, set1_values_) + tf.add_input(desc, set1_shape_) + tf.add_input(desc, set2_indices_) + tf.add_input(desc, set2_values_) + tf.add_input(desc, set2_shape_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToSparseSetOperation") set1_indices_ = convert(tf.EagerTensor, set1_indices_) @@ -13284,13 +13284,13 @@ begin return res end end - function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - end end @@ -13300,25 +13300,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - local desc - tf.with_op_name(name, "TensorSummary") do - desc = tf.NodeDescription("TensorSummary") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if description !== nothing - desc["description"] = Base.String(description) - end - if labels !== nothing - desc["labels"] = map(Base.identity, labels) - end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end + function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + local desc + tf.with_op_name(name, "TensorSummary") do + desc = tf.NodeDescription("TensorSummary") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if description !== nothing + desc["description"] = Base.String(description) + end + if labels !== nothing + desc["labels"] = map(Base.identity, labels) + end + if display_name !== nothing + desc["display_name"] = Base.String(display_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) desc = tf.EagerOp("TensorSummary") tensor_ = convert(tf.EagerTensor, tensor_) @@ -13340,13 +13340,13 @@ begin return res[1] end end - function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - if tf.in_eager_mode() - tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) - else - tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + if tf.in_eager_mode() + tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) + else + tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) + end end - end end @@ -13356,24 +13356,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - local desc - tf.with_op_name(name, "RemoteFusedGraphExecute") do - desc = tf.NodeDescription("RemoteFusedGraphExecute") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if Tinputs !== nothing - desc["Tinputs"] = 
map(Base.identity, Tinputs) - end - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) - end - if serialized_remote_fused_graph_execute_info !== nothing - desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) - end + function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + local desc + tf.with_op_name(name, "RemoteFusedGraphExecute") do + desc = tf.NodeDescription("RemoteFusedGraphExecute") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if serialized_remote_fused_graph_execute_info !== nothing + desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) desc = tf.EagerOp("RemoteFusedGraphExecute") inputs_ = convert(tf.EagerTensor, inputs_) @@ -13394,13 +13394,13 @@ begin return res[1] end end - function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - if tf.in_eager_mode() - remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) - else - remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + if tf.in_eager_mode() + remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + else + remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + end end - end end @@ -13410,22 +13410,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - local desc - tf.with_op_name(name, "SparseSliceGrad") do - desc = tf.NodeDescription("SparseSliceGrad") - backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_start_ = convert(Tensor{Int64}, input_start_) - output_indices_ = convert(Tensor{Int64}, output_indices_) - (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_start_) - tf.add_input(desc, output_indices_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseSliceGrad") do + desc = 
tf.NodeDescription("SparseSliceGrad") + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_start_ = convert(Tensor{Int64}, input_start_) + output_indices_ = convert(Tensor{Int64}, output_indices_) + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_start_) + tf.add_input(desc, output_indices_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) desc = tf.EagerOp("SparseSliceGrad") backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) @@ -13444,13 +13444,13 @@ begin return res[1] end end - function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - if tf.in_eager_mode() - sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) - else - sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + if tf.in_eager_mode() + sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + else + sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + end end - end end @@ -13460,26 +13460,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "Cumsum") do - desc = tf.NodeDescription("Cumsum") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end + function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumsum") do + desc = tf.NodeDescription("Cumsum") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumsum") x_ = convert(tf.EagerTensor, x_) @@ -13501,13 +13501,13 @@ begin return res[1] end end - function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.in_eager_mode() - cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - else - cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.in_eager_mode() + 
cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end end - end end @@ -13517,35 +13517,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do - desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") - t_ = convert(Tensor{Any}, t_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - gamma_ = convert(Tensor{Any}, gamma_) - backprop_ = convert(Tensor{Any}, backprop_) - (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, gamma_) - tf.add_input(desc, backprop_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out + function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") + t_ = convert(Tensor{Any}, t_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + gamma_ = convert(Tensor{Any}, gamma_) + backprop_ = convert(Tensor{Any}, backprop_) + (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_) + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, gamma_) + tf.add_input(desc, backprop_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") t_ = convert(tf.EagerTensor, t_) @@ -13576,46 +13576,46 @@ begin return res end end - function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + 
batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - end end """ - avg_pool_grad(orig_input_shape, grad; data_format=NHWC) + avg_pool_grad(orig_input_shape, grad; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPoolGrad") do - desc = tf.NodeDescription("AvgPoolGrad") - orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) - grad_ = convert(Tensor{Any}, grad_) - (grad_,) = tf.tf_promote(grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPoolGrad") do + desc = tf.NodeDescription("AvgPoolGrad") + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + grad_ = convert(Tensor{Any}, grad_) + (grad_,) = tf.tf_promote(grad_) + tf.add_input(desc, orig_input_shape_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPoolGrad") orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) @@ -13642,13 +13642,13 @@ begin return res[1] end end - function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -13658,22 +13658,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "RestoreV2") do - desc = tf.NodeDescription("RestoreV2") - prefix_ = convert(Tensor{String}, prefix_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "RestoreV2") do + desc = tf.NodeDescription("RestoreV2") + prefix_ = convert(Tensor{String}, prefix_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) desc = tf.EagerOp("RestoreV2") prefix_ = convert(tf.EagerTensor, prefix_) @@ -13692,13 +13692,13 @@ begin return res[1] end end - function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) - else - restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + else + restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + end end - end end @@ -13708,16 +13708,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu6_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Relu6") do - desc = tf.NodeDescription("Relu6") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function relu6_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6") do + desc = tf.NodeDescription("Relu6") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function relu6_eager(features_; name=nothing) desc = tf.EagerOp("Relu6") features_ = convert(tf.EagerTensor, features_) @@ -13730,13 +13730,13 @@ begin return res[1] end end - function relu6(features_; name=nothing) - if tf.in_eager_mode() - relu6_eager(features_; name=name) - else - relu6_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6(features_; name=nothing) + if tf.in_eager_mode() + relu6_eager(features_; name=name) + else + relu6_graph(features_; name=name) + end end - end end @@ -13746,37 +13746,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, 
grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyRMSProp") do - desc = tf.NodeDescription("SparseApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyRMSProp") do + desc = tf.NodeDescription("SparseApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -13816,13 +13816,13 @@ begin return res[1] end end - function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end 
end - end end @@ -13832,31 +13832,31 @@ end Receives the named tensor from send_device on recv_device. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_Recv") do - desc = tf.NodeDescription("_Recv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) + function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Recv") do + desc = tf.NodeDescription("_Recv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end end + tf.Tensor(tf.Operation(desc)) + end function _recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Recv") if tensor_type !== nothing @@ -13884,44 +13884,44 @@ begin return res[1] end end - function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) 
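# Every op in this file now follows the same three-part layout: a `<op>_graph`
# builder, an `<op>_eager` executor, and a public dispatcher that picks between
# them at call time. A minimal sketch of the dispatch shape, with `my_op` as a
# hypothetical stand-in for any generated op:
function my_op(args...; kwargs...)
    if tf.in_eager_mode()
        my_op_eager(args...; kwargs...)  # run the kernel now, return its values
    else
        my_op_graph(args...; kwargs...)  # add a node to the graph, return a Tensor
    end
end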
+ end end - end end """ - max_pool(input; data_format=NHWC) + max_pool(input; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool") do - desc = tf.NodeDescription("MaxPool") - input_ = convert(Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool") do + desc = tf.NodeDescription("MaxPool") + input_ = convert(Tensor{Float32}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool") input_ = convert(tf.EagerTensor, input_) @@ -13946,13 +13946,13 @@ begin return res[1] end end - function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -13962,16 +13962,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function invert_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Invert") do - desc = tf.NodeDescription("Invert") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function invert_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Invert") do + desc = tf.NodeDescription("Invert") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function invert_eager(x_; name=nothing) desc = tf.EagerOp("Invert") x_ = convert(tf.EagerTensor, x_) @@ -13984,13 +13984,13 @@ begin return res[1] end end - function invert(x_; name=nothing) - if tf.in_eager_mode() - invert_eager(x_; name=name) - else - invert_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function invert(x_; name=nothing) + if tf.in_eager_mode() + invert_eager(x_; name=name) + else + invert_graph(x_; name=name) + end end - end end @@ -14000,19 +14000,19 @@ end *NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) - local desc - tf.with_op_name(name, "_UnaryOpsComposition") do - desc = tf.NodeDescription("_UnaryOpsComposition") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if op_names !== nothing - desc["op_names"] = map(Base.identity, op_names) - end + function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + local desc + tf.with_op_name(name, "_UnaryOpsComposition") do + desc = tf.NodeDescription("_UnaryOpsComposition") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if op_names !== nothing + desc["op_names"] = map(Base.identity, op_names) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) desc = tf.EagerOp("_UnaryOpsComposition") x_ = convert(tf.EagerTensor, x_) @@ -14028,13 +14028,13 @@ begin return res[1] end end - function _unary_ops_composition(x_; name=nothing, op_names=nothing) - if tf.in_eager_mode() - _unary_ops_composition_eager(x_; name=name, op_names=op_names) - else - _unary_ops_composition_graph(x_; name=name, op_names=op_names) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _unary_ops_composition(x_; name=nothing, op_names=nothing) + if tf.in_eager_mode() + _unary_ops_composition_eager(x_; name=name, op_names=op_names) + else + _unary_ops_composition_graph(x_; name=name, op_names=op_names) + end end - end end @@ -14044,35 +14044,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalMapDataset") do - desc = tf.NodeDescription("ExperimentalMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalMapDataset") do + desc = tf.NodeDescription("ExperimentalMapDataset") + input_dataset_ = 
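# `tf.tf_promote`, called in nearly every builder above, converts its arguments
# to a shared TensorFlow element type so the kernel sees matching dtypes. As a
# rough analogy only (this is not the package's implementation), Julia's own
# `promote` does the equivalent for plain values:
promote(1, 2.0)  # == (1.0, 2.0): both arguments now share one element type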
convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -14104,13 +14104,13 @@ begin return res[1] end end - function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - else - experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + else + experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end end - end end @@ -14120,31 +14120,31 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - velocities_ = convert(Tensor{Float32}, velocities_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + velocities_ = convert(Tensor{Float32}, velocities_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, velocities_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -14172,13 +14172,13 @@ begin return res[1] end end - function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -14188,18 +14188,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 
=# tf.@op function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "ParseTensor") do - desc = tf.NodeDescription("ParseTensor") - serialized_ = convert(Tensor{String}, serialized_) - tf.add_input(desc, serialized_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "ParseTensor") do + desc = tf.NodeDescription("ParseTensor") + serialized_ = convert(Tensor{String}, serialized_) + tf.add_input(desc, serialized_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) desc = tf.EagerOp("ParseTensor") serialized_ = convert(tf.EagerTensor, serialized_) @@ -14214,13 +14214,13 @@ begin return res[1] end end - function parse_tensor(serialized_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - parse_tensor_eager(serialized_; name=name, out_type=out_type) - else - parse_tensor_graph(serialized_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + parse_tensor_eager(serialized_; name=name, out_type=out_type) + else + parse_tensor_graph(serialized_; name=name, out_type=out_type) + end end - end end @@ -14230,25 +14230,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do - desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do + desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_materialized_index_dataset_handle_eager(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle") if container !== nothing @@ -14270,13 +14270,13 @@ begin return res[1] end end - function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() 
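# The `_eager` variants mirror the graph builders: inputs become `EagerTensor`s,
# attributes are set directly on a `tf.EagerOp`, and `tf.execute` returns a
# vector of result handles, with single-output ops unwrapping `res[1]`. A
# condensed sketch for a hypothetical single-output op `MyOp`:
function my_op_eager(x_; dtype=nothing)
    desc = tf.EagerOp("MyOp")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    if dtype !== nothing
        desc["dtype"] = Base.identity(dtype)  # attrs are stored on the op by name
    end
    res = tf.execute(desc)
    return res[1]  # first and only output
end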
- experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) - else - experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + else + experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -14286,25 +14286,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do - desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - shard_num_ = convert(Tensor{Int32}, shard_num_) - incarnation_id_ = convert(Tensor{Int64}, incarnation_id_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, shard_num_) - tf.add_input(desc, incarnation_id_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do + desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + shard_num_ = convert(Tensor{Int32}, shard_num_) + incarnation_id_ = convert(Tensor{Int64}, incarnation_id_) + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, shard_num_) + tf.add_input(desc, incarnation_id_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) @@ -14326,13 +14326,13 @@ begin return res[1] end end - function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) - else - 
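# A few ops here, such as ExperimentalMaterializedIndexDatasetHandle above and
# FakeParam further below, take no tensor inputs at all. Their generated
# wrappers are therefore keyword-only: every argument after the leading `;`
# becomes a node attribute, and the builder body is nothing but optional
# `desc["..."] = ...` assignments, e.g.:
#
#     function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing)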
multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -14342,27 +14342,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "RandomUniformInt") do - desc = tf.NodeDescription("RandomUniformInt") - shape_ = convert(Tensor{Any}, shape_) - minval_ = convert(Tensor{Any}, minval_) - maxval_ = convert(Tensor{Any}, maxval_) - (shape_,) = tf.tf_promote(shape_) - (minval_, maxval_) = tf.tf_promote(minval_, maxval_) - tf.add_input(desc, shape_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomUniformInt") do + desc = tf.NodeDescription("RandomUniformInt") + shape_ = convert(Tensor{Any}, shape_) + minval_ = convert(Tensor{Any}, minval_) + maxval_ = convert(Tensor{Any}, maxval_) + (shape_,) = tf.tf_promote(shape_) + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + tf.add_input(desc, shape_) + tf.add_input(desc, minval_) + tf.add_input(desc, maxval_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomUniformInt") shape_ = convert(tf.EagerTensor, shape_) @@ -14387,13 +14387,13 @@ begin return res[1] end end - function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) - else - random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + else + random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + end end - end end @@ -14403,24 +14403,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) - local desc - tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do - desc = 
tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") - features_ = convert(Tensor{Any}, features_) - labels_ = convert(Tensor{Int64}, labels_) - (features_,) = tf.tf_promote(features_) - (labels_,) = tf.tf_promote(labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + local desc + tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do + desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") + features_ = convert(Tensor{Any}, features_) + labels_ = convert(Tensor{Int64}, labels_) + (features_,) = tf.tf_promote(features_) + (labels_,) = tf.tf_promote(labels_) + tf.add_input(desc, features_) + tf.add_input(desc, labels_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") features_ = convert(tf.EagerTensor, features_) @@ -14436,13 +14436,13 @@ begin return res end end - function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - if tf.in_eager_mode() - sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) - else - sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.in_eager_mode() + sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end end - end end @@ -14452,22 +14452,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayReadV2") do - desc = tf.NodeDescription("TensorArrayReadV2") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayReadV2") do + desc = tf.NodeDescription("TensorArrayReadV2") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV2") handle_ = convert(tf.EagerTensor, handle_) @@ -14486,13 +14486,13 @@ begin return res[1] end end - function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_v2_graph(handle_, 
index_, flow_in_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v2_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end end @@ -14502,24 +14502,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadUpTo") do - desc = tf.NodeDescription("ReaderReadUpTo") - reader_handle_ = convert(Tensor{String}, reader_handle_) - queue_handle_ = convert(Tensor{String}, queue_handle_) - num_records_ = convert(Tensor{Int64}, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadUpTo") do + desc = tf.NodeDescription("ReaderReadUpTo") + reader_handle_ = convert(Tensor{String}, reader_handle_) + queue_handle_ = convert(Tensor{String}, queue_handle_) + num_records_ = convert(Tensor{Int64}, num_records_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.add_input(desc, num_records_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpTo") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -14535,45 +14535,45 @@ begin return res end end - function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) - if tf.in_eager_mode() - reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) - else - reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.in_eager_mode() + reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) + end end - end end """ - encode_proto(sizes, values; descriptor_source=local://) + encode_proto(sizes, values; descriptor_source=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - local desc - tf.with_op_name(name, "EncodeProto") do - desc = tf.NodeDescription("EncodeProto") - sizes_ = convert(Tensor{Int32}, sizes_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, sizes_) - tf.add_input(desc, values_) - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) - end - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if Tinput_types !== nothing - 
desc["Tinput_types"] = map(Base.identity, Tinput_types) - end + function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + local desc + tf.with_op_name(name, "EncodeProto") do + desc = tf.NodeDescription("EncodeProto") + sizes_ = convert(Tensor{Int32}, sizes_) + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, sizes_) + tf.add_input(desc, values_) + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + if Tinput_types !== nothing + desc["Tinput_types"] = map(Base.identity, Tinput_types) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) desc = tf.EagerOp("EncodeProto") sizes_ = convert(tf.EagerTensor, sizes_) @@ -14599,13 +14599,13 @@ begin return res[1] end end - function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - if tf.in_eager_mode() - encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) - else - encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + if tf.in_eager_mode() + encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + else + encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + end end - end end @@ -14615,61 +14615,61 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSliceGrad") do - desc = tf.NodeDescription("StridedSliceGrad") - shape_ = convert(Tensor{Any}, shape_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - dy_ = convert(Tensor{Any}, dy_) - (dy_,) = tf.tf_promote(dy_) - (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_) - tf.add_input(desc, shape_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, dy_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - 
desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) + function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSliceGrad") do + desc = tf.NodeDescription("StridedSliceGrad") + shape_ = convert(Tensor{Any}, shape_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + dy_ = convert(Tensor{Any}, dy_) + (dy_,) = tf.tf_promote(dy_) + (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_) + tf.add_input(desc, shape_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, dy_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end end + tf.Tensor(tf.Operation(desc)) + end function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceGrad") shape_ = convert(tf.EagerTensor, shape_) @@ -14727,13 +14727,13 @@ begin return res[1] end end - function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, 
end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - end end @@ -14743,25 +14743,25 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclReduceSend") do - desc = tf.NodeDescription("_NcclReduceSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceSend") do + desc = tf.NodeDescription("_NcclReduceSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceSend") input_ = convert(tf.EagerTensor, input_) @@ -14783,13 +14783,13 @@ begin return res[1] end end - function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end end @@ -14799,30 +14799,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padded_batch_dataset_graph(input_dataset_, batch_size_, 
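# Ops with list-valued inputs, such as PaddedBatchDataset here (its
# `padded_shapes_` and `padding_values_`) and EncodeProto above (`values_`),
# convert each element of the list before wiring the whole list in as a single
# input:
padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_]
tf.add_input(desc, padded_shapes_)  # one list input carrying N component tensors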
padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "PaddedBatchDataset") do - desc = tf.NodeDescription("PaddedBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] - padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) + function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDataset") do + desc = tf.NodeDescription("PaddedBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, padded_shapes_) + tf.add_input(desc, padding_values_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) + end end + tf.Tensor(tf.Operation(desc)) + end function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("PaddedBatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -14849,38 +14849,38 @@ begin return res[1] end end - function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - else - padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end end - end end """ - data_format_vec_permute(x; src_format=NHWC, dst_format=NCHW) + data_format_vec_permute(x; src_format=, dst_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) - local desc - tf.with_op_name(name, "DataFormatVecPermute") do - desc = tf.NodeDescription("DataFormatVecPermute") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) - end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end + function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatVecPermute") do + desc = tf.NodeDescription("DataFormatVecPermute") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) desc = tf.EagerOp("DataFormatVecPermute") x_ = convert(tf.EagerTensor, x_) @@ -14899,43 +14899,43 @@ begin return res[1] end end - function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) - if tf.in_eager_mode() - data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) - else - data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) + if tf.in_eager_mode() + data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end end - end end """ - string_format(inputs; template=%s, placeholder=%s, summarize=3) + string_format(inputs; template=, placeholder=, summarize=3) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "StringFormat") do - desc = tf.NodeDescription("StringFormat") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if template !== nothing - desc["template"] = Base.String(template) - end - if placeholder !== nothing - desc["placeholder"] = Base.String(placeholder) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end + function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "StringFormat") do + desc = tf.NodeDescription("StringFormat") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if template !== nothing + desc["template"] = Base.String(template) + end + if placeholder !== nothing + desc["placeholder"] = Base.String(placeholder) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) desc 
= tf.EagerOp("StringFormat") inputs_ = convert(tf.EagerTensor, inputs_) @@ -14959,13 +14959,13 @@ begin return res[1] end end - function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - if tf.in_eager_mode() - string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) - else - string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + if tf.in_eager_mode() + string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + else + string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + end end - end end @@ -14975,31 +14975,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - local desc - tf.with_op_name(name, "AsString") do - desc = tf.NodeDescription("AsString") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if precision !== nothing - desc["precision"] = Base.Int(precision) - end - if scientific !== nothing - desc["scientific"] = Base.Bool(scientific) - end - if shortest !== nothing - desc["shortest"] = Base.Bool(shortest) - end - if width !== nothing - desc["width"] = Base.Int(width) - end - if fill !== nothing - desc["fill"] = Base.String(fill) - end - end - tf.Tensor(tf.Operation(desc)) + function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + local desc + tf.with_op_name(name, "AsString") do + desc = tf.NodeDescription("AsString") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if precision !== nothing + desc["precision"] = Base.Int(precision) + end + if scientific !== nothing + desc["scientific"] = Base.Bool(scientific) + end + if shortest !== nothing + desc["shortest"] = Base.Bool(shortest) + end + if width !== nothing + desc["width"] = Base.Int(width) + end + if fill !== nothing + desc["fill"] = Base.String(fill) + end end + tf.Tensor(tf.Operation(desc)) + end function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) desc = tf.EagerOp("AsString") input_ = convert(tf.EagerTensor, input_) @@ -15027,13 +15027,13 @@ begin return res[1] end end - function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - if tf.in_eager_mode() - as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) - else - as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + if tf.in_eager_mode() + as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, 
fill=fill) + else + as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + end end - end end @@ -15043,23 +15043,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueMany") do - desc = tf.NodeDescription("QueueEnqueueMany") - handle_ = convert(Tensor{String}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueMany") do + desc = tf.NodeDescription("QueueEnqueueMany") + handle_ = convert(Tensor{String}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueMany") handle_ = convert(tf.EagerTensor, handle_) @@ -15079,13 +15079,13 @@ begin return res[1] end end - function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - end end @@ -15095,19 +15095,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "FakeParam") do - desc = tf.NodeDescription("FakeParam") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "FakeParam") do + desc = tf.NodeDescription("FakeParam") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fake_param_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("FakeParam") if dtype !== nothing @@ -15123,13 +15123,13 @@ begin return res[1] end end - function fake_param(; name=nothing, dtype=nothing, 
shape=nothing) - if tf.in_eager_mode() - fake_param_eager(; name=name, dtype=dtype, shape=shape) - else - fake_param_graph(; name=name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_param(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + fake_param_eager(; name=name, dtype=dtype, shape=shape) + else + fake_param_graph(; name=name, dtype=dtype, shape=shape) + end end - end end @@ -15139,28 +15139,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ApplyAdagrad") do - desc = tf.NodeDescription("ApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end + function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ApplyAdagrad") do + desc = tf.NodeDescription("ApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ApplyAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -15188,13 +15188,13 @@ begin return res[1] end end - function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - else - apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end end @@ -15204,15 +15204,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_iterator_get_device_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIteratorGetDevice") do - desc = tf.NodeDescription("ExperimentalIteratorGetDevice") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - end - 
tf.Tensor(tf.Operation(desc)) + function experimental_iterator_get_device_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIteratorGetDevice") do + desc = tf.NodeDescription("ExperimentalIteratorGetDevice") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_iterator_get_device_eager(resource_; name=nothing) desc = tf.EagerOp("ExperimentalIteratorGetDevice") resource_ = convert(tf.EagerTensor, resource_) @@ -15224,13 +15224,13 @@ begin return res[1] end end - function experimental_iterator_get_device(resource_; name=nothing) - if tf.in_eager_mode() - experimental_iterator_get_device_eager(resource_; name=name) - else - experimental_iterator_get_device_graph(resource_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing) + if tf.in_eager_mode() + experimental_iterator_get_device_eager(resource_; name=name) + else + experimental_iterator_get_device_graph(resource_; name=name) + end end - end end @@ -15240,22 +15240,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) - local desc - tf.with_op_name(name, "AdjustContrast") do - desc = tf.NodeDescription("AdjustContrast") - images_ = convert(Tensor{Any}, images_) - contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) - min_value_ = convert(Tensor{Float32}, min_value_) - max_value_ = convert(Tensor{Float32}, max_value_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) - tf.add_input(desc, min_value_) - tf.add_input(desc, max_value_) - end - tf.Tensor(tf.Operation(desc)) + function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) + local desc + tf.with_op_name(name, "AdjustContrast") do + desc = tf.NodeDescription("AdjustContrast") + images_ = convert(Tensor{Any}, images_) + contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) + min_value_ = convert(Tensor{Float32}, min_value_) + max_value_ = convert(Tensor{Float32}, max_value_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, contrast_factor_) + tf.add_input(desc, min_value_) + tf.add_input(desc, max_value_) end + tf.Tensor(tf.Operation(desc)) + end function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) desc = tf.EagerOp("AdjustContrast") images_ = convert(tf.EagerTensor, images_) @@ -15274,13 +15274,13 @@ begin return res[1] end end - function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) - if tf.in_eager_mode() - adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) - else - adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) + if tf.in_eager_mode() + adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) + else + adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) + end end - end end @@ -15290,14 +15290,14 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
optional_none_graph(; name=nothing)
-            local desc
-            tf.with_op_name(name, "OptionalNone") do
-                desc =
-                tf.NodeDescription("OptionalNone")
-            end
-            tf.Tensor(tf.Operation(desc))
+    function optional_none_graph(; name=nothing)
+        local desc
+        tf.with_op_name(name, "OptionalNone") do
+            desc =
+            tf.NodeDescription("OptionalNone")
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function optional_none_eager(; name=nothing)
         desc = tf.EagerOp("OptionalNone")
         res = tf.execute(desc)
         if length(res) >= 1
@@ -15307,13 +15307,13 @@ begin
             return res[1]
         end
     end
-    function optional_none(; name=nothing)
-        if tf.in_eager_mode()
-            optional_none_eager(; name=name)
-        else
-            optional_none_graph(; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_none(; name=nothing)
+            if tf.in_eager_mode()
+                optional_none_eager(; name=name)
+            else
+                optional_none_graph(; name=name)
+            end
         end
-    end
 end
@@ -15323,28 +15323,28 @@ end
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
-            local desc
-            tf.with_op_name(name, "ExtractImagePatches") do
-                desc = tf.NodeDescription("ExtractImagePatches")
-                images_ = convert(Tensor{Any}, images_)
-                (images_,) = tf.tf_promote(images_)
-                tf.add_input(desc, images_)
-                if ksizes !== nothing
-                    desc["ksizes"] = map(Base.identity, ksizes)
-                end
-                if strides !== nothing
-                    desc["strides"] = map(Base.identity, strides)
-                end
-                if rates !== nothing
-                    desc["rates"] = map(Base.identity, rates)
-                end
-                if padding !== nothing
-                    desc["padding"] = Base.String(padding)
-                end
+    function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+        local desc
+        tf.with_op_name(name, "ExtractImagePatches") do
+            desc = tf.NodeDescription("ExtractImagePatches")
+            images_ = convert(Tensor{Any}, images_)
+            (images_,) = tf.tf_promote(images_)
+            tf.add_input(desc, images_)
+            if ksizes !== nothing
+                desc["ksizes"] = map(Base.identity, ksizes)
+            end
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if rates !== nothing
+                desc["rates"] = map(Base.identity, rates)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
             end
-                tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
         desc = tf.EagerOp("ExtractImagePatches")
         images_ = convert(tf.EagerTensor, images_)
@@ -15369,13 +15369,13 @@ begin
             return res[1]
         end
     end
-    function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
-        if tf.in_eager_mode()
-            extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
-        else
-            extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing)
+            if tf.in_eager_mode()
+                extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
+            else
+                extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding)
+            end
         end
-    end
 end
@@ -15385,25 +15385,25 @@ end
 """
 begin
-    #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "VariableV2") do - desc = tf.NodeDescription("VariableV2") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "VariableV2") do + desc = tf.NodeDescription("VariableV2") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function variable_v2_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("VariableV2") if shape !== nothing @@ -15425,13 +15425,13 @@ begin return res[1] end end - function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - else - variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end end - end end @@ -15441,16 +15441,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function elu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Elu") do - desc = tf.NodeDescription("Elu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function elu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Elu") do + desc = tf.NodeDescription("Elu") + features_ = convert(Tensor{Any}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function elu_eager(features_; name=nothing) desc = tf.EagerOp("Elu") features_ = convert(tf.EagerTensor, features_) @@ -15463,13 +15463,13 @@ begin return res[1] end end - function elu(features_; name=nothing) - if tf.in_eager_mode() - elu_eager(features_; name=name) - else - elu_graph(features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu(features_; name=nothing) + if tf.in_eager_mode() + elu_eager(features_; name=name) + else + elu_graph(features_; name=name) + end end - end end @@ -15479,25 +15479,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterUpdate") do - desc = tf.NodeDescription("ScatterUpdate") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterUpdate") do + desc = tf.NodeDescription("ScatterUpdate") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterUpdate") ref_ = convert(tf.EagerTensor, ref_) @@ -15519,13 +15519,13 @@ begin return res[1] end end - function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -15535,18 +15535,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "FloorMod") do - desc = tf.NodeDescription("FloorMod") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function floor_mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "FloorMod") do + desc = tf.NodeDescription("FloorMod") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function floor_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("FloorMod") x_ = convert(tf.EagerTensor, x_) @@ -15562,13 +15562,13 @@ begin return res[1] end end - function floor_mod(x_, y_; name=nothing) - if tf.in_eager_mode() - floor_mod_eager(x_, y_; name=name) - else - floor_mod_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_mod(x_, y_; name=nothing) + if tf.in_eager_mode() + floor_mod_eager(x_, y_; name=name) + else + floor_mod_graph(x_, y_; 
name=name) + end end - end end @@ -15578,21 +15578,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do - desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do + desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -15610,13 +15610,13 @@ begin return res[1] end end - function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -15626,27 +15626,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do - desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_) - tag_ = convert(Tensor{String}, tag_) - counter_prefix_ = convert(Tensor{String}, counter_prefix_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, stats_aggregator_) - tf.add_input(desc, tag_) - tf.add_input(desc, counter_prefix_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function 
experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do + desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_) + tag_ = convert(Tensor{String}, tag_) + counter_prefix_ = convert(Tensor{String}, counter_prefix_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, stats_aggregator_) + tf.add_input(desc, tag_) + tf.add_input(desc, counter_prefix_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -15670,13 +15670,13 @@ begin return res[1] end end - function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -15686,31 +15686,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "ComputeAccidentalHits") do - desc = tf.NodeDescription("ComputeAccidentalHits") - true_classes_ = convert(Tensor{Int64}, true_classes_) - sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_) - tf.add_input(desc, true_classes_) - tf.add_input(desc, sampled_candidates_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "ComputeAccidentalHits") do + desc = tf.NodeDescription("ComputeAccidentalHits") + true_classes_ = convert(Tensor{Int64}, 
true_classes_) + sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_) + tf.add_input(desc, true_classes_) + tf.add_input(desc, sampled_candidates_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ComputeAccidentalHits") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -15733,13 +15733,13 @@ begin return res end end - function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) - else - compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + else + compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + end end - end end @@ -15749,18 +15749,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "StringToNumber") do - desc = tf.NodeDescription("StringToNumber") - string_tensor_ = convert(Tensor{String}, string_tensor_) - tf.add_input(desc, string_tensor_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "StringToNumber") do + desc = tf.NodeDescription("StringToNumber") + string_tensor_ = convert(Tensor{String}, string_tensor_) + tf.add_input(desc, string_tensor_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) desc = tf.EagerOp("StringToNumber") string_tensor_ = convert(tf.EagerTensor, string_tensor_) @@ -15775,13 +15775,13 @@ begin return res[1] end end - function string_to_number(string_tensor_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - string_to_number_eager(string_tensor_; name=name, out_type=out_type) - else - string_to_number_graph(string_tensor_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + string_to_number_eager(string_tensor_; name=name, out_type=out_type) + else + string_to_number_graph(string_tensor_; name=name, out_type=out_type) + end end - end end @@ -15791,16 +15791,16 @@ end """ begin 
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function snapshot_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Snapshot") do - desc = tf.NodeDescription("Snapshot") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function snapshot_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Snapshot") do + desc = tf.NodeDescription("Snapshot") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function snapshot_eager(input_; name=nothing) desc = tf.EagerOp("Snapshot") input_ = convert(tf.EagerTensor, input_) @@ -15813,13 +15813,13 @@ begin return res[1] end end - function snapshot(input_; name=nothing) - if tf.in_eager_mode() - snapshot_eager(input_; name=name) - else - snapshot_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function snapshot(input_; name=nothing) + if tf.in_eager_mode() + snapshot_eager(input_; name=name) + else + snapshot_graph(input_; name=name) + end end - end end @@ -15829,17 +15829,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) - local desc - tf.with_op_name(name, "DeserializeIterator") do - desc = tf.NodeDescription("DeserializeIterator") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - serialized_ = convert(Tensor{Any}, serialized_) - tf.add_input(desc, resource_handle_) - tf.add_input(desc, serialized_) - end - tf.Tensor(tf.Operation(desc)) + function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) + local desc + tf.with_op_name(name, "DeserializeIterator") do + desc = tf.NodeDescription("DeserializeIterator") + resource_handle_ = convert(Tensor{Any}, resource_handle_) + serialized_ = convert(Tensor{Any}, serialized_) + tf.add_input(desc, resource_handle_) + tf.add_input(desc, serialized_) end + tf.Tensor(tf.Operation(desc)) + end function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) desc = tf.EagerOp("DeserializeIterator") resource_handle_ = convert(tf.EagerTensor, resource_handle_) @@ -15853,13 +15853,13 @@ begin return res[1] end end - function deserialize_iterator(resource_handle_, serialized_; name=nothing) - if tf.in_eager_mode() - deserialize_iterator_eager(resource_handle_, serialized_; name=name) - else - deserialize_iterator_graph(resource_handle_, serialized_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing) + if tf.in_eager_mode() + deserialize_iterator_eager(resource_handle_, serialized_; name=name) + else + deserialize_iterator_graph(resource_handle_, serialized_; name=name) + end end - end end @@ -15869,16 +15869,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atan_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Atan") do - desc = tf.NodeDescription("Atan") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function atan_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Atan") do + desc = tf.NodeDescription("Atan") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + 
tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function atan_eager(x_; name=nothing) desc = tf.EagerOp("Atan") x_ = convert(tf.EagerTensor, x_) @@ -15891,13 +15891,13 @@ begin return res[1] end end - function atan(x_; name=nothing) - if tf.in_eager_mode() - atan_eager(x_; name=name) - else - atan_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan(x_; name=nothing) + if tf.in_eager_mode() + atan_eager(x_; name=name) + else + atan_graph(x_; name=name) + end end - end end @@ -15907,24 +15907,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - local desc - tf.with_op_name(name, "MatMul") do - desc = tf.NodeDescription("MatMul") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end + function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + local desc + tf.with_op_name(name, "MatMul") do + desc = tf.NodeDescription("MatMul") + a_ = convert(Tensor{Any}, a_) + b_ = convert(Tensor{Any}, b_) + (a_, b_) = tf.tf_promote(a_, b_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) desc = tf.EagerOp("MatMul") a_ = convert(tf.EagerTensor, a_) @@ -15946,13 +15946,13 @@ begin return res[1] end end - function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - if tf.in_eager_mode() - mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) - else - mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + if tf.in_eager_mode() + mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + else + mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + end end - end end @@ -15962,16 +15962,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function erfc_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Erfc") do - desc = tf.NodeDescription("Erfc") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function erfc_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Erfc") do + desc = tf.NodeDescription("Erfc") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function erfc_eager(x_; name=nothing) desc = tf.EagerOp("Erfc") x_ = convert(tf.EagerTensor, x_) @@ -15984,13 +15984,13 @@ begin return res[1] end end - function erfc(x_; name=nothing) - if tf.in_eager_mode() - erfc_eager(x_; name=name) - else - erfc_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
tf.@op function erfc(x_; name=nothing) + if tf.in_eager_mode() + erfc_eager(x_; name=name) + else + erfc_graph(x_; name=name) + end end - end end @@ -16000,18 +16000,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sigmoid_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "SigmoidGrad") do - desc = tf.NodeDescription("SigmoidGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function sigmoid_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "SigmoidGrad") do + desc = tf.NodeDescription("SigmoidGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function sigmoid_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SigmoidGrad") y_ = convert(tf.EagerTensor, y_) @@ -16027,13 +16027,13 @@ begin return res[1] end end - function sigmoid_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - sigmoid_grad_eager(y_, dy_; name=name) - else - sigmoid_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + sigmoid_grad_eager(y_, dy_; name=name) + else + sigmoid_grad_graph(y_, dy_; name=name) + end end - end end @@ -16043,34 +16043,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordReaderV2") do - desc = tf.NodeDescription("FixedLengthRecordReaderV2") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) - end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) - end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if encoding !== nothing - desc["encoding"] = Base.String(encoding) - end - end - tf.Tensor(tf.Operation(desc)) + function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordReaderV2") do + desc = tf.NodeDescription("FixedLengthRecordReaderV2") + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if encoding !== nothing + desc["encoding"] = Base.String(encoding) + end end + tf.Tensor(tf.Operation(desc)) + end function 
fixed_length_record_reader_v2_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) desc = tf.EagerOp("FixedLengthRecordReaderV2") if header_bytes !== nothing @@ -16101,13 +16101,13 @@ begin return res[1] end end - function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - if tf.in_eager_mode() - fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) - else - fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + if tf.in_eager_mode() + fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + else + fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + end end - end end @@ -16117,24 +16117,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionV3") do - desc = tf.NodeDescription("NonMaxSuppressionV3") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - end - tf.Tensor(tf.Operation(desc)) + function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV3") do + desc = tf.NodeDescription("NonMaxSuppressionV3") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) + tf.add_input(desc, score_threshold_) end + tf.Tensor(tf.Operation(desc)) + end function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) 
desc = tf.EagerOp("NonMaxSuppressionV3") boxes_ = convert(tf.EagerTensor, boxes_) @@ -16156,13 +16156,13 @@ begin return res[1] end end - function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) - else - non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + else + non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + end end - end end @@ -16172,29 +16172,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2DBackpropInput") do - desc = tf.NodeDescription("Dilation2DBackpropInput") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2DBackpropInput") do + desc = tf.NodeDescription("Dilation2DBackpropInput") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropInput") input_ = convert(tf.EagerTensor, input_) @@ -16222,13 +16222,13 @@ begin return res[1] end end - function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end end - end end @@ -16238,31 +16238,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdadelta") do - desc = tf.NodeDescription("ResourceApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdadelta") do + desc = tf.NodeDescription("ResourceApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdadelta") var_ = convert(tf.EagerTensor, var_) @@ -16293,13 +16293,13 @@ begin return res[1] end end - function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, 
use_locking=use_locking) + end end - end end @@ -16309,17 +16309,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_or_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LogicalOr") do - desc = tf.NodeDescription("LogicalOr") - x_ = convert(Tensor{Bool}, x_) - y_ = convert(Tensor{Bool}, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function logical_or_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LogicalOr") do + desc = tf.NodeDescription("LogicalOr") + x_ = convert(Tensor{Bool}, x_) + y_ = convert(Tensor{Bool}, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function logical_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalOr") x_ = convert(tf.EagerTensor, x_) @@ -16333,13 +16333,13 @@ begin return res[1] end end - function logical_or(x_, y_; name=nothing) - if tf.in_eager_mode() - logical_or_eager(x_, y_; name=name) - else - logical_or_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_or(x_, y_; name=nothing) + if tf.in_eager_mode() + logical_or_eager(x_, y_; name=name) + else + logical_or_graph(x_, y_; name=name) + end end - end end @@ -16349,33 +16349,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "DenseToSparseSetOperation") do - desc = tf.NodeDescription("DenseToSparseSetOperation") - set1_ = convert(Tensor{Any}, set1_) - set2_indices_ = convert(Tensor{Int64}, set2_indices_) - set2_values_ = convert(Tensor{Any}, set2_values_) - set2_shape_ = convert(Tensor{Int64}, set2_shape_) - (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end + function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "DenseToSparseSetOperation") do + desc = tf.NodeDescription("DenseToSparseSetOperation") + set1_ = convert(Tensor{Any}, set1_) + set2_indices_ = convert(Tensor{Int64}, set2_indices_) + set2_values_ = convert(Tensor{Any}, set2_values_) + set2_shape_ = convert(Tensor{Int64}, set2_shape_) + (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_) + tf.add_input(desc, set1_) + tf.add_input(desc, set2_indices_) + tf.add_input(desc, set2_values_) + tf.add_input(desc, set2_shape_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, 
validate_indices=nothing) desc = tf.EagerOp("DenseToSparseSetOperation") set1_ = convert(tf.EagerTensor, set1_) @@ -16401,13 +16401,13 @@ begin return res end end - function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - end end @@ -16417,15 +16417,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_records_produced_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumRecordsProduced") do - desc = tf.NodeDescription("ReaderNumRecordsProduced") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_num_records_produced_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumRecordsProduced") do + desc = tf.NodeDescription("ReaderNumRecordsProduced") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_num_records_produced_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProduced") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -16437,13 +16437,13 @@ begin return res[1] end end - function reader_num_records_produced(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_records_produced_eager(reader_handle_; name=name) - else - reader_num_records_produced_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_records_produced_eager(reader_handle_; name=name) + else + reader_num_records_produced_graph(reader_handle_; name=name) + end end - end end @@ -16453,17 +16453,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_hue_graph(images_, delta_; name=nothing) - local desc - tf.with_op_name(name, "AdjustHue") do - desc = tf.NodeDescription("AdjustHue") - images_ = convert(Tensor{Float32}, images_) - delta_ = convert(Tensor{Float32}, delta_) - tf.add_input(desc, images_) - tf.add_input(desc, delta_) - end - tf.Tensor(tf.Operation(desc)) + function adjust_hue_graph(images_, delta_; name=nothing) + local desc + tf.with_op_name(name, "AdjustHue") do + desc = tf.NodeDescription("AdjustHue") + images_ = convert(Tensor{Float32}, images_) + delta_ = convert(Tensor{Float32}, delta_) + 
tf.add_input(desc, images_) + tf.add_input(desc, delta_) end + tf.Tensor(tf.Operation(desc)) + end function adjust_hue_eager(images_, delta_; name=nothing) desc = tf.EagerOp("AdjustHue") images_ = convert(tf.EagerTensor, images_) @@ -16477,13 +16477,13 @@ begin return res[1] end end - function adjust_hue(images_, delta_; name=nothing) - if tf.in_eager_mode() - adjust_hue_eager(images_, delta_; name=name) - else - adjust_hue_graph(images_, delta_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_hue(images_, delta_; name=nothing) + if tf.in_eager_mode() + adjust_hue_eager(images_, delta_; name=name) + else + adjust_hue_graph(images_, delta_; name=name) + end end - end end @@ -16493,20 +16493,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - num_buckets_ = convert(Tensor{Int64}, num_buckets_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, num_buckets_) - if generate_quantiles !== nothing - desc["generate_quantiles"] = Base.Bool(generate_quantiles) - end + function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + num_buckets_ = convert(Tensor{Int64}, num_buckets_) + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, num_buckets_) + if generate_quantiles !== nothing + desc["generate_quantiles"] = Base.Bool(generate_quantiles) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -16523,13 +16523,13 @@ begin return res[1] end end - function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) - else - boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + else + boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, 
num_buckets_; name=name, generate_quantiles=generate_quantiles) + end end - end end @@ -16539,38 +16539,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do - desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - batch_size_ = convert(Tensor{Int64}, batch_size_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + batch_size_ = convert(Tensor{Int64}, batch_size_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalMapAndBatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -16605,13 +16605,13 @@ begin return res[1] end end - function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, 
output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - end end @@ -16621,18 +16621,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function real_div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "RealDiv") do - desc = tf.NodeDescription("RealDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function real_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "RealDiv") do + desc = tf.NodeDescription("RealDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function real_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("RealDiv") x_ = convert(tf.EagerTensor, x_) @@ -16648,13 +16648,13 @@ begin return res[1] end end - function real_div(x_, y_; name=nothing) - if tf.in_eager_mode() - real_div_eager(x_, y_; name=name) - else - real_div_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real_div(x_, y_; name=nothing) + if tf.in_eager_mode() + real_div_eager(x_, y_; name=name) + else + real_div_graph(x_, y_; name=name) + end end - end end @@ -16664,25 +16664,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - local desc - tf.with_op_name(name, "RestoreSlice") do - desc = tf.NodeDescription("RestoreSlice") - file_pattern_ = convert(Tensor{String}, file_pattern_) - tensor_name_ = convert(Tensor{String}, tensor_name_) - shape_and_slice_ = convert(Tensor{String}, shape_and_slice_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - tf.add_input(desc, shape_and_slice_) - if dt !== nothing - desc["dt"] = 
Base.identity(dt) - end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) - end + function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "RestoreSlice") do + desc = tf.NodeDescription("RestoreSlice") + file_pattern_ = convert(Tensor{String}, file_pattern_) + tensor_name_ = convert(Tensor{String}, tensor_name_) + shape_and_slice_ = convert(Tensor{String}, shape_and_slice_) + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + tf.add_input(desc, shape_and_slice_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("RestoreSlice") file_pattern_ = convert(tf.EagerTensor, file_pattern_) @@ -16704,13 +16704,13 @@ begin return res[1] end end - function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - if tf.in_eager_mode() - restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) - else - restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + if tf.in_eager_mode() + restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + else + restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + end end - end end @@ -16720,18 +16720,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) - local desc - tf.with_op_name(name, "StackPopV2") do - desc = tf.NodeDescription("StackPopV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end + function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPopV2") do + desc = tf.NodeDescription("StackPopV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPopV2") handle_ = convert(tf.EagerTensor, handle_) @@ -16746,13 +16746,13 @@ begin return res[1] end end - function stack_pop_v2(handle_; name=nothing, elem_type=nothing) - if tf.in_eager_mode() - stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) - else - stack_pop_v2_graph(handle_; name=name, elem_type=elem_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing) + if tf.in_eager_mode() + stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_v2_graph(handle_; name=name, 
elem_type=elem_type) + end end - end end @@ -16762,18 +16762,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_graph(tensor_, dims_; name=nothing) - local desc - tf.with_op_name(name, "Reverse") do - desc = tf.NodeDescription("Reverse") - tensor_ = convert(Tensor{Any}, tensor_) - dims_ = convert(Tensor{Bool}, dims_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - tf.add_input(desc, dims_) - end - tf.Tensor(tf.Operation(desc)) + function reverse_graph(tensor_, dims_; name=nothing) + local desc + tf.with_op_name(name, "Reverse") do + desc = tf.NodeDescription("Reverse") + tensor_ = convert(Tensor{Any}, tensor_) + dims_ = convert(Tensor{Bool}, dims_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + tf.add_input(desc, dims_) end + tf.Tensor(tf.Operation(desc)) + end function reverse_eager(tensor_, dims_; name=nothing) desc = tf.EagerOp("Reverse") tensor_ = convert(tf.EagerTensor, tensor_) @@ -16788,13 +16788,13 @@ begin return res[1] end end - function reverse(tensor_, dims_; name=nothing) - if tf.in_eager_mode() - reverse_eager(tensor_, dims_; name=name) - else - reverse_graph(tensor_, dims_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse(tensor_, dims_; name=nothing) + if tf.in_eager_mode() + reverse_eager(tensor_, dims_; name=name) + else + reverse_graph(tensor_, dims_; name=name) + end end - end end @@ -16804,21 +16804,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DecodePng") do - desc = tf.NodeDescription("DecodePng") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DecodePng") do + desc = tf.NodeDescription("DecodePng") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) desc = tf.EagerOp("DecodePng") contents_ = convert(tf.EagerTensor, contents_) @@ -16836,13 +16836,13 @@ begin return res[1] end end - function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) - if tf.in_eager_mode() - decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) - else - decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) + if tf.in_eager_mode() + decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) + else + decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) + end end - end end @@ -16852,22 +16852,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) - local desc - 
tf.with_op_name(name, "NonMaxSuppressionV2") do - desc = tf.NodeDescription("NonMaxSuppressionV2") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - end - tf.Tensor(tf.Operation(desc)) + function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV2") do + desc = tf.NodeDescription("NonMaxSuppressionV2") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, iou_threshold_) end + tf.Tensor(tf.Operation(desc)) + end function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionV2") boxes_ = convert(tf.EagerTensor, boxes_) @@ -16887,13 +16887,13 @@ begin return res[1] end end - function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) - else - non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + else + non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + end end - end end @@ -16903,18 +16903,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igamma_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Igamma") do - desc = tf.NodeDescription("Igamma") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function igamma_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igamma") do + desc = tf.NodeDescription("Igamma") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function igamma_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igamma") a_ = convert(tf.EagerTensor, a_) @@ -16930,13 +16930,13 @@ begin return res[1] end end - function igamma(a_, x_; name=nothing) - if tf.in_eager_mode() - igamma_eager(a_, x_; name=name) - else - igamma_graph(a_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma(a_, x_; name=nothing) + if tf.in_eager_mode() + igamma_eager(a_, x_; name=name) + else + igamma_graph(a_, x_; name=name) + end end - end end @@ -16946,16 +16946,16 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function digamma_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Digamma") do - desc = tf.NodeDescription("Digamma") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function digamma_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Digamma") do + desc = tf.NodeDescription("Digamma") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function digamma_eager(x_; name=nothing) desc = tf.EagerOp("Digamma") x_ = convert(tf.EagerTensor, x_) @@ -16968,13 +16968,13 @@ begin return res[1] end end - function digamma(x_; name=nothing) - if tf.in_eager_mode() - digamma_eager(x_; name=name) - else - digamma_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function digamma(x_; name=nothing) + if tf.in_eager_mode() + digamma_eager(x_; name=name) + else + digamma_graph(x_; name=name) + end end - end end @@ -16984,35 +16984,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdaMax") do - desc = tf.NodeDescription("ResourceApplyAdaMax") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdaMax") do + desc = tf.NodeDescription("ResourceApplyAdaMax") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, 
use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdaMax") var_ = convert(tf.EagerTensor, var_) @@ -17049,38 +17049,38 @@ begin return res[1] end end - function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end """ - space_to_depth(input; data_format=NHWC) + space_to_depth(input; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "SpaceToDepth") do - desc = tf.NodeDescription("SpaceToDepth") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "SpaceToDepth") do + desc = tf.NodeDescription("SpaceToDepth") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) desc = tf.EagerOp("SpaceToDepth") input_ = convert(tf.EagerTensor, input_) @@ -17099,13 +17099,13 @@ begin return res[1] end end - function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) - if tf.in_eager_mode() - space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) - else - space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) + if tf.in_eager_mode() + space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) + else + space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) + end end - end end @@ -17115,18 +17115,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sqrt_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "SqrtGrad") do - desc = tf.NodeDescription("SqrtGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = 
tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function sqrt_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "SqrtGrad") do + desc = tf.NodeDescription("SqrtGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function sqrt_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("SqrtGrad") y_ = convert(tf.EagerTensor, y_) @@ -17142,13 +17142,13 @@ begin return res[1] end end - function sqrt_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - sqrt_grad_eager(y_, dy_; name=name) - else - sqrt_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + sqrt_grad_eager(y_, dy_; name=name) + else + sqrt_grad_graph(y_, dy_; name=name) + end end - end end @@ -17158,32 +17158,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapUnstage") do - desc = tf.NodeDescription("MapUnstage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapUnstage") do + desc = tf.NodeDescription("MapUnstage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstage") key_ = convert(tf.EagerTensor, key_) @@ -17212,13 +17212,13 @@ begin return res[1] end end - function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + 
#= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -17228,24 +17228,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function qr_graph(input_; name=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "Qr") do - desc = tf.NodeDescription("Qr") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function qr_graph(input_; name=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "Qr") do + desc = tf.NodeDescription("Qr") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function qr_eager(input_; name=nothing, full_matrices=nothing) desc = tf.EagerOp("Qr") input_ = convert(tf.EagerTensor, input_) @@ -17261,13 +17261,13 @@ begin return res end end - function qr(input_; name=nothing, full_matrices=nothing) - if tf.in_eager_mode() - qr_eager(input_; name=name, full_matrices=full_matrices) - else - qr_graph(input_; name=name, full_matrices=full_matrices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing) + if tf.in_eager_mode() + qr_eager(input_; name=name, full_matrices=full_matrices) + else + qr_graph(input_; name=name, full_matrices=full_matrices) + end end - end end @@ -17277,36 +17277,36 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do - desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") - node_id_range_ = convert(Tensor{Int32}, node_id_range_) - stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_] - l1_ = convert(Tensor{Float32}, l1_) - l2_ = convert(Tensor{Float32}, l2_) - tree_complexity_ = convert(Tensor{Float32}, tree_complexity_) - min_node_weight_ = convert(Tensor{Float32}, min_node_weight_) - tf.add_input(desc, node_id_range_) - tf.add_input(desc, stats_summary_list_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, tree_complexity_) - tf.add_input(desc, min_node_weight_) - if max_splits !== nothing - desc["max_splits"] = Base.Int(max_splits) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - 
for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out + function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do + desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") + node_id_range_ = convert(Tensor{Int32}, node_id_range_) + stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_] + l1_ = convert(Tensor{Float32}, l1_) + l2_ = convert(Tensor{Float32}, l2_) + tree_complexity_ = convert(Tensor{Float32}, tree_complexity_) + min_node_weight_ = convert(Tensor{Float32}, min_node_weight_) + tf.add_input(desc, node_id_range_) + tf.add_input(desc, stats_summary_list_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, tree_complexity_) + tf.add_input(desc, min_node_weight_) + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") node_id_range_ = convert(tf.EagerTensor, node_id_range_) @@ -17334,13 +17334,13 @@ begin return res end end - function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) - else - boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + else + boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + end end - end end @@ -17350,28 +17350,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "UnbatchGrad") do - desc = tf.NodeDescription("UnbatchGrad") - original_input_ = convert(Tensor{Any}, original_input_) - batch_index_ = convert(Tensor{Int64}, batch_index_) - grad_ = convert(Tensor{Any}, grad_) - id_ = convert(Tensor{Int64}, id_) - (original_input_, grad_) = 
tf.tf_promote(original_input_, grad_) - tf.add_input(desc, original_input_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, grad_) - tf.add_input(desc, id_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "UnbatchGrad") do + desc = tf.NodeDescription("UnbatchGrad") + original_input_ = convert(Tensor{Any}, original_input_) + batch_index_ = convert(Tensor{Int64}, batch_index_) + grad_ = convert(Tensor{Any}, grad_) + id_ = convert(Tensor{Int64}, id_) + (original_input_, grad_) = tf.tf_promote(original_input_, grad_) + tf.add_input(desc, original_input_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, grad_) + tf.add_input(desc, id_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("UnbatchGrad") original_input_ = convert(tf.EagerTensor, original_input_) @@ -17397,13 +17397,13 @@ begin return res[1] end end - function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) - else - unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + else + unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + end end - end end @@ -17413,16 +17413,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_softmax_graph(logits_; name=nothing) - local desc - tf.with_op_name(name, "LogSoftmax") do - desc = tf.NodeDescription("LogSoftmax") - logits_ = convert(Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - end - tf.Tensor(tf.Operation(desc)) + function log_softmax_graph(logits_; name=nothing) + local desc + tf.with_op_name(name, "LogSoftmax") do + desc = tf.NodeDescription("LogSoftmax") + logits_ = convert(Tensor{Any}, logits_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) end + tf.Tensor(tf.Operation(desc)) + end function log_softmax_eager(logits_; name=nothing) desc = tf.EagerOp("LogSoftmax") logits_ = convert(tf.EagerTensor, logits_) @@ -17435,13 +17435,13 @@ begin return res[1] end end - function log_softmax(logits_; name=nothing) - if tf.in_eager_mode() - log_softmax_eager(logits_; name=name) - else - log_softmax_graph(logits_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_softmax(logits_; name=nothing) + if tf.in_eager_mode() + log_softmax_eager(logits_; 
name=name) + else + log_softmax_graph(logits_; name=name) + end end - end end @@ -17451,18 +17451,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) - local desc - tf.with_op_name(name, "ResourceCountUpTo") do - desc = tf.NodeDescription("ResourceCountUpTo") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if limit !== nothing - desc["limit"] = Base.Int(limit) - end + function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "ResourceCountUpTo") do + desc = tf.NodeDescription("ResourceCountUpTo") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if limit !== nothing + desc["limit"] = Base.Int(limit) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) desc = tf.EagerOp("ResourceCountUpTo") resource_ = convert(tf.EagerTensor, resource_) @@ -17477,13 +17477,13 @@ begin return res[1] end end - function resource_count_up_to(resource_; name=nothing, limit=nothing) - if tf.in_eager_mode() - resource_count_up_to_eager(resource_; name=name, limit=limit) - else - resource_count_up_to_graph(resource_; name=name, limit=limit) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing) + if tf.in_eager_mode() + resource_count_up_to_eager(resource_; name=name, limit=limit) + else + resource_count_up_to_graph(resource_; name=name, limit=limit) + end end - end end @@ -17493,22 +17493,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) - local desc - tf.with_op_name(name, "AccumulateNV2") do - desc = tf.NodeDescription("AccumulateNV2") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "AccumulateNV2") do + desc = tf.NodeDescription("AccumulateNV2") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("AccumulateNV2") inputs_ = convert(tf.EagerTensor, inputs_) @@ -17527,13 +17527,13 @@ begin return res[1] end end - function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) - if tf.in_eager_mode() - accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) - else - accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) + if tf.in_eager_mode() + accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) + else + accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) + end end - end end @@ -17543,40 +17543,40 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ParallelMapDataset") do - desc = tf.NodeDescription("ParallelMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ParallelMapDataset") do + desc = tf.NodeDescription("ParallelMapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ParallelMapDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -17613,13 +17613,13 @@ begin return res[1] end end - function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - parallel_map_dataset_eager(input_dataset_, other_arguments_, 
num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) - else - parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + else + parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + end end - end end @@ -17629,25 +17629,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomUniform") do - desc = tf.NodeDescription("RandomUniform") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomUniform") do + desc = tf.NodeDescription("RandomUniform") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomUniform") shape_ = convert(tf.EagerTensor, shape_) @@ -17669,46 +17669,46 @@ begin return res[1] end end - function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - end end """ 
- unicode_transcode(input; errors=replace, replacement_char=65533, replace_control_characters=false) + unicode_transcode(input; errors=, replacement_char=65533, replace_control_characters=false) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - local desc - tf.with_op_name(name, "UnicodeTranscode") do - desc = tf.NodeDescription("UnicodeTranscode") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if output_encoding !== nothing - desc["output_encoding"] = Base.String(output_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end + function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeTranscode") do + desc = tf.NodeDescription("UnicodeTranscode") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) desc = tf.EagerOp("UnicodeTranscode") input_ = convert(tf.EagerTensor, input_) @@ -17735,13 +17735,13 @@ begin return res[1] end end - function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.in_eager_mode() - unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - else - unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.in_eager_mode() + unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_transcode_graph(input_; name=name, 
input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end end - end end @@ -17751,15 +17751,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_reset_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReset") do - desc = tf.NodeDescription("ReaderReset") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_reset_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReset") do + desc = tf.NodeDescription("ReaderReset") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_reset_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderReset") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -17771,13 +17771,13 @@ begin return res[1] end end - function reader_reset(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_reset_eager(reader_handle_; name=name) - else - reader_reset_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_reset_eager(reader_handle_; name=name) + else + reader_reset_graph(reader_handle_; name=name) + end end - end end @@ -17787,22 +17787,22 @@ end Replacement node for NcclBroadcast. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclBroadcastSend") do - desc = tf.NodeDescription("_NcclBroadcastSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastSend") do + desc = tf.NodeDescription("_NcclBroadcastSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastSend") input_ = convert(tf.EagerTensor, input_) @@ -17821,13 +17821,13 @@ begin return res[1] end end - function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name) - else - _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_broadcast_send_eager(input_; name=name, 
num_devices=num_devices, shared_name=shared_name) + else + _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) + end end - end end @@ -17837,16 +17837,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixDeterminant") do - desc = tf.NodeDescription("BatchMatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDeterminant") do + desc = tf.NodeDescription("BatchMatrixDeterminant") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDeterminant") input_ = convert(tf.EagerTensor, input_) @@ -17859,13 +17859,13 @@ begin return res[1] end end - function batch_matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - batch_matrix_determinant_eager(input_; name=name) - else - batch_matrix_determinant_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + batch_matrix_determinant_eager(input_; name=name) + else + batch_matrix_determinant_graph(input_; name=name) + end end - end end @@ -17875,18 +17875,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function less_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LessEqual") do - desc = tf.NodeDescription("LessEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function less_equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LessEqual") do + desc = tf.NodeDescription("LessEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function less_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("LessEqual") x_ = convert(tf.EagerTensor, x_) @@ -17902,13 +17902,13 @@ begin return res[1] end end - function less_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - less_equal_eager(x_, y_; name=name) - else - less_equal_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + less_equal_eager(x_, y_; name=name) + else + less_equal_graph(x_, y_; name=name) + end end - end end @@ -17918,23 +17918,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyGradientDescent") do - desc = tf.NodeDescription("ApplyGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - delta_ = convert(Tensor{Any}, delta_) - (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - 
tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyGradientDescent") do + desc = tf.NodeDescription("ApplyGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + delta_ = convert(Tensor{Any}, delta_) + (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -17956,13 +17956,13 @@ begin return res[1] end end - function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) - else - apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) + else + apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + end end - end end @@ -17972,22 +17972,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtN") do - desc = tf.NodeDescription("SparseSegmentSqrtN") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtN") do + desc = tf.NodeDescription("SparseSegmentSqrtN") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtN") data_ = convert(tf.EagerTensor, data_) @@ -18005,13 +18005,13 @@ begin return res[1] end end - function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) + end end - end end @@ -18021,16 +18021,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_logarithm_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixLogarithm") do - desc = tf.NodeDescription("MatrixLogarithm") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_logarithm_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixLogarithm") do + desc = tf.NodeDescription("MatrixLogarithm") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_logarithm_eager(input_; name=nothing) desc = tf.EagerOp("MatrixLogarithm") input_ = convert(tf.EagerTensor, input_) @@ -18043,13 +18043,13 @@ begin return res[1] end end - function matrix_logarithm(input_; name=nothing) - if tf.in_eager_mode() - matrix_logarithm_eager(input_; name=name) - else - matrix_logarithm_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_logarithm(input_; name=nothing) + if tf.in_eager_mode() + matrix_logarithm_eager(input_; name=name) + else + matrix_logarithm_graph(input_; name=name) + end end - end end @@ -18059,25 +18059,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterMul") do - desc = tf.NodeDescription("ScatterMul") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMul") do + desc = tf.NodeDescription("ScatterMul") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMul") ref_ = convert(tf.EagerTensor, ref_) @@ -18099,13 +18099,13 @@ begin return res[1] end end - function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_mul_graph(ref_, indices_, updates_; name=name, 
use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -18115,33 +18115,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - local desc - tf.with_op_name(name, "DecodeJpeg") do - desc = tf.NodeDescription("DecodeJpeg") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - end - tf.Tensor(tf.Operation(desc)) + function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeJpeg") do + desc = tf.NodeDescription("DecodeJpeg") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end end + tf.Tensor(tf.Operation(desc)) + end function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeJpeg") contents_ = convert(tf.EagerTensor, contents_) @@ -18171,13 +18171,13 @@ begin return res[1] end end - function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - if tf.in_eager_mode() - decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - else - decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, 
fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.in_eager_mode() + decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end end - end end @@ -18187,37 +18187,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "RandomShuffleQueueV2") do - desc = tf.NodeDescription("RandomShuffleQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueueV2") do + desc = tf.NodeDescription("RandomShuffleQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function random_shuffle_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("RandomShuffleQueueV2") if component_types !== nothing @@ -18251,13 +18251,13 @@ begin return res[1] end end - function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - else - random_shuffle_queue_v2_graph(; name=name, 
component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end end - end end @@ -18267,23 +18267,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueManyV2") do - desc = tf.NodeDescription("QueueEnqueueManyV2") - handle_ = convert(Tensor{Any}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueManyV2") do + desc = tf.NodeDescription("QueueEnqueueManyV2") + handle_ = convert(Tensor{Any}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueManyV2") handle_ = convert(tf.EagerTensor, handle_) @@ -18303,13 +18303,13 @@ begin return res[1] end end - function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - end end @@ -18319,39 +18319,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, 
momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do - desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -18390,13 +18390,13 @@ begin return res[1] end end - function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if 
tf.in_eager_mode() + resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -18406,33 +18406,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "InterleaveDataset") do - desc = tf.NodeDescription("InterleaveDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "InterleaveDataset") do + desc = tf.NodeDescription("InterleaveDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("InterleaveDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -18462,13 +18462,13 @@ begin return res[1] end end - function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -18478,18 +18478,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_pop_graph(handle_; name=nothing, elem_type=nothing) - local desc - tf.with_op_name(name, "StackPop") do - desc = tf.NodeDescription("StackPop") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end + function stack_pop_graph(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPop") do + desc = tf.NodeDescription("StackPop") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_pop_eager(handle_; name=nothing, elem_type=nothing) desc = tf.EagerOp("StackPop") handle_ = convert(tf.EagerTensor, handle_) @@ -18504,13 +18504,13 @@ begin return res[1] end end - function stack_pop(handle_; name=nothing, elem_type=nothing) - if tf.in_eager_mode() - stack_pop_eager(handle_; name=name, elem_type=elem_type) - else - stack_pop_graph(handle_; name=name, elem_type=elem_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing) + if tf.in_eager_mode() + stack_pop_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_graph(handle_; name=name, elem_type=elem_type) + end end - end end @@ -18520,19 +18520,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do - desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - stamp_token_ = convert(Tensor{Int64}, stamp_token_) - tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - end - tf.Tensor(tf.Operation(desc)) + function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, 
tree_ensemble_serialized_) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -18548,42 +18548,42 @@ begin return res[1] end end - function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.in_eager_mode() - boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - else - boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.in_eager_mode() + boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end end - end end """ - max_pool_v2(input, ksize, strides; data_format=NHWC) + max_pool_v2(input, ksize, strides; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolV2") do - desc = tf.NodeDescription("MaxPoolV2") - input_ = convert(Tensor{Float32}, input_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolV2") do + desc = tf.NodeDescription("MaxPoolV2") + input_ = convert(Tensor{Float32}, input_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolV2") input_ = convert(tf.EagerTensor, input_) @@ -18606,13 +18606,13 @@ begin return res[1] end end - function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_v2_eager(input_, ksize_, 
strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end end @@ -18622,32 +18622,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - local desc - tf.with_op_name(name, "LoadAndRemapMatrix") do - desc = tf.NodeDescription("LoadAndRemapMatrix") - ckpt_path_ = convert(Tensor{String}, ckpt_path_) - old_tensor_name_ = convert(Tensor{String}, old_tensor_name_) - row_remapping_ = convert(Tensor{Int64}, row_remapping_) - col_remapping_ = convert(Tensor{Int64}, col_remapping_) - initializing_values_ = convert(Tensor{Float32}, initializing_values_) - tf.add_input(desc, ckpt_path_) - tf.add_input(desc, old_tensor_name_) - tf.add_input(desc, row_remapping_) - tf.add_input(desc, col_remapping_) - tf.add_input(desc, initializing_values_) - if num_rows !== nothing - desc["num_rows"] = Base.Int(num_rows) - end - if num_cols !== nothing - desc["num_cols"] = Base.Int(num_cols) - end - if max_rows_in_memory !== nothing - desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) - end - end - tf.Tensor(tf.Operation(desc)) + function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + local desc + tf.with_op_name(name, "LoadAndRemapMatrix") do + desc = tf.NodeDescription("LoadAndRemapMatrix") + ckpt_path_ = convert(Tensor{String}, ckpt_path_) + old_tensor_name_ = convert(Tensor{String}, old_tensor_name_) + row_remapping_ = convert(Tensor{Int64}, row_remapping_) + col_remapping_ = convert(Tensor{Int64}, col_remapping_) + initializing_values_ = convert(Tensor{Float32}, initializing_values_) + tf.add_input(desc, ckpt_path_) + tf.add_input(desc, old_tensor_name_) + tf.add_input(desc, row_remapping_) + tf.add_input(desc, col_remapping_) + tf.add_input(desc, initializing_values_) + if num_rows !== nothing + desc["num_rows"] = Base.Int(num_rows) + end + if num_cols !== nothing + desc["num_cols"] = Base.Int(num_cols) + end + if max_rows_in_memory !== nothing + desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) + end end + tf.Tensor(tf.Operation(desc)) + end function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) desc = tf.EagerOp("LoadAndRemapMatrix") ckpt_path_ = convert(tf.EagerTensor, ckpt_path_) @@ -18676,13 +18676,13 @@ begin return res[1] end end - function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - if tf.in_eager_mode() - load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) - else - load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + if tf.in_eager_mode() + load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + else + load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + end end - end end @@ -18692,31 +18692,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyProximalGradientDescent") do - desc = tf.NodeDescription("SparseApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("SparseApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -18747,13 +18747,13 @@ begin return res[1] end end - function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, 
indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -18763,24 +18763,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "PyFuncStateless") do - desc = tf.NodeDescription("PyFuncStateless") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end + function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFuncStateless") do + desc = tf.NodeDescription("PyFuncStateless") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFuncStateless") input_ = convert(tf.EagerTensor, input_) @@ -18801,13 +18801,13 @@ begin return res[1] end end - function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - end end @@ -18817,16 +18817,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function where_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Where") do - desc = tf.NodeDescription("Where") - input_ = convert(Tensor{Bool}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function where_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Where") do + desc = tf.NodeDescription("Where") + input_ = convert(Tensor{Bool}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function where_eager(input_; name=nothing) desc = tf.EagerOp("Where") input_ = convert(tf.EagerTensor, input_) @@ -18839,13 +18839,13 @@ begin return res[1] end end - function where(input_; name=nothing) - if tf.in_eager_mode() - where_eager(input_; name=name) - else - where_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function where(input_; name=nothing) + if tf.in_eager_mode() + where_eager(input_; name=name) + else + 
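            # Dispatch is decided per call, not per module load: in_eager_mode()
            # is assumed to inspect the currently active execution context, so
            # the same where(...) entry point executes immediately under an
            # eager context and falls through to graph construction here
            # otherwise. A rough sketch of the check (context_stack is an
            # assumed implementation detail, not the confirmed API):
            #
            #     in_eager_mode() = isa(last(context_stack), EagerContext)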
where_graph(input_; name=name) + end end - end end @@ -18855,29 +18855,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) - local desc - tf.with_op_name(name, "Mfcc") do - desc = tf.NodeDescription("Mfcc") - spectrogram_ = convert(Tensor{Float32}, spectrogram_) - sample_rate_ = convert(Tensor{Int32}, sample_rate_) - tf.add_input(desc, spectrogram_) - tf.add_input(desc, sample_rate_) - if upper_frequency_limit !== nothing - desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) - end - if lower_frequency_limit !== nothing - desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) - end - if filterbank_channel_count !== nothing - desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) - end - if dct_coefficient_count !== nothing - desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) - end + function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + local desc + tf.with_op_name(name, "Mfcc") do + desc = tf.NodeDescription("Mfcc") + spectrogram_ = convert(Tensor{Float32}, spectrogram_) + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + tf.add_input(desc, spectrogram_) + tf.add_input(desc, sample_rate_) + if upper_frequency_limit !== nothing + desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) + end + if lower_frequency_limit !== nothing + desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) + end + if filterbank_channel_count !== nothing + desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + end + if dct_coefficient_count !== nothing + desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) desc = tf.EagerOp("Mfcc") spectrogram_ = convert(tf.EagerTensor, spectrogram_) @@ -18903,13 +18903,13 @@ begin return res[1] end end - function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) - if tf.in_eager_mode() - mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) - else - mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + if tf.in_eager_mode() + mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, 
filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + else + mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + end end - end end @@ -18919,19 +18919,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function check_numerics_graph(tensor_; name=nothing, message=nothing) - local desc - tf.with_op_name(name, "CheckNumerics") do - desc = tf.NodeDescription("CheckNumerics") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if message !== nothing - desc["message"] = Base.String(message) - end + function check_numerics_graph(tensor_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "CheckNumerics") do + desc = tf.NodeDescription("CheckNumerics") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if message !== nothing + desc["message"] = Base.String(message) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function check_numerics_eager(tensor_; name=nothing, message=nothing) desc = tf.EagerOp("CheckNumerics") tensor_ = convert(tf.EagerTensor, tensor_) @@ -18947,13 +18947,13 @@ begin return res[1] end end - function check_numerics(tensor_; name=nothing, message=nothing) - if tf.in_eager_mode() - check_numerics_eager(tensor_; name=name, message=message) - else - check_numerics_graph(tensor_; name=name, message=message) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function check_numerics(tensor_; name=nothing, message=nothing) + if tf.in_eager_mode() + check_numerics_eager(tensor_; name=name, message=message) + else + check_numerics_graph(tensor_; name=name, message=message) + end end - end end @@ -18963,14 +18963,14 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_compilation_result_graph(; name=nothing) - local desc - tf.with_op_name(name, "TPUCompilationResult") do - desc - tf.NodeDescription("TPUCompilationResult") - end - tf.Tensor(tf.Operation(desc)) + function tpu_compilation_result_graph(; name=nothing) + local desc + tf.with_op_name(name, "TPUCompilationResult") do + desc + tf.NodeDescription("TPUCompilationResult") end + tf.Tensor(tf.Operation(desc)) + end function tpu_compilation_result_eager(; name=nothing) desc = tf.EagerOp("TPUCompilationResult") res = tf.execute(desc) @@ -18980,13 +18980,13 @@ begin return res[1] end end - function tpu_compilation_result(; name=nothing) - if tf.in_eager_mode() - tpu_compilation_result_eager(; name=name) - else - tpu_compilation_result_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_compilation_result(; name=nothing) + if tf.in_eager_mode() + tpu_compilation_result_eager(; name=name) + else + tpu_compilation_result_graph(; name=name) + end end - end end @@ -18996,25 +18996,25 @@ end Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters") if table_id !== nothing @@ -19036,13 +19036,13 @@ begin return res[1] end end - function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -19052,24 +19052,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMeanGrad") do - desc = tf.NodeDescription("SparseSegmentMeanGrad") - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - output_dim0_ = convert(Tensor{Int32}, 
output_dim0_) - (grad_,) = tf.tf_promote(grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanGrad") do + desc = tf.NodeDescription("SparseSegmentMeanGrad") + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + (grad_,) = tf.tf_promote(grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanGrad") grad_ = convert(tf.EagerTensor, grad_) @@ -19089,13 +19089,13 @@ begin return res[1] end end - function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) - else - sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) + else + sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end end - end end @@ -19105,33 +19105,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - local desc - tf.with_op_name(name, "TryRpc") do - desc = tf.NodeDescription("TryRpc") - address_ = convert(Tensor{String}, address_) - method_ = convert(Tensor{String}, method_) - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) - end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) - end + function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "TryRpc") do + desc = tf.NodeDescription("TryRpc") + address_ = convert(Tensor{String}, address_) + method_ = convert(Tensor{String}, method_) + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) end - out + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + out = tf.Tensor[] + op = 
tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) desc = tf.EagerOp("TryRpc") address_ = convert(tf.EagerTensor, address_) @@ -19156,13 +19156,13 @@ begin return res end end - function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - if tf.in_eager_mode() - try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - else - try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.in_eager_mode() + try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end end - end end @@ -19172,24 +19172,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "BatchMatrixTriangularSolve") do - desc = tf.NodeDescription("BatchMatrixTriangularSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixTriangularSolve") do + desc = tf.NodeDescription("BatchMatrixTriangularSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixTriangularSolve") matrix_ = convert(tf.EagerTensor, matrix_) @@ -19211,13 +19211,13 @@ begin return res[1] end end - function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - else - batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + else + batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end end - end end @@ -19227,19 
+19227,19 @@ end A graph node which represents a return value of a function. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _retval_graph(input_; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_Retval") do - desc = tf.NodeDescription("_Retval") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) - end + function _retval_graph(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Retval") do + desc = tf.NodeDescription("_Retval") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_Retval") input_ = convert(tf.EagerTensor, input_) @@ -19255,13 +19255,13 @@ begin return res[1] end end - function _retval(input_; name=nothing, index=nothing) - if tf.in_eager_mode() - _retval_eager(input_; name=name, index=index) - else - _retval_graph(input_; name=name, index=index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _retval(input_; name=nothing, index=nothing) + if tf.in_eager_mode() + _retval_eager(input_; name=name, index=index) + else + _retval_graph(input_; name=name, index=index) + end end - end end @@ -19271,24 +19271,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "UniqueWithCounts") do - desc = tf.NodeDescription("UniqueWithCounts") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueWithCounts") do + desc = tf.NodeDescription("UniqueWithCounts") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueWithCounts") x_ = convert(tf.EagerTensor, x_) @@ -19304,13 +19304,13 @@ begin return res end end - function unique_with_counts(x_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_with_counts_eager(x_; name=name, out_idx=out_idx) - else - unique_with_counts_graph(x_; name=name, out_idx=out_idx) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_with_counts_eager(x_; name=name, out_idx=out_idx) + else + unique_with_counts_graph(x_; name=name, out_idx=out_idx) + end end - end end @@ -19320,18 +19320,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Add") do - desc = tf.NodeDescription("Add") - x_ = convert(Tensor{Any}, x_) - y_ 
= convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function add_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Add") do + desc = tf.NodeDescription("Add") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function add_eager(x_, y_; name=nothing) desc = tf.EagerOp("Add") x_ = convert(tf.EagerTensor, x_) @@ -19347,13 +19347,13 @@ begin return res[1] end end - function add(x_, y_; name=nothing) - if tf.in_eager_mode() - add_eager(x_, y_; name=name) - else - add_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add(x_, y_; name=nothing) + if tf.in_eager_mode() + add_eager(x_, y_; name=name) + else + add_graph(x_, y_; name=name) + end end - end end @@ -19363,37 +19363,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalScanDataset") do - desc = tf.NodeDescription("ExperimentalScanDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalScanDataset") do + desc = tf.NodeDescription("ExperimentalScanDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, initial_state_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function 
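# ---- usage sketch (illustrative; not part of this patch) ----
# A smoke test of the dual-mode `add` wrapper defined above, assuming the
# generated wrappers live under TensorFlow.Ops and the usual Session API
# (both assumptions, not stated in this patch):
using TensorFlow
sess = TensorFlow.Session(TensorFlow.Graph())
a = TensorFlow.constant([1, 2])
b = TensorFlow.constant([3, 4])
c = TensorFlow.Ops.add(a, b)   # graph mode: builds an "Add" node
run(sess, c)                   # expected to yield [4, 6]
# With tf.in_eager_mode() returning true, the same call would route to
# add_eager and return a value-carrying EagerTensor instead.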
experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalScanDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -19427,13 +19427,13 @@ begin return res[1] end end - function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - end end @@ -19443,21 +19443,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignAddVariableOp") do - desc = tf.NodeDescription("AssignAddVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignAddVariableOp") do + desc = tf.NodeDescription("AssignAddVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignAddVariableOp") resource_ = convert(tf.EagerTensor, resource_) @@ -19475,13 +19475,13 @@ begin return res[1] end end - function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end end @@ -19491,30 +19491,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "SplitV") do - desc = tf.NodeDescription("SplitV") - value_ = convert(Tensor{Any}, value_) - size_splits_ = convert(Tensor{Int64}, size_splits_) - split_dim_ = convert(Tensor{Int32}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - (value_,) = tf.tf_promote(value_) - (size_splits_,) = tf.tf_promote(size_splits_) - tf.add_input(desc, value_) - tf.add_input(desc, size_splits_) - tf.add_input(desc, split_dim_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end + function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SplitV") do + desc = tf.NodeDescription("SplitV") + value_ = convert(Tensor{Any}, value_) + size_splits_ = convert(Tensor{Int64}, size_splits_) + split_dim_ = convert(Tensor{Int32}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + (value_,) = tf.tf_promote(value_) + (size_splits_,) = tf.tf_promote(size_splits_) + tf.add_input(desc, value_) + tf.add_input(desc, size_splits_) + tf.add_input(desc, split_dim_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_split - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) desc = tf.EagerOp("SplitV") value_ = convert(tf.EagerTensor, value_) @@ -19535,13 +19535,13 @@ begin return res end end - function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) - else - split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) + else + split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) + end end - end end @@ -19551,24 +19551,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "Assign") do - desc = tf.NodeDescription("Assign") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if validate_shape !== nothing - desc["validate_shape"] = Base.Bool(validate_shape) - end - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + 
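# ---- usage sketch (illustrative; not part of this patch) ----
# Multi-output wrappers such as `split_v` above collect one Tensor per op
# output (`for out_idx = 1:num_split`). Hypothetical call:
parts = TensorFlow.Ops.split_v(
    TensorFlow.constant(collect(1.0:6.0)),  # value to split
    [2, 4],                                 # size_splits, summing to length 6
    1,                                      # split_dim, 1-based; shifted to 0-based internally
    num_split=2)
# parts[1] and parts[2] are independent graph Tensors.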
function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "Assign") do + desc = tf.NodeDescription("Assign") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if validate_shape !== nothing + desc["validate_shape"] = Base.Bool(validate_shape) + end + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) desc = tf.EagerOp("Assign") ref_ = convert(tf.EagerTensor, ref_) @@ -19590,13 +19590,13 @@ begin return res[1] end end - function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) - else - assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + else + assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + end end - end end @@ -19606,30 +19606,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolWithArgmax") do - desc = tf.NodeDescription("MaxPoolWithArgmax") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolWithArgmax") do + desc = tf.NodeDescription("MaxPoolWithArgmax") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if padding !== nothing + desc["padding"] = Base.String(padding) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolWithArgmax") input_ = convert(tf.EagerTensor, input_) @@ -19651,13 +19651,13 @@ begin return res end end - function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) + 
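# ---- usage sketch (illustrative; not part of this patch) ----
# `assign` above writes `value_` into a ref tensor and only emits the
# validate_shape/use_locking attributes when the keywords are supplied:
w = TensorFlow.Variable(ones(2))   # Variable API assumed, not defined in this patch
step = TensorFlow.Ops.assign(w, [0.5, 0.5], use_locking=true)
# running `step` in a Session stores the new value and returns it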
#= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end end @@ -19667,30 +19667,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedReluX") do - desc = tf.NodeDescription("QuantizedReluX") - features_ = convert(Tensor{Any}, features_) - max_value_ = convert(Tensor{Float32}, max_value_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, max_value_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedReluX") do + desc = tf.NodeDescription("QuantizedReluX") + features_ = convert(Tensor{Any}, features_) + max_value_ = convert(Tensor{Float32}, max_value_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, max_value_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedReluX") features_ = convert(tf.EagerTensor, features_) @@ -19712,13 +19712,13 @@ begin return res end end - function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + end end - end end @@ -19728,37 +19728,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, 
shared_name=nothing) - local desc - tf.with_op_name(name, "RandomShuffleQueue") do - desc = tf.NodeDescription("RandomShuffleQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueue") do + desc = tf.NodeDescription("RandomShuffleQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function random_shuffle_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("RandomShuffleQueue") if component_types !== nothing @@ -19792,13 +19792,13 @@ begin return res[1] end end - function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - else - random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end 
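# ---- annotation (illustrative; not part of this patch) ----
# `random_shuffle_queue` takes no tensor inputs at all: its NodeDescription
# is configured purely through the `!== nothing` keyword guards above, so
# omitted attributes fall back to the kernel's defaults. Hypothetical call:
q = TensorFlow.Ops.random_shuffle_queue(
    component_types=[Int64],
    capacity=16,
    min_after_dequeue=4)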
end - end end @@ -19808,16 +19808,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "FFT2D") do - desc = tf.NodeDescription("FFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function fft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT2D") do + desc = tf.NodeDescription("FFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function fft2d_eager(input_; name=nothing) desc = tf.EagerOp("FFT2D") input_ = convert(tf.EagerTensor, input_) @@ -19830,13 +19830,13 @@ begin return res[1] end end - function fft2d(input_; name=nothing) - if tf.in_eager_mode() - fft2d_eager(input_; name=name) - else - fft2d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft2d(input_; name=nothing) + if tf.in_eager_mode() + fft2d_eager(input_; name=name) + else + fft2d_graph(input_; name=name) + end end - end end @@ -19846,23 +19846,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalThreadPoolDataset") do - desc = tf.NodeDescription("ExperimentalThreadPoolDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - thread_pool_ = convert(Tensor{Any}, thread_pool_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, thread_pool_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalThreadPoolDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + thread_pool_ = convert(Tensor{Any}, thread_pool_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, thread_pool_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalThreadPoolDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -19882,13 +19882,13 @@ begin return res[1] end end - function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -19898,32 +19898,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapUnstage") do - desc = tf.NodeDescription("OrderedMapUnstage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstage") do + desc = tf.NodeDescription("OrderedMapUnstage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstage") key_ = convert(tf.EagerTensor, key_) @@ -19952,13 +19952,13 @@ begin return res[1] end end - function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_graph(key_, indices_; name=name, 
capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -19968,26 +19968,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do - desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") - selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_) - data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_] - tf.add_input(desc, selector_input_dataset_) - tf.add_input(desc, data_input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end + function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") + selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_) + data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_] + tf.add_input(desc, selector_input_dataset_) + tf.add_input(desc, data_input_datasets_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") selector_input_dataset_ = convert(tf.EagerTensor, selector_input_dataset_) @@ -20010,13 +20010,13 @@ begin return res[1] end end - function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - else - experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end end - end end @@ -20026,16 +20026,16 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function real_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Real") do - desc = tf.NodeDescription("Real") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function real_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Real") do + desc = tf.NodeDescription("Real") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function real_eager(input_; name=nothing) desc = tf.EagerOp("Real") input_ = convert(tf.EagerTensor, input_) @@ -20048,13 +20048,13 @@ begin return res[1] end end - function real(input_; name=nothing) - if tf.in_eager_mode() - real_eager(input_; name=name) - else - real_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real(input_; name=nothing) + if tf.in_eager_mode() + real_eager(input_; name=name) + else + real_graph(input_; name=name) + end end - end end @@ -20064,24 +20064,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtNGrad") do - desc = tf.NodeDescription("SparseSegmentSqrtNGrad") - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - output_dim0_ = convert(Tensor{Int32}, output_dim0_) - (grad_,) = tf.tf_promote(grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNGrad") do + desc = tf.NodeDescription("SparseSegmentSqrtNGrad") + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + (grad_,) = tf.tf_promote(grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, output_dim0_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNGrad") grad_ = convert(tf.EagerTensor, grad_) @@ -20101,13 +20101,13 @@ begin return res[1] end end - function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) - else - sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, 
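# ---- annotation (not part of this patch) ----
# `sparse_segment_sqrt_n_grad_graph` above repeats the file-wide indexing
# convention: the Julia caller passes 1-based `indices_`, and the wrapper
# subtracts one before the kernel sees them, via
#     indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
# so callers write [1, 2, ...] where the raw TensorFlow op expects [0, 1, ...].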
output_dim0_; name=name) + else + sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end end - end end @@ -20117,17 +20117,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft2d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT2D") do - desc = tf.NodeDescription("RFFT2D") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function rfft2d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT2D") do + desc = tf.NodeDescription("RFFT2D") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function rfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT2D") input_ = convert(tf.EagerTensor, input_) @@ -20141,13 +20141,13 @@ begin return res[1] end end - function rfft2d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - rfft2d_eager(input_, fft_length_; name=name) - else - rfft2d_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft2d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft2d_eager(input_, fft_length_; name=name) + else + rfft2d_graph(input_, fft_length_; name=name) + end end - end end @@ -20157,15 +20157,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function var_is_initialized_op_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "VarIsInitializedOp") do - desc = tf.NodeDescription("VarIsInitializedOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - end - tf.Tensor(tf.Operation(desc)) + function var_is_initialized_op_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "VarIsInitializedOp") do + desc = tf.NodeDescription("VarIsInitializedOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) end + tf.Tensor(tf.Operation(desc)) + end function var_is_initialized_op_eager(resource_; name=nothing) desc = tf.EagerOp("VarIsInitializedOp") resource_ = convert(tf.EagerTensor, resource_) @@ -20177,13 +20177,13 @@ begin return res[1] end end - function var_is_initialized_op(resource_; name=nothing) - if tf.in_eager_mode() - var_is_initialized_op_eager(resource_; name=name) - else - var_is_initialized_op_graph(resource_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_is_initialized_op(resource_; name=nothing) + if tf.in_eager_mode() + var_is_initialized_op_eager(resource_; name=name) + else + var_is_initialized_op_graph(resource_; name=name) + end end - end end @@ -20193,19 +20193,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function 
boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_quantile_stream_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp") if container !== nothing @@ -20221,13 +20221,13 @@ begin return res[1] end end - function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -20237,18 +20237,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atan2_graph(y_, x_; name=nothing) - local desc - tf.with_op_name(name, "Atan2") do - desc = tf.NodeDescription("Atan2") - y_ = convert(Tensor{Any}, y_) - x_ = convert(Tensor{Any}, x_) - (y_, x_) = tf.tf_promote(y_, x_) - tf.add_input(desc, y_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function atan2_graph(y_, x_; name=nothing) + local desc + tf.with_op_name(name, "Atan2") do + desc = tf.NodeDescription("Atan2") + y_ = convert(Tensor{Any}, y_) + x_ = convert(Tensor{Any}, x_) + (y_, x_) = tf.tf_promote(y_, x_) + tf.add_input(desc, y_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function atan2_eager(y_, x_; name=nothing) desc = tf.EagerOp("Atan2") y_ = convert(tf.EagerTensor, y_) @@ -20264,13 +20264,13 @@ begin return res[1] end end - function atan2(y_, x_; name=nothing) - if tf.in_eager_mode() - atan2_eager(y_, x_; name=name) - else - atan2_graph(y_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan2(y_, x_; name=nothing) + if tf.in_eager_mode() + atan2_eager(y_, x_; name=name) + else + atan2_graph(y_, x_; name=name) + end end - end end @@ -20280,31 +20280,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomPoisson") do - desc = tf.NodeDescription("RandomPoisson") - shape_ = convert(Tensor{Any}, shape_) - rate_ = convert(Tensor{Any}, rate_) - (rate_,) = tf.tf_promote(rate_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = 
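# ---- usage sketch (illustrative; not part of this patch) ----
# `atan2` above runs both inputs through tf.tf_promote, so mixed numeric
# types should be promoted to a shared dtype before the "T" attr is set:
t = TensorFlow.Ops.atan2(TensorFlow.constant(1.0), TensorFlow.constant(2))
# the Int argument should be promoted to Float64 to match the first input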
Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) + function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoisson") do + desc = tf.NodeDescription("RandomPoisson") + shape_ = convert(Tensor{Any}, shape_) + rate_ = convert(Tensor{Any}, rate_) + (rate_,) = tf.tf_promote(rate_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end end + tf.Tensor(tf.Operation(desc)) + end function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoisson") shape_ = convert(tf.EagerTensor, shape_) @@ -20332,13 +20332,13 @@ begin return res[1] end end - function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - if tf.in_eager_mode() - random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) - else - random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + if tf.in_eager_mode() + random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + else + random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + end end - end end @@ -20348,25 +20348,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - local desc - tf.with_op_name(name, "ReverseSequence") do - desc = tf.NodeDescription("ReverseSequence") - input_ = convert(Tensor{Any}, input_) - seq_lengths_ = convert(Tensor{Int64}, seq_lengths_) - (input_,) = tf.tf_promote(input_) - (seq_lengths_,) = tf.tf_promote(seq_lengths_) - tf.add_input(desc, input_) - tf.add_input(desc, seq_lengths_) - if seq_dim !== nothing - desc["seq_dim"] = Base.Int(seq_dim) - end - if batch_dim !== nothing - desc["batch_dim"] = Base.Int(batch_dim) - end + function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + local desc + tf.with_op_name(name, "ReverseSequence") do + desc = tf.NodeDescription("ReverseSequence") + input_ = convert(Tensor{Any}, input_) + seq_lengths_ = convert(Tensor{Int64}, seq_lengths_) + (input_,) = tf.tf_promote(input_) + (seq_lengths_,) = tf.tf_promote(seq_lengths_) + tf.add_input(desc, input_) + tf.add_input(desc, seq_lengths_) + if seq_dim !== nothing + desc["seq_dim"] = Base.Int(seq_dim) + end + if batch_dim !== nothing + desc["batch_dim"] = Base.Int(batch_dim) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) desc = tf.EagerOp("ReverseSequence") input_ = convert(tf.EagerTensor, input_) @@ -20388,13 +20388,13 @@ begin return res[1] end end - function reverse_sequence(input_, 
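# ---- usage sketch (illustrative; argument values hypothetical) ----
# `random_poisson` above threads seed/seed2 through as integer attributes:
samples = TensorFlow.Ops.random_poisson(
    [2, 3],    # shape of the sample tensor
    5.0,       # rate (lambda)
    seed=42)   # pins the kernel's RNG stream; seed2 left at its default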
seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - if tf.in_eager_mode() - reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) - else - reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + if tf.in_eager_mode() + reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + else + reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + end end - end end @@ -20404,19 +20404,19 @@ end An op which emits a single Tensor value from an XLA computation. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "OutfeedEnqueue") do - desc = tf.NodeDescription("OutfeedEnqueue") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueue") do + desc = tf.NodeDescription("OutfeedEnqueue") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) desc = tf.EagerOp("OutfeedEnqueue") input_ = convert(tf.EagerTensor, input_) @@ -20432,13 +20432,13 @@ begin return res[1] end end - function outfeed_enqueue(input_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - outfeed_enqueue_eager(input_; name=name, dtype=dtype) - else - outfeed_enqueue_graph(input_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + outfeed_enqueue_eager(input_; name=name, dtype=dtype) + else + outfeed_enqueue_graph(input_; name=name, dtype=dtype) + end end - end end @@ -20448,18 +20448,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sub_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Sub") do - desc = tf.NodeDescription("Sub") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function sub_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Sub") do + desc = tf.NodeDescription("Sub") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function sub_eager(x_, y_; name=nothing) desc = tf.EagerOp("Sub") x_ = convert(tf.EagerTensor, x_) @@ -20475,13 +20475,13 @@ begin return res[1] end end - function sub(x_, y_; name=nothing) - if tf.in_eager_mode() - sub_eager(x_, y_; name=name) - else - sub_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sub(x_, y_; name=nothing) + if tf.in_eager_mode() + sub_eager(x_, 
y_; name=name) + else + sub_graph(x_, y_; name=name) + end end - end end @@ -20491,25 +20491,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) - local desc - tf.with_op_name(name, "StringSplit") do - desc = tf.NodeDescription("StringSplit") - input_ = convert(Tensor{String}, input_) - delimiter_ = convert(Tensor{String}, delimiter_) - tf.add_input(desc, input_) - tf.add_input(desc, delimiter_) - if skip_empty !== nothing - desc["skip_empty"] = Base.Bool(skip_empty) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + local desc + tf.with_op_name(name, "StringSplit") do + desc = tf.NodeDescription("StringSplit") + input_ = convert(Tensor{String}, input_) + delimiter_ = convert(Tensor{String}, delimiter_) + tf.add_input(desc, input_) + tf.add_input(desc, delimiter_) + if skip_empty !== nothing + desc["skip_empty"] = Base.Bool(skip_empty) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) desc = tf.EagerOp("StringSplit") input_ = convert(tf.EagerTensor, input_) @@ -20526,13 +20526,13 @@ begin return res end end - function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) - if tf.in_eager_mode() - string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) - else - string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) + if tf.in_eager_mode() + string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) + else + string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) + end end - end end @@ -20542,26 +20542,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "Cumprod") do - desc = tf.NodeDescription("Cumprod") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end + function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumprod") do + desc = tf.NodeDescription("Cumprod") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) desc = tf.EagerOp("Cumprod") x_ = convert(tf.EagerTensor, x_) @@ 
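# ---- usage sketch (illustrative; not part of this patch) ----
# `cumprod` above applies the same 1-based axis shift as `split_v`:
x = TensorFlow.constant([1.0 2.0; 3.0 4.0])
p = TensorFlow.Ops.cumprod(x, 1, exclusive=false, reverse=false)
# Julia axis 1 maps to TensorFlow axis 0: cumulative products down each column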
-20583,13 +20583,13 @@ begin return res[1] end end - function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.in_eager_mode() - cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - else - cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.in_eager_mode() + cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end end - end end @@ -20599,30 +20599,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "QuantizedResizeBilinear") do - desc = tf.NodeDescription("QuantizedResizeBilinear") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "QuantizedResizeBilinear") do + desc = tf.NodeDescription("QuantizedResizeBilinear") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) desc = tf.EagerOp("QuantizedResizeBilinear") images_ = convert(tf.EagerTensor, images_) @@ -20644,13 +20644,13 @@ begin return res end end - function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) - else - quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) + else + quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) + end end - end end @@ -20660,40 +20660,40 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, 
dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSingleExample") do - desc = tf.NodeDescription("ParseSingleExample") - serialized_ = convert(Tensor{String}, serialized_) - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, dense_defaults_) - if num_sparse !== nothing - desc["num_sparse"] = Base.Int(num_sparse) - end - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleExample") do + desc = tf.NodeDescription("ParseSingleExample") + serialized_ = convert(Tensor{String}, serialized_) + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, dense_defaults_) + if num_sparse !== nothing + desc["num_sparse"] = Base.Int(num_sparse) + end + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseSingleExample") serialized_ = convert(tf.EagerTensor, serialized_) @@ -20725,13 +20725,13 @@ begin return res end end - function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - if tf.in_eager_mode() - parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - else - parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + if tf.in_eager_mode() + parse_single_example_eager(serialized_, dense_defaults_; name=name, 
num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + else + parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + end end - end end @@ -20741,19 +20741,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "IsVariableInitialized") do - desc = tf.NodeDescription("IsVariableInitialized") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "IsVariableInitialized") do + desc = tf.NodeDescription("IsVariableInitialized") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) desc = tf.EagerOp("IsVariableInitialized") ref_ = convert(tf.EagerTensor, ref_) @@ -20769,13 +20769,13 @@ begin return res[1] end end - function is_variable_initialized(ref_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - is_variable_initialized_eager(ref_; name=name, dtype=dtype) - else - is_variable_initialized_graph(ref_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + is_variable_initialized_eager(ref_; name=name, dtype=dtype) + else + is_variable_initialized_graph(ref_; name=name, dtype=dtype) + end end - end end @@ -20785,25 +20785,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterSub") do - desc = tf.NodeDescription("ResourceScatterSub") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterSub") do + desc = tf.NodeDescription("ResourceScatterSub") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function 
resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterSub") resource_ = convert(tf.EagerTensor, resource_) @@ -20824,13 +20824,13 @@ begin return res[1] end end - function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -20840,19 +20840,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do - desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") if container !== nothing @@ -20868,64 +20868,64 @@ begin return res[1] end end - function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) - else - experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) + else + experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + end end - end end """ - cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0, is_training=true) + cudnn_rnnv2(input, input_h, input_c, params; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0, is_training=true) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - local desc - 
tf.with_op_name(name, "CudnnRNNV2") do - desc = tf.NodeDescription("CudnnRNNV2") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end + function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNNV2") do + desc = tf.NodeDescription("CudnnRNNV2") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) end - out + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) desc = tf.EagerOp("CudnnRNNV2") input_ = convert(tf.EagerTensor, input_) @@ -20968,13 +20968,13 @@ begin return res end end - function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.in_eager_mode() - cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - else - cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, 
direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.in_eager_mode() + cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end end - end end @@ -20984,21 +20984,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "AssignAdd") do - desc = tf.NodeDescription("AssignAdd") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignAdd") do + desc = tf.NodeDescription("AssignAdd") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignAdd") ref_ = convert(tf.EagerTensor, ref_) @@ -21017,13 +21017,13 @@ begin return res[1] end end - function assign_add(ref_, value_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_add_eager(ref_, value_; name=name, use_locking=use_locking) - else - assign_add_graph(ref_, value_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_add_eager(ref_, value_; name=name, use_locking=use_locking) + else + assign_add_graph(ref_, value_; name=name, use_locking=use_locking) + end end - end end @@ -21033,21 +21033,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TensorDataset") do - desc = tf.NodeDescription("TensorDataset") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorDataset") do + desc = tf.NodeDescription("TensorDataset") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function 
tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorDataset") components_ = convert(tf.EagerTensor, components_) @@ -21065,13 +21065,13 @@ begin return res[1] end end - function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - else - tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end end - end end @@ -21081,19 +21081,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bucketize_graph(input_; name=nothing, boundaries=nothing) - local desc - tf.with_op_name(name, "Bucketize") do - desc = tf.NodeDescription("Bucketize") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if boundaries !== nothing - desc["boundaries"] = map(Base.identity, boundaries) - end + function bucketize_graph(input_; name=nothing, boundaries=nothing) + local desc + tf.with_op_name(name, "Bucketize") do + desc = tf.NodeDescription("Bucketize") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if boundaries !== nothing + desc["boundaries"] = map(Base.identity, boundaries) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function bucketize_eager(input_; name=nothing, boundaries=nothing) desc = tf.EagerOp("Bucketize") input_ = convert(tf.EagerTensor, input_) @@ -21109,13 +21109,13 @@ begin return res[1] end end - function bucketize(input_; name=nothing, boundaries=nothing) - if tf.in_eager_mode() - bucketize_eager(input_; name=name, boundaries=boundaries) - else - bucketize_graph(input_; name=name, boundaries=boundaries) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing) + if tf.in_eager_mode() + bucketize_eager(input_; name=name, boundaries=boundaries) + else + bucketize_graph(input_; name=name, boundaries=boundaries) + end end - end end @@ -21125,25 +21125,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceMax") do - desc = tf.NodeDescription("SparseReduceMax") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function sparse_reduce_max_graph(input_indices_, input_values_, 
input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMax") do + desc = tf.NodeDescription("SparseReduceMax") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMax") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -21165,13 +21165,13 @@ begin return res[1] end end - function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end end @@ -21181,27 +21181,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradWithShape") do - desc = tf.NodeDescription("TensorArrayGradWithShape") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - tf.add_input(desc, shape_to_prepend_) - if source !== nothing - desc["source"] = Base.String(source) - end + function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradWithShape") do + desc = tf.NodeDescription("TensorArrayGradWithShape") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + tf.add_input(desc, shape_to_prepend_) + if source !== nothing + desc["source"] = Base.String(source) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradWithShape") handle_ = 
convert(tf.EagerTensor, handle_) @@ -21220,13 +21220,13 @@ begin return res end end - function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) - else - tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + else + tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + end end - end end @@ -21236,30 +21236,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters") if table_id !== nothing @@ -21281,13 +21281,13 @@ begin return res end end - function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, 
shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -21297,15 +21297,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_v3_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayCloseV3") do - desc = tf.NodeDescription("TensorArrayCloseV3") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_close_v3_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV3") do + desc = tf.NodeDescription("TensorArrayCloseV3") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_close_v3_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV3") handle_ = convert(tf.EagerTensor, handle_) @@ -21317,13 +21317,13 @@ begin return res[1] end end - function tensor_array_close_v3(handle_; name=nothing) - if tf.in_eager_mode() - tensor_array_close_v3_eager(handle_; name=name) - else - tensor_array_close_v3_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v3(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_v3_eager(handle_; name=name) + else + tensor_array_close_v3_graph(handle_; name=name) + end end - end end @@ -21333,23 +21333,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do - desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") - overlaps_ = convert(Tensor{Float32}, overlaps_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - tf.add_input(desc, overlaps_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, overlap_threshold_) - tf.add_input(desc, score_threshold_) - end - tf.Tensor(tf.Operation(desc)) + function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do + desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") + overlaps_ = convert(Tensor{Float32}, overlaps_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_) + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + tf.add_input(desc, overlaps_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + tf.add_input(desc, overlap_threshold_) + tf.add_input(desc, score_threshold_) end + tf.Tensor(tf.Operation(desc)) + end function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, 
score_threshold_; name=nothing) desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") overlaps_ = convert(tf.EagerTensor, overlaps_) @@ -21369,13 +21369,13 @@ begin return res[1] end end - function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) - else - non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + else + non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + end end - end end @@ -21385,25 +21385,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pack_graph(values_; name=nothing, N=nothing, axis=nothing) - local desc - tf.with_op_name(name, "Pack") do - desc = tf.NodeDescription("Pack") - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end + function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Pack") do + desc = tf.NodeDescription("Pack") + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function pack_eager(values_; name=nothing, N=nothing, axis=nothing) desc = tf.EagerOp("Pack") values_ = convert(tf.EagerTensor, values_) @@ -21425,13 +21425,13 @@ begin return res[1] end end - function pack(values_; name=nothing, N=nothing, axis=nothing) - if tf.in_eager_mode() - pack_eager(values_; name=name, N=N, axis=axis) - else - pack_graph(values_; name=name, N=N, axis=axis) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing) + if tf.in_eager_mode() + pack_eager(values_; name=name, N=N, axis=axis) + else + pack_graph(values_; name=name, N=N, axis=axis) + end end - end end @@ -21441,20 +21441,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradV2") do - desc = tf.NodeDescription("TensorArrayGradV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end + function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, 
"TensorArrayGradV2") do + desc = tf.NodeDescription("TensorArrayGradV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV2") handle_ = convert(tf.EagerTensor, handle_) @@ -21471,13 +21471,13 @@ begin return res[1] end end - function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) - else - tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) + end end - end end @@ -21487,21 +21487,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignSubVariableOp") do - desc = tf.NodeDescription("AssignSubVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignSubVariableOp") do + desc = tf.NodeDescription("AssignSubVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignSubVariableOp") resource_ = convert(tf.EagerTensor, resource_) @@ -21519,13 +21519,13 @@ begin return res[1] end end - function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end end @@ -21535,15 +21535,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT2D") do - desc = tf.NodeDescription("BatchFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function 
batch_fft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT2D") do + desc = tf.NodeDescription("BatchFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_fft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT2D") input_ = convert(tf.EagerTensor, input_) @@ -21555,13 +21555,13 @@ begin return res[1] end end - function batch_fft2d(input_; name=nothing) - if tf.in_eager_mode() - batch_fft2d_eager(input_; name=name) - else - batch_fft2d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft2d(input_; name=nothing) + if tf.in_eager_mode() + batch_fft2d_eager(input_; name=name) + else + batch_fft2d_graph(input_; name=name) + end end - end end @@ -21571,15 +21571,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function close_summary_writer_graph(writer_; name=nothing) - local desc - tf.with_op_name(name, "CloseSummaryWriter") do - desc = tf.NodeDescription("CloseSummaryWriter") - writer_ = convert(Tensor{Any}, writer_) - tf.add_input(desc, writer_) - end - tf.Tensor(tf.Operation(desc)) + function close_summary_writer_graph(writer_; name=nothing) + local desc + tf.with_op_name(name, "CloseSummaryWriter") do + desc = tf.NodeDescription("CloseSummaryWriter") + writer_ = convert(Tensor{Any}, writer_) + tf.add_input(desc, writer_) end + tf.Tensor(tf.Operation(desc)) + end function close_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("CloseSummaryWriter") writer_ = convert(tf.EagerTensor, writer_) @@ -21591,13 +21591,13 @@ begin return res[1] end end - function close_summary_writer(writer_; name=nothing) - if tf.in_eager_mode() - close_summary_writer_eager(writer_; name=name) - else - close_summary_writer_graph(writer_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function close_summary_writer(writer_; name=nothing) + if tf.in_eager_mode() + close_summary_writer_eager(writer_; name=name) + else + close_summary_writer_graph(writer_; name=name) + end end - end end @@ -21607,16 +21607,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rank_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Rank") do - desc = tf.NodeDescription("Rank") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function rank_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Rank") do + desc = tf.NodeDescription("Rank") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function rank_eager(input_; name=nothing) desc = tf.EagerOp("Rank") input_ = convert(tf.EagerTensor, input_) @@ -21629,13 +21629,13 @@ begin return res[1] end end - function rank(input_; name=nothing) - if tf.in_eager_mode() - rank_eager(input_; name=name) - else - rank_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rank(input_; name=nothing) + if tf.in_eager_mode() + rank_eager(input_; name=name) + else + rank_graph(input_; name=name) + end end - end end @@ -21645,16 +21645,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft3d_graph(input_; name=nothing) - local 
desc - tf.with_op_name(name, "FFT3D") do - desc = tf.NodeDescription("FFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function fft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT3D") do + desc = tf.NodeDescription("FFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function fft3d_eager(input_; name=nothing) desc = tf.EagerOp("FFT3D") input_ = convert(tf.EagerTensor, input_) @@ -21667,13 +21667,13 @@ begin return res[1] end end - function fft3d(input_; name=nothing) - if tf.in_eager_mode() - fft3d_eager(input_; name=name) - else - fft3d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft3d(input_; name=nothing) + if tf.in_eager_mode() + fft3d_eager(input_; name=name) + else + fft3d_graph(input_; name=name) + end end - end end @@ -21683,33 +21683,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyFtrl") do - desc = tf.NodeDescription("ApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrl") do + desc = tf.NodeDescription("ApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrl") var_ = convert(tf.EagerTensor, var_) @@ -21746,13 +21746,13 @@ begin return res[1] end end - function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - 
apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -21762,19 +21762,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) - local desc - tf.with_op_name(name, "Abort") do - desc = tf.NodeDescription("Abort") - if error_msg !== nothing - desc["error_msg"] = Base.String(error_msg) - end - if exit_without_error !== nothing - desc["exit_without_error"] = Base.Bool(exit_without_error) - end + function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + local desc + tf.with_op_name(name, "Abort") do + desc = tf.NodeDescription("Abort") + if error_msg !== nothing + desc["error_msg"] = Base.String(error_msg) + end + if exit_without_error !== nothing + desc["exit_without_error"] = Base.Bool(exit_without_error) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function abort_eager(; name=nothing, error_msg=nothing, exit_without_error=nothing) desc = tf.EagerOp("Abort") if error_msg !== nothing @@ -21790,13 +21790,13 @@ begin return res[1] end end - function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) - if tf.in_eager_mode() - abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) - else - abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + if tf.in_eager_mode() + abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + else + abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + end end - end end @@ -21806,24 +21806,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - local desc - tf.with_op_name(name, "AudioSpectrogram") do - desc = tf.NodeDescription("AudioSpectrogram") - input_ = convert(Tensor{Float32}, input_) - tf.add_input(desc, input_) - if window_size !== nothing - desc["window_size"] = Base.Int(window_size) - end - if stride !== nothing - desc["stride"] = Base.Int(stride) - end - if magnitude_squared !== nothing - desc["magnitude_squared"] = Base.Bool(magnitude_squared) - end + function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + local desc + tf.with_op_name(name, "AudioSpectrogram") do + desc = tf.NodeDescription("AudioSpectrogram") + input_ = convert(Tensor{Float32}, input_) + tf.add_input(desc, input_) + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + if stride !== nothing + desc["stride"] = Base.Int(stride) 
+ end + if magnitude_squared !== nothing + desc["magnitude_squared"] = Base.Bool(magnitude_squared) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) desc = tf.EagerOp("AudioSpectrogram") input_ = convert(tf.EagerTensor, input_) @@ -21844,13 +21844,13 @@ begin return res[1] end end - function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - if tf.in_eager_mode() - audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) - else - audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + if tf.in_eager_mode() + audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + else + audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + end end - end end @@ -21860,18 +21860,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_shape_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "VariableShape") do - desc = tf.NodeDescription("VariableShape") - input_ = convert(Tensor{Any}, input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function variable_shape_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "VariableShape") do + desc = tf.NodeDescription("VariableShape") + input_ = convert(Tensor{Any}, input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function variable_shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("VariableShape") input_ = convert(tf.EagerTensor, input_) @@ -21886,13 +21886,13 @@ begin return res[1] end end - function variable_shape(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - variable_shape_eager(input_; name=name, out_type=out_type) - else - variable_shape_graph(input_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + variable_shape_eager(input_; name=name, out_type=out_type) + else + variable_shape_graph(input_; name=name, out_type=out_type) + end end - end end @@ -21902,28 +21902,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FIFOQueueV2") do - desc = tf.NodeDescription("FIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = 
Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueueV2") do + desc = tf.NodeDescription("FIFOQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FIFOQueueV2") if component_types !== nothing @@ -21948,13 +21948,13 @@ begin return res[1] end end - function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -21964,25 +21964,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Variable") do - desc = tf.NodeDescription("Variable") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Variable") do + desc = tf.NodeDescription("Variable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function variable_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Variable") if shape !== nothing @@ -22004,13 +22004,13 @@ begin return res[1] end end - 
function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - else - variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end end - end end @@ -22020,17 +22020,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestCreateTreeVariable") do - desc = tf.NodeDescription("TensorForestCreateTreeVariable") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tree_config_ = convert(Tensor{String}, tree_config_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, tree_config_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestCreateTreeVariable") do + desc = tf.NodeDescription("TensorForestCreateTreeVariable") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tree_config_ = convert(Tensor{String}, tree_config_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestCreateTreeVariable") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -22044,13 +22044,13 @@ begin return res[1] end end - function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) - if tf.in_eager_mode() - tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) - else - tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) + if tf.in_eager_mode() + tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) + else + tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) + end end - end end @@ -22060,30 +22060,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradWithArgmax") do - desc = tf.NodeDescription("MaxPoolGradWithArgmax") - input_ = convert(Tensor{Any}, input_) - grad_ = convert(Tensor{Any}, grad_) - argmax_ = convert(Tensor{Any}, argmax_) - (argmax_,) = tf.tf_promote(argmax_) - (input_, grad_) = tf.tf_promote(input_, grad_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - 
desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) + function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradWithArgmax") + input_ = convert(Tensor{Any}, input_) + grad_ = convert(Tensor{Any}, grad_) + argmax_ = convert(Tensor{Any}, argmax_) + (argmax_,) = tf.tf_promote(argmax_) + (input_, grad_) = tf.tf_promote(input_, grad_) + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end end + tf.Tensor(tf.Operation(desc)) + end function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradWithArgmax") input_ = convert(tf.EagerTensor, input_) @@ -22111,13 +22111,13 @@ begin return res[1] end end - function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end end @@ -22127,23 +22127,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_switch_graph(data_, pred_; name=nothing) - local desc - tf.with_op_name(name, "RefSwitch") do - desc = tf.NodeDescription("RefSwitch") - data_ = convert(Tensor{Any}, data_) - pred_ = convert(Tensor{Bool}, pred_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function ref_switch_graph(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "RefSwitch") do + desc = tf.NodeDescription("RefSwitch") + data_ = convert(Tensor{Any}, data_) + pred_ = convert(Tensor{Bool}, pred_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, pred_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function ref_switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("RefSwitch") data_ = convert(tf.EagerTensor, data_) @@ -22158,13 +22158,13 @@ begin return res end end - function ref_switch(data_, pred_; name=nothing) - if tf.in_eager_mode() - ref_switch_eager(data_, pred_; name=name) - else - ref_switch_graph(data_, pred_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_switch(data_, pred_; name=nothing) + if tf.in_eager_mode() + ref_switch_eager(data_, 
pred_; name=name) + else + ref_switch_graph(data_, pred_; name=name) + end end - end end @@ -22174,15 +22174,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_fprint_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "SdcaFprint") do - desc = tf.NodeDescription("SdcaFprint") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function sdca_fprint_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "SdcaFprint") do + desc = tf.NodeDescription("SdcaFprint") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function sdca_fprint_eager(input_; name=nothing) desc = tf.EagerOp("SdcaFprint") input_ = convert(tf.EagerTensor, input_) @@ -22194,13 +22194,13 @@ begin return res[1] end end - function sdca_fprint(input_; name=nothing) - if tf.in_eager_mode() - sdca_fprint_eager(input_; name=name) - else - sdca_fprint_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_fprint(input_; name=nothing) + if tf.in_eager_mode() + sdca_fprint_eager(input_; name=name) + else + sdca_fprint_graph(input_; name=name) + end end - end end @@ -22210,19 +22210,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function leaky_relu_graph(features_; name=nothing, alpha=nothing) - local desc - tf.with_op_name(name, "LeakyRelu") do - desc = tf.NodeDescription("LeakyRelu") - features_ = convert(Tensor{Float32}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end + function leaky_relu_graph(features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyRelu") do + desc = tf.NodeDescription("LeakyRelu") + features_ = convert(Tensor{Float32}, features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function leaky_relu_eager(features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyRelu") features_ = convert(tf.EagerTensor, features_) @@ -22238,13 +22238,13 @@ begin return res[1] end end - function leaky_relu(features_; name=nothing, alpha=nothing) - if tf.in_eager_mode() - leaky_relu_eager(features_; name=name, alpha=alpha) - else - leaky_relu_graph(features_; name=name, alpha=alpha) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing) + if tf.in_eager_mode() + leaky_relu_eager(features_; name=name, alpha=alpha) + else + leaky_relu_graph(features_; name=name, alpha=alpha) + end end - end end @@ -22254,18 +22254,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_n_graph(input_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "IdentityN") do - desc = tf.NodeDescription("IdentityN") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end + function identity_n_graph(input_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "IdentityN") do + desc = tf.NodeDescription("IdentityN") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) 
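# Usage sketch for wrappers like leaky_relu above, assuming an eager context is
# active (the input value is illustrative; `alpha` is the LeakyRelu attribute
# threaded through both the graph and eager paths):
x = TensorFlow.constant([-1.0, 0.5, 2.0])
y = leaky_relu(x; alpha=0.2)   # dispatches to leaky_relu_eager in eager mode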
+ if T !== nothing + desc["T"] = map(Base.identity, T) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function identity_n_eager(input_; name=nothing, T=nothing) desc = tf.EagerOp("IdentityN") input_ = convert(tf.EagerTensor, input_) @@ -22280,77 +22280,77 @@ begin return res[1] end end - function identity_n(input_; name=nothing, T=nothing) - if tf.in_eager_mode() - identity_n_eager(input_; name=name, T=T) - else - identity_n_graph(input_; name=name, T=T) - end - end -end - - -""" - cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) - - -""" -begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNBackpropV2") do - desc = tf.NodeDescription("CudnnRNNBackpropV2") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - output_ = convert(Tensor{Any}, output_) - output_h_ = convert(Tensor{Any}, output_h_) - output_c_ = convert(Tensor{Any}, output_c_) - output_backprop_ = convert(Tensor{Any}, output_backprop_) - output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) - output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) - reserve_space_ = convert(Tensor{Any}, reserve_space_) - host_reserved_ = convert(Tensor{Any}, host_reserved_) - (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - tf.add_input(desc, host_reserved_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_n(input_; name=nothing, T=nothing) + if tf.in_eager_mode() + identity_n_eager(input_; name=name, T=T) + else + identity_n_graph(input_; name=name, T=T) + end + end +end + + +""" + cudnn_rnn_backprop_v2(input, input_h, input_c, params, output, output_h, output_c, output_backprop, 
output_h_backprop, output_c_backprop, reserve_space, host_reserved; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) + + +""" +begin + function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackpropV2") do + desc = tf.NodeDescription("CudnnRNNBackpropV2") + input_ = convert(Tensor{Any}, input_) + input_h_ = convert(Tensor{Any}, input_h_) + input_c_ = convert(Tensor{Any}, input_c_) + params_ = convert(Tensor{Any}, params_) + output_ = convert(Tensor{Any}, output_) + output_h_ = convert(Tensor{Any}, output_h_) + output_c_ = convert(Tensor{Any}, output_c_) + output_backprop_ = convert(Tensor{Any}, output_backprop_) + output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + reserve_space_ = convert(Tensor{Any}, reserve_space_) + host_reserved_ = convert(Tensor{Any}, host_reserved_) + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + tf.add_input(desc, input_) + tf.add_input(desc, input_h_) + tf.add_input(desc, input_c_) + tf.add_input(desc, params_) + tf.add_input(desc, output_) + tf.add_input(desc, output_h_) + tf.add_input(desc, output_c_) + tf.add_input(desc, output_backprop_) + tf.add_input(desc, output_h_backprop_) + tf.add_input(desc, output_c_backprop_) + tf.add_input(desc, reserve_space_) + tf.add_input(desc, host_reserved_) + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNBackpropV2") input_ = convert(tf.EagerTensor, input_) @@ -22413,13 +22413,13 @@ begin return res end end - function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - 
cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end end @@ -22429,25 +22429,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function requantization_range_graph(input_, input_min_, input_max_; name=nothing) - local desc - tf.with_op_name(name, "RequantizationRange") do - desc = tf.NodeDescription("RequantizationRange") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "RequantizationRange") do + desc = tf.NodeDescription("RequantizationRange") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function requantization_range_eager(input_, input_min_, input_max_; name=nothing) desc = tf.EagerOp("RequantizationRange") input_ = convert(tf.EagerTensor, input_) @@ -22464,13 +22464,13 @@ begin return res end end - function requantization_range(input_, input_min_, input_max_; name=nothing) - if tf.in_eager_mode() - requantization_range_eager(input_, input_min_, input_max_; name=name) - else - requantization_range_graph(input_, input_min_, input_max_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantization_range(input_, input_min_, input_max_; name=nothing) + if tf.in_eager_mode() + requantization_range_eager(input_, input_min_, input_max_; name=name) + else + requantization_range_graph(input_, input_min_, input_max_; name=name) + end end - end end @@ -22480,18 +22480,18 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function maximum_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Maximum") do - desc = tf.NodeDescription("Maximum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function maximum_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Maximum") do + desc = tf.NodeDescription("Maximum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function maximum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Maximum") x_ = convert(tf.EagerTensor, x_) @@ -22507,13 +22507,13 @@ begin return res[1] end end - function maximum(x_, y_; name=nothing) - if tf.in_eager_mode() - maximum_eager(x_, y_; name=name) - else - maximum_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function maximum(x_, y_; name=nothing) + if tf.in_eager_mode() + maximum_eager(x_, y_; name=name) + else + maximum_graph(x_, y_; name=name) + end end - end end @@ -22523,19 +22523,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reshape_graph(tensor_, shape_; name=nothing) - local desc - tf.with_op_name(name, "Reshape") do - desc = tf.NodeDescription("Reshape") - tensor_ = convert(Tensor{Any}, tensor_) - shape_ = convert(Tensor{Int32}, shape_) - (tensor_,) = tf.tf_promote(tensor_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - end - tf.Tensor(tf.Operation(desc)) + function reshape_graph(tensor_, shape_; name=nothing) + local desc + tf.with_op_name(name, "Reshape") do + desc = tf.NodeDescription("Reshape") + tensor_ = convert(Tensor{Any}, tensor_) + shape_ = convert(Tensor{Int32}, shape_) + (tensor_,) = tf.tf_promote(tensor_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, shape_) end + tf.Tensor(tf.Operation(desc)) + end function reshape_eager(tensor_, shape_; name=nothing) desc = tf.EagerOp("Reshape") tensor_ = convert(tf.EagerTensor, tensor_) @@ -22551,13 +22551,13 @@ begin return res[1] end end - function reshape(tensor_, shape_; name=nothing) - if tf.in_eager_mode() - reshape_eager(tensor_, shape_; name=name) - else - reshape_graph(tensor_, shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reshape(tensor_, shape_; name=nothing) + if tf.in_eager_mode() + reshape_eager(tensor_, shape_; name=name) + else + reshape_graph(tensor_, shape_; name=name) + end end - end end @@ -22567,23 +22567,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - local desc - tf.with_op_name(name, "MatrixSolveLs") do - desc = tf.NodeDescription("MatrixSolveLs") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) - end + function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, 
fast=nothing) + local desc + tf.with_op_name(name, "MatrixSolveLs") do + desc = tf.NodeDescription("MatrixSolveLs") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + tf.add_input(desc, l2_regularizer_) + if fast !== nothing + desc["fast"] = Base.Bool(fast) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) desc = tf.EagerOp("MatrixSolveLs") matrix_ = convert(tf.EagerTensor, matrix_) @@ -22604,13 +22604,13 @@ begin return res[1] end end - function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - if tf.in_eager_mode() - matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - else - matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.in_eager_mode() + matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end end - end end @@ -22620,19 +22620,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "TFRecordDataset") do - desc = tf.NodeDescription("TFRecordDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) + function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TFRecordDataset") do + desc = tf.NodeDescription("TFRecordDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) end + tf.Tensor(tf.Operation(desc)) + end function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) desc = tf.EagerOp("TFRecordDataset") filenames_ = convert(tf.EagerTensor, filenames_) @@ -22648,13 +22648,13 @@ begin return res[1] end end - function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.in_eager_mode() - tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) - else - tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.in_eager_mode() + tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end end - end end @@ -22664,23 +22664,23 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do - desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end + function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do + desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -22700,13 +22700,13 @@ begin return res[1] end end - function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - else - boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end end @@ -22716,23 +22716,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - 
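# List-valued inputs are passed as a Vector of tensors, mirroring the
# `[convert(Tensor{Int32}, x) for x = bucketized_features_]` conversion above;
# a hypothetical call (the handle and feature tensors are illustrative):
outputs = boosted_trees_example_debug_outputs(ensemble_handle, [feat1, feat2];
                                              num_bucketized_features=2, logits_dimension=1)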
tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do - desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - max_intra_op_parallelism_ = convert(Tensor{Int64}, max_intra_op_parallelism_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, max_intra_op_parallelism_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do + desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + max_intra_op_parallelism_ = convert(Tensor{Int64}, max_intra_op_parallelism_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, max_intra_op_parallelism_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -22752,13 +22752,13 @@ begin return res[1] end end - function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -22768,16 +22768,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hsv_to_rgb_graph(images_; name=nothing) - local desc - tf.with_op_name(name, "HSVToRGB") do - desc = tf.NodeDescription("HSVToRGB") - images_ = convert(Tensor{Float32}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - end - tf.Tensor(tf.Operation(desc)) + function hsv_to_rgb_graph(images_; name=nothing) + local desc + tf.with_op_name(name, "HSVToRGB") do + desc = tf.NodeDescription("HSVToRGB") + images_ = convert(Tensor{Float32}, images_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) end + tf.Tensor(tf.Operation(desc)) + end function 
hsv_to_rgb_eager(images_; name=nothing) desc = tf.EagerOp("HSVToRGB") images_ = convert(tf.EagerTensor, images_) @@ -22790,13 +22790,13 @@ begin return res[1] end end - function hsv_to_rgb(images_; name=nothing) - if tf.in_eager_mode() - hsv_to_rgb_eager(images_; name=name) - else - hsv_to_rgb_graph(images_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hsv_to_rgb(images_; name=nothing) + if tf.in_eager_mode() + hsv_to_rgb_eager(images_; name=name) + else + hsv_to_rgb_graph(images_; name=name) + end end - end end @@ -22806,25 +22806,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterDiv") do - desc = tf.NodeDescription("ScatterDiv") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterDiv") do + desc = tf.NodeDescription("ScatterDiv") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterDiv") ref_ = convert(tf.EagerTensor, ref_) @@ -22846,13 +22846,13 @@ begin return res[1] end end - function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -22862,26 +22862,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - local desc - tf.with_op_name(name, "DecodeWav") do - desc = tf.NodeDescription("DecodeWav") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if desired_channels !== nothing - desc["desired_channels"] = Base.Int(desired_channels) - end - if desired_samples !== nothing - desc["desired_samples"] = Base.Int(desired_samples) - end + function decode_wav_graph(contents_; name=nothing, 
desired_channels=nothing, desired_samples=nothing) + local desc + tf.with_op_name(name, "DecodeWav") do + desc = tf.NodeDescription("DecodeWav") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) + if desired_channels !== nothing + desc["desired_channels"] = Base.Int(desired_channels) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if desired_samples !== nothing + desc["desired_samples"] = Base.Int(desired_samples) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) desc = tf.EagerOp("DecodeWav") contents_ = convert(tf.EagerTensor, contents_) @@ -22899,13 +22899,13 @@ begin return res end end - function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - if tf.in_eager_mode() - decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) - else - decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + if tf.in_eager_mode() + decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + else + decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + end end - end end @@ -22915,16 +22915,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Log") do - desc = tf.NodeDescription("Log") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function log_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Log") do + desc = tf.NodeDescription("Log") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function log_eager(x_; name=nothing) desc = tf.EagerOp("Log") x_ = convert(tf.EagerTensor, x_) @@ -22937,13 +22937,13 @@ begin return res[1] end end - function log(x_; name=nothing) - if tf.in_eager_mode() - log_eager(x_; name=name) - else - log_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log(x_; name=nothing) + if tf.in_eager_mode() + log_eager(x_; name=name) + else + log_graph(x_; name=name) + end end - end end @@ -22953,24 +22953,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "SaveV2") do - desc = tf.NodeDescription("SaveV2") - prefix_ = convert(Tensor{String}, prefix_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) - tensors_ = [convert(Tensor{Any}, x) for x = tensors_] - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - tf.add_input(desc, tensors_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + function save_v2_graph(prefix_, 
tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "SaveV2") do + desc = tf.NodeDescription("SaveV2") + prefix_ = convert(Tensor{String}, prefix_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + tensors_ = [convert(Tensor{Any}, x) for x = tensors_] + tf.add_input(desc, prefix_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shape_and_slices_) + tf.add_input(desc, tensors_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) desc = tf.EagerOp("SaveV2") prefix_ = convert(tf.EagerTensor, prefix_) @@ -22991,13 +22991,13 @@ begin return res[1] end end - function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) - else - save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + else + save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + end end - end end @@ -23007,16 +23007,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deep_copy_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "DeepCopy") do - desc = tf.NodeDescription("DeepCopy") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function deep_copy_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "DeepCopy") do + desc = tf.NodeDescription("DeepCopy") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function deep_copy_eager(x_; name=nothing) desc = tf.EagerOp("DeepCopy") x_ = convert(tf.EagerTensor, x_) @@ -23029,13 +23029,13 @@ begin return res[1] end end - function deep_copy(x_; name=nothing) - if tf.in_eager_mode() - deep_copy_eager(x_; name=name) - else - deep_copy_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deep_copy(x_; name=nothing) + if tf.in_eager_mode() + deep_copy_eager(x_; name=name) + else + deep_copy_graph(x_; name=name) + end end - end end @@ -23045,21 +23045,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ModelDataset") do - desc = tf.NodeDescription("ModelDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + 
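# SaveV2 writes several tensors to one checkpoint; a sketch of the save_v2
# wrapper above (the path, names, and dtypes are illustrative, and an empty
# shape_and_slices entry means the whole tensor is saved):
save_v2("/tmp/model.ckpt", ["weights", "bias"], ["", ""], [w, b];
        dtypes=[Float32, Float32])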
tf.with_op_name(name, "ModelDataset") do + desc = tf.NodeDescription("ModelDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ModelDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -23077,13 +23077,13 @@ begin return res[1] end end - function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -23093,69 +23093,69 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSequenceExample") do - desc = tf.NodeDescription("ParseSequenceExample") - serialized_ = convert(Tensor{String}, serialized_) - debug_name_ = convert(Tensor{String}, debug_name_) - context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, debug_name_) - tf.add_input(desc, context_dense_defaults_) - if feature_list_dense_missing_assumed_empty !== nothing - desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) - end - if context_sparse_keys !== nothing - desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) - end - if context_dense_keys !== nothing - desc["context_dense_keys"] = map(Base.identity, context_dense_keys) - end - if feature_list_sparse_keys !== nothing - desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) - end - if feature_list_dense_keys !== nothing - desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) - end - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) - end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) - end - if 
Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:9 - push!(out, tf.Tensor(op, out_idx)) - end - out + function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSequenceExample") do + desc = tf.NodeDescription("ParseSequenceExample") + serialized_ = convert(Tensor{String}, serialized_) + debug_name_ = convert(Tensor{String}, debug_name_) + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, debug_name_) + tf.add_input(desc, context_dense_defaults_) + if feature_list_dense_missing_assumed_empty !== nothing + desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) + end + if context_sparse_keys !== nothing + desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) + end + if context_dense_keys !== nothing + desc["context_dense_keys"] = map(Base.identity, context_dense_keys) + end + if feature_list_sparse_keys !== nothing + desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) + end + if feature_list_dense_keys !== nothing + desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) + end + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = 
map(Base.identity, feature_list_sparse_types) + end + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:9 + push!(out, tf.Tensor(op, out_idx)) end + out + end function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) desc = tf.EagerOp("ParseSequenceExample") serialized_ = convert(tf.EagerTensor, serialized_) @@ -23216,13 +23216,13 @@ begin return res end end - function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - if tf.in_eager_mode() - parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - else - parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, 
Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.in_eager_mode() + parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + end end - end end @@ -23232,16 +23232,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sinh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sinh") do - desc = tf.NodeDescription("Sinh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function sinh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sinh") do + desc = tf.NodeDescription("Sinh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function sinh_eager(x_; name=nothing) desc = tf.EagerOp("Sinh") x_ = convert(tf.EagerTensor, x_) @@ -23254,13 +23254,13 @@ begin return res[1] end end - function sinh(x_; name=nothing) - if tf.in_eager_mode() - sinh_eager(x_; name=name) - else - sinh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sinh(x_; name=nothing) + if tf.in_eager_mode() + sinh_eager(x_; name=name) + else + sinh_graph(x_; name=name) + end end - end end @@ -23270,25 +23270,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorV2") do - desc = tf.NodeDescription("IteratorV2") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - 
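# The unary math wrappers (sinh here, like log earlier and expm1 further down)
# all share this single-input shape; an illustrative graph-mode call:
node = sinh(TensorFlow.constant(1.0))   # returns a Tensor; run(sess, node) evaluates it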
desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorV2") do + desc = tf.NodeDescription("IteratorV2") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_v2_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorV2") if shared_name !== nothing @@ -23310,13 +23310,13 @@ begin return res[1] end end - function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -23326,22 +23326,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWriteV2") do - desc = tf.NodeDescription("TensorArrayWriteV2") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV2") do + desc = tf.NodeDescription("TensorArrayWriteV2") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayWriteV2") handle_ = convert(tf.EagerTensor, handle_) @@ -23360,13 +23360,13 @@ begin return res[1] end end - function tensor_array_write_v2(handle_, index_, value_, flow_in_; 
name=nothing) - if tf.in_eager_mode() - tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) + end end - end end @@ -23376,18 +23376,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListElementShape") do - desc = tf.NodeDescription("TensorListElementShape") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListElementShape") do + desc = tf.NodeDescription("TensorListElementShape") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListElementShape") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -23402,13 +23402,13 @@ begin return res[1] end end - function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) - else - tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) + else + tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) + end end - end end @@ -23418,15 +23418,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_size_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueSizeV2") do - desc = tf.NodeDescription("QueueSizeV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function queue_size_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueSizeV2") do + desc = tf.NodeDescription("QueueSizeV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function queue_size_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSizeV2") handle_ = convert(tf.EagerTensor, handle_) @@ -23438,13 +23438,13 @@ begin return res[1] end end - function queue_size_v2(handle_; name=nothing) - if tf.in_eager_mode() - queue_size_v2_eager(handle_; name=name) - else - queue_size_v2_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size_v2(handle_; name=nothing) + if 
tf.in_eager_mode() + queue_size_v2_eager(handle_; name=name) + else + queue_size_v2_graph(handle_; name=name) + end end - end end @@ -23454,16 +23454,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function expm1_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Expm1") do - desc = tf.NodeDescription("Expm1") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function expm1_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Expm1") do + desc = tf.NodeDescription("Expm1") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function expm1_eager(x_; name=nothing) desc = tf.EagerOp("Expm1") x_ = convert(tf.EagerTensor, x_) @@ -23476,13 +23476,13 @@ begin return res[1] end end - function expm1(x_; name=nothing) - if tf.in_eager_mode() - expm1_eager(x_; name=name) - else - expm1_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expm1(x_; name=nothing) + if tf.in_eager_mode() + expm1_eager(x_; name=name) + else + expm1_graph(x_; name=name) + end end - end end @@ -23492,20 +23492,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixBandPart") do - desc = tf.NodeDescription("BatchMatrixBandPart") - input_ = convert(Tensor{Any}, input_) - num_lower_ = convert(Tensor{Int64}, num_lower_) - num_upper_ = convert(Tensor{Int64}, num_upper_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - end - tf.Tensor(tf.Operation(desc)) + function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixBandPart") do + desc = tf.NodeDescription("BatchMatrixBandPart") + input_ = convert(Tensor{Any}, input_) + num_lower_ = convert(Tensor{Int64}, num_lower_) + num_upper_ = convert(Tensor{Int64}, num_upper_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, num_lower_) + tf.add_input(desc, num_upper_) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) desc = tf.EagerOp("BatchMatrixBandPart") input_ = convert(tf.EagerTensor, input_) @@ -23522,13 +23522,13 @@ begin return res[1] end end - function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) - if tf.in_eager_mode() - batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) - else - batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + if tf.in_eager_mode() + batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) + else + batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + end end - end end @@ -23538,23 +23538,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ConcatenateDataset") do - desc = 
tf.NodeDescription("ConcatenateDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - another_dataset_ = convert(Tensor{Any}, another_dataset_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, another_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ConcatenateDataset") do + desc = tf.NodeDescription("ConcatenateDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + another_dataset_ = convert(Tensor{Any}, another_dataset_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, another_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ConcatenateDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -23574,13 +23574,13 @@ begin return res[1] end end - function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -23590,15 +23590,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_gif_graph(contents_; name=nothing) - local desc - tf.with_op_name(name, "DecodeGif") do - desc = tf.NodeDescription("DecodeGif") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - end - tf.Tensor(tf.Operation(desc)) + function decode_gif_graph(contents_; name=nothing) + local desc + tf.with_op_name(name, "DecodeGif") do + desc = tf.NodeDescription("DecodeGif") + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, contents_) end + tf.Tensor(tf.Operation(desc)) + end function decode_gif_eager(contents_; name=nothing) desc = tf.EagerOp("DecodeGif") contents_ = convert(tf.EagerTensor, contents_) @@ -23610,13 +23610,13 @@ begin return res[1] end end - function decode_gif(contents_; name=nothing) - if tf.in_eager_mode() - decode_gif_eager(contents_; name=name) - else - decode_gif_graph(contents_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_gif(contents_; name=nothing) + if tf.in_eager_mode() + decode_gif_eager(contents_; name=name) + else + decode_gif_graph(contents_; 
name=name) + end end - end end @@ -23626,57 +23626,57 @@ end Runs replicated computations on a distributed TPU system. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) - local desc - tf.with_op_name(name, "TPUReplicate") do - desc = tf.NodeDescription("TPUReplicate") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_] - variables_ = [convert(Tensor{Any}, x) for x = variables_] - guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_] - tf.add_input(desc, inputs_) - tf.add_input(desc, broadcast_inputs_) - tf.add_input(desc, variables_) - tf.add_input(desc, guaranteed_constants_) - if computation !== nothing - desc["computation"] = Base.identity(computation) - end - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) - end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - desc["device_assignment"] = map(Base.identity, device_assignment) - end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if Tbroadcast_inputs !== nothing - desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) - end - if NumVariables !== nothing - desc["NumVariables"] = Base.Int(NumVariables) - end - if Tguaranteed_constants !== nothing - desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - end - tf.Tensor(tf.Operation(desc)) + function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) + local desc + tf.with_op_name(name, "TPUReplicate") do + desc = tf.NodeDescription("TPUReplicate") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_] + variables_ = [convert(Tensor{Any}, x) for x = variables_] + guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_] + tf.add_input(desc, inputs_) + tf.add_input(desc, broadcast_inputs_) + tf.add_input(desc, variables_) + tf.add_input(desc, guaranteed_constants_) + if computation !== nothing + desc["computation"] = Base.identity(computation) + end + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + if topology !== nothing + desc["topology"] = Base.String(topology) + end + if 
use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if Tbroadcast_inputs !== nothing + desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) + end + if NumVariables !== nothing + desc["NumVariables"] = Base.Int(NumVariables) + end + if Tguaranteed_constants !== nothing + desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end end + tf.Tensor(tf.Operation(desc)) + end function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) desc = tf.EagerOp("TPUReplicate") inputs_ = convert(tf.EagerTensor, inputs_) @@ -23730,13 +23730,13 @@ begin return res[1] end end - function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) - if tf.in_eager_mode() - tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) - else - tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) + if tf.in_eager_mode() + tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, 
Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) + else + tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) + end end - end end @@ -23746,24 +23746,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) - local desc - tf.with_op_name(name, "BatchSelfAdjointEigV2") do - desc = tf.NodeDescription("BatchSelfAdjointEigV2") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "BatchSelfAdjointEigV2") do + desc = tf.NodeDescription("BatchSelfAdjointEigV2") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("BatchSelfAdjointEigV2") input_ = convert(tf.EagerTensor, input_) @@ -23779,13 +23779,13 @@ begin return res end end - function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.in_eager_mode() - batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) - else - batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.in_eager_mode() + batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end end - end end @@ -23795,19 +23795,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shape_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Shape") do - desc = tf.NodeDescription("Shape") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function shape_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Shape") do + desc = tf.NodeDescription("Shape") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function shape_eager(input_; name=nothing, out_type=nothing) desc = tf.EagerOp("Shape") input_ = convert(tf.EagerTensor, input_) @@ -23823,13 +23823,13 @@ begin return res[1] end end - function 
shape(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - shape_eager(input_; name=name, out_type=out_type) - else - shape_graph(input_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + shape_eager(input_; name=name, out_type=out_type) + else + shape_graph(input_; name=name, out_type=out_type) + end end - end end @@ -23839,23 +23839,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "RepeatDataset") do - desc = tf.NodeDescription("RepeatDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RepeatDataset") do + desc = tf.NodeDescription("RepeatDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + count_ = convert(Tensor{Int64}, count_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, count_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RepeatDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -23875,13 +23875,13 @@ begin return res[1] end end - function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -23891,18 +23891,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reciprocal_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "ReciprocalGrad") do - desc = tf.NodeDescription("ReciprocalGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function reciprocal_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "ReciprocalGrad") do + desc = tf.NodeDescription("ReciprocalGrad") + y_ = 
convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function reciprocal_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("ReciprocalGrad") y_ = convert(tf.EagerTensor, y_) @@ -23918,41 +23918,41 @@ begin return res[1] end end - function reciprocal_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - reciprocal_grad_eager(y_, dy_; name=name) - else - reciprocal_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + reciprocal_grad_eager(y_, dy_; name=name) + else + reciprocal_grad_graph(y_, dy_; name=name) + end end - end end """ - crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=bilinear) + crop_and_resize_grad_boxes(grads, image, boxes, box_ind; method=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - local desc - tf.with_op_name(name, "CropAndResizeGradBoxes") do - desc = tf.NodeDescription("CropAndResizeGradBoxes") - grads_ = convert(Tensor{Float32}, grads_) - image_ = convert(Tensor{Any}, image_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, grads_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - if method !== nothing - desc["method"] = Base.String(method) - end + function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradBoxes") do + desc = tf.NodeDescription("CropAndResizeGradBoxes") + grads_ = convert(Tensor{Float32}, grads_) + image_ = convert(Tensor{Any}, image_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, grads_) + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + if method !== nothing + desc["method"] = Base.String(method) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradBoxes") grads_ = convert(tf.EagerTensor, grads_) @@ -23974,13 +23974,13 @@ begin return res[1] end end - function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - if tf.in_eager_mode() - crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) - else - crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + if tf.in_eager_mode() + crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) + else + crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method) + end end - end end @@ -23990,21 +23990,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) - 
local desc - tf.with_op_name(name, "BatchMatrixSolve") do - desc = tf.NodeDescription("BatchMatrixSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolve") do + desc = tf.NodeDescription("BatchMatrixSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("BatchMatrixSolve") matrix_ = convert(tf.EagerTensor, matrix_) @@ -24023,13 +24023,13 @@ begin return res[1] end end - function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) - else - batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end end - end end @@ -24039,28 +24039,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "MutableHashTableV2") do - desc = tf.NodeDescription("MutableHashTableV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end + function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTableV2") do + desc = tf.NodeDescription("MutableHashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mutable_hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("MutableHashTableV2") if container !== nothing @@ -24085,13 
+24085,13 @@ begin return res[1] end end - function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - end end @@ -24101,16 +24101,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function exit_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "Exit") do - desc = tf.NodeDescription("Exit") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - end - tf.Tensor(tf.Operation(desc)) + function exit_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "Exit") do + desc = tf.NodeDescription("Exit") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) end + tf.Tensor(tf.Operation(desc)) + end function exit_eager(data_; name=nothing) desc = tf.EagerOp("Exit") data_ = convert(tf.EagerTensor, data_) @@ -24123,13 +24123,13 @@ begin return res[1] end end - function exit(data_; name=nothing) - if tf.in_eager_mode() - exit_eager(data_; name=name) - else - exit_graph(data_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exit(data_; name=nothing) + if tf.in_eager_mode() + exit_eager(data_; name=name) + else + exit_graph(data_; name=name) + end end - end end @@ -24139,28 +24139,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - local desc - tf.with_op_name(name, "LRN") do - desc = tf.NodeDescription("LRN") - input_ = convert(Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) - end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end + function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRN") do + desc = tf.NodeDescription("LRN") + input_ = convert(Tensor{Float32}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + 
end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) desc = tf.EagerOp("LRN") input_ = convert(tf.EagerTensor, input_) @@ -24185,13 +24185,13 @@ begin return res[1] end end - function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - if tf.in_eager_mode() - lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - else - lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.in_eager_mode() + lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end end - end end @@ -24201,30 +24201,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - local desc - tf.with_op_name(name, "StatelessIf") do - desc = tf.NodeDescription("StatelessIf") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - end - tf.Tensor(tf.Operation(desc)) + function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "StatelessIf") do + desc = tf.NodeDescription("StatelessIf") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end end + tf.Tensor(tf.Operation(desc)) + end function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("StatelessIf") cond_ = convert(tf.EagerTensor, cond_) @@ -24251,13 +24251,13 @@ begin return res[1] end end - function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - if tf.in_eager_mode() - stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - else - stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_if(cond_, 
input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.in_eager_mode() + stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end end - end end @@ -24267,23 +24267,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListSetItem") do - desc = tf.NodeDescription("TensorListSetItem") - input_handle_ = convert(Tensor{Any}, input_handle_) - index_ = convert(Tensor{Int32}, index_) - item_ = convert(Tensor{Any}, item_) - (item_,) = tf.tf_promote(item_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - tf.add_input(desc, item_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListSetItem") do + desc = tf.NodeDescription("TensorListSetItem") + input_handle_ = convert(Tensor{Any}, input_handle_) + index_ = convert(Tensor{Int32}, index_) + item_ = convert(Tensor{Any}, item_) + (item_,) = tf.tf_promote(item_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + tf.add_input(desc, item_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListSetItem") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -24303,13 +24303,13 @@ begin return res[1] end end - function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) - else - tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + else + tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + end end - end end @@ -24319,16 +24319,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rsqrt_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Rsqrt") do - desc = tf.NodeDescription("Rsqrt") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function rsqrt_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Rsqrt") do + desc = tf.NodeDescription("Rsqrt") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function rsqrt_eager(x_; name=nothing) desc = tf.EagerOp("Rsqrt") x_ = convert(tf.EagerTensor, x_) @@ -24341,13 +24341,13 @@ begin return res[1] end end - function rsqrt(x_; name=nothing) - if tf.in_eager_mode() - 
rsqrt_eager(x_; name=name) - else - rsqrt_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt(x_; name=nothing) + if tf.in_eager_mode() + rsqrt_eager(x_; name=name) + else + rsqrt_graph(x_; name=name) + end end - end end @@ -24357,15 +24357,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function delete_session_tensor_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "DeleteSessionTensor") do - desc = tf.NodeDescription("DeleteSessionTensor") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function delete_session_tensor_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "DeleteSessionTensor") do + desc = tf.NodeDescription("DeleteSessionTensor") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function delete_session_tensor_eager(handle_; name=nothing) desc = tf.EagerOp("DeleteSessionTensor") handle_ = convert(tf.EagerTensor, handle_) @@ -24377,13 +24377,13 @@ begin return res[1] end end - function delete_session_tensor(handle_; name=nothing) - if tf.in_eager_mode() - delete_session_tensor_eager(handle_; name=name) - else - delete_session_tensor_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function delete_session_tensor(handle_; name=nothing) + if tf.in_eager_mode() + delete_session_tensor_eager(handle_; name=name) + else + delete_session_tensor_graph(handle_; name=name) + end end - end end @@ -24393,30 +24393,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - local desc - tf.with_op_name(name, "OneHot") do - desc = tf.NodeDescription("OneHot") - indices_ = convert(Tensor{Int64}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - depth_ = convert(Tensor{Int32}, depth_) - on_value_ = convert(Tensor{Any}, on_value_) - off_value_ = convert(Tensor{Any}, off_value_) - (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, indices_) - tf.add_input(desc, depth_) - tf.add_input(desc, on_value_) - tf.add_input(desc, off_value_) - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end - tf.Tensor(tf.Operation(desc)) + function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + local desc + tf.with_op_name(name, "OneHot") do + desc = tf.NodeDescription("OneHot") + indices_ = convert(Tensor{Int64}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + depth_ = convert(Tensor{Int32}, depth_) + on_value_ = convert(Tensor{Any}, on_value_) + off_value_ = convert(Tensor{Any}, off_value_) + (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, indices_) + tf.add_input(desc, depth_) + tf.add_input(desc, on_value_) + tf.add_input(desc, off_value_) + if axis !== nothing + axis = Base.Int(axis) - 1 + end + if axis !== nothing + desc["axis"] = Base.Int(axis) + end end + tf.Tensor(tf.Operation(desc)) + end function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) desc = tf.EagerOp("OneHot") indices_ = 
convert(tf.EagerTensor, indices_) @@ -24443,13 +24443,13 @@ begin return res[1] end end - function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - if tf.in_eager_mode() - one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) - else - one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + if tf.in_eager_mode() + one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + else + one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + end end - end end @@ -24459,33 +24459,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyFtrl") do - desc = tf.NodeDescription("ResourceApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyFtrl") do + desc = tf.NodeDescription("ResourceApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrl") var_ = convert(tf.EagerTensor, var_) @@ -24519,13 +24519,13 @@ begin return res[1] end end - function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -24535,65 +24535,65 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - local desc - tf.with_op_name(name, "SdcaOptimizerV2") do - desc = tf.NodeDescription("SdcaOptimizerV2") - sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] - sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] - sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] - dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - example_labels_ = convert(Tensor{Float32}, example_labels_) - sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] - sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] - dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] - example_state_data_ = convert(Tensor{Float32}, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) - end - if adaptive !== nothing - desc["adaptive"] = Base.Bool(adaptive) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, 
num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizerV2") do + desc = tf.NodeDescription("SdcaOptimizerV2") + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + example_labels_ = convert(Tensor{Float32}, example_labels_) + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptive !== nothing + desc["adaptive"] = Base.Bool(adaptive) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizerV2") sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) @@ -24650,13 +24650,13 @@ begin return res end end - function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.in_eager_mode() - sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, 
example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - else - sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.in_eager_mode() + sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end end - end end @@ -24666,23 +24666,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueue") do - desc = tf.NodeDescription("QueueEnqueue") - handle_ = convert(Tensor{String}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueue") do + desc = tf.NodeDescription("QueueEnqueue") + handle_ = convert(Tensor{String}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, 
handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueue") handle_ = convert(tf.EagerTensor, handle_) @@ -24702,13 +24702,13 @@ begin return res[1] end end - function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - end end @@ -24718,31 +24718,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - local desc - tf.with_op_name(name, "CTCBeamSearchDecoder") do - desc = tf.NodeDescription("CTCBeamSearchDecoder") - inputs_ = convert(Tensor{Float32}, inputs_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, sequence_length_) - if beam_width !== nothing - desc["beam_width"] = Base.Int(beam_width) - end - if top_paths !== nothing - desc["top_paths"] = Base.Int(top_paths) - end - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) - end + function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCBeamSearchDecoder") do + desc = tf.NodeDescription("CTCBeamSearchDecoder") + inputs_ = convert(Tensor{Float32}, inputs_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if beam_width !== nothing + desc["beam_width"] = Base.Int(beam_width) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) + if top_paths !== nothing + desc["top_paths"] = Base.Int(top_paths) end - out + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) end + out + end function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) desc = tf.EagerOp("CTCBeamSearchDecoder") inputs_ = convert(tf.EagerTensor, inputs_) @@ -24765,44 +24765,44 @@ begin return res end end - function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - if tf.in_eager_mode() - ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, 
top_paths=top_paths, merge_repeated=merge_repeated) - else - ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + if tf.in_eager_mode() + ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + else + ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + end end - end end """ - conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + conditional_accumulator(; container=, shared_name=, reduction_type=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - local desc - tf.with_op_name(name, "ConditionalAccumulator") do - desc = tf.NodeDescription("ConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end + function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "ConditionalAccumulator") do + desc = tf.NodeDescription("ConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) desc = tf.EagerOp("ConditionalAccumulator") if dtype !== nothing @@ -24827,13 +24827,13 @@ begin return res[1] end end - function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.in_eager_mode() - conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - else - conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.in_eager_mode() + conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + 
conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end end - end end @@ -24843,19 +24843,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "WholeFileReader") do - desc = tf.NodeDescription("WholeFileReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReader") do + desc = tf.NodeDescription("WholeFileReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function whole_file_reader_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("WholeFileReader") if container !== nothing @@ -24871,13 +24871,13 @@ begin return res[1] end end - function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) - else - whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -24887,33 +24887,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyRMSProp") do - desc = tf.NodeDescription("ApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyRMSProp") do + desc = tf.NodeDescription("ApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, 
epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -24950,13 +24950,13 @@ begin return res[1] end end - function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -24966,17 +24966,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_saturation_graph(images_, scale_; name=nothing) - local desc - tf.with_op_name(name, "AdjustSaturation") do - desc = tf.NodeDescription("AdjustSaturation") - images_ = convert(Tensor{Float32}, images_) - scale_ = convert(Tensor{Float32}, scale_) - tf.add_input(desc, images_) - tf.add_input(desc, scale_) - end - tf.Tensor(tf.Operation(desc)) + function adjust_saturation_graph(images_, scale_; name=nothing) + local desc + tf.with_op_name(name, "AdjustSaturation") do + desc = tf.NodeDescription("AdjustSaturation") + images_ = convert(Tensor{Float32}, images_) + scale_ = convert(Tensor{Float32}, scale_) + tf.add_input(desc, images_) + tf.add_input(desc, scale_) end + tf.Tensor(tf.Operation(desc)) + end function adjust_saturation_eager(images_, scale_; name=nothing) desc = tf.EagerOp("AdjustSaturation") images_ = convert(tf.EagerTensor, images_) @@ -24990,13 +24990,13 @@ begin return res[1] end end - function adjust_saturation(images_, scale_; name=nothing) - if tf.in_eager_mode() - adjust_saturation_eager(images_, scale_; name=name) - else - adjust_saturation_graph(images_, scale_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_saturation(images_, scale_; name=nothing) + if tf.in_eager_mode() + adjust_saturation_eager(images_, scale_; name=name) + else + adjust_saturation_graph(images_, scale_; name=name) + end end - end end @@ -25006,18 +25006,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableRemoveV2") do - desc = tf.NodeDescription("LookupTableRemoveV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = 
convert(Tensor{Any}, keys_) - (keys_,) = tf.tf_promote(keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableRemoveV2") do + desc = tf.NodeDescription("LookupTableRemoveV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + (keys_,) = tf.tf_promote(keys_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing) desc = tf.EagerOp("LookupTableRemoveV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -25032,13 +25032,13 @@ begin return res[1] end end - function lookup_table_remove_v2(table_handle_, keys_; name=nothing) - if tf.in_eager_mode() - lookup_table_remove_v2_eager(table_handle_, keys_; name=name) - else - lookup_table_remove_v2_graph(table_handle_, keys_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing) + if tf.in_eager_mode() + lookup_table_remove_v2_eager(table_handle_, keys_; name=name) + else + lookup_table_remove_v2_graph(table_handle_, keys_; name=name) + end end - end end @@ -25048,18 +25048,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "QueueClose") do - desc = tf.NodeDescription("QueueClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end + function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueClose") do + desc = tf.NodeDescription("QueueClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueClose") handle_ = convert(tf.EagerTensor, handle_) @@ -25074,13 +25074,13 @@ begin return res[1] end end - function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end end @@ -25090,23 +25090,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "PrefetchDataset") do - desc = tf.NodeDescription("PrefetchDataset") - input_dataset_ 
= convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "PrefetchDataset") do + desc = tf.NodeDescription("PrefetchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("PrefetchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -25126,13 +25126,13 @@ begin return res[1] end end - function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) - else - prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -25142,35 +25142,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "MapDataset") do - desc = tf.NodeDescription("MapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, 
output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "MapDataset") do + desc = tf.NodeDescription("MapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("MapDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -25202,13 +25202,13 @@ begin return res[1] end end - function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - else - map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + else + map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end end - end end @@ -25218,22 +25218,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayReadV3") do - desc = tf.NodeDescription("TensorArrayReadV3") - handle_ = convert(Tensor{Any}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function 
tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayReadV3") do + desc = tf.NodeDescription("TensorArrayReadV3") + handle_ = convert(Tensor{Any}, handle_) + index_ = convert(Tensor{Int32}, index_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayReadV3") handle_ = convert(tf.EagerTensor, handle_) @@ -25252,13 +25252,13 @@ begin return res[1] end end - function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end end @@ -25268,16 +25268,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Identity") do - desc = tf.NodeDescription("Identity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Identity") do + desc = tf.NodeDescription("Identity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function identity_eager(input_; name=nothing) desc = tf.EagerOp("Identity") input_ = convert(tf.EagerTensor, input_) @@ -25290,13 +25290,13 @@ begin return res[1] end end - function identity(input_; name=nothing) - if tf.in_eager_mode() - identity_eager(input_; name=name) - else - identity_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity(input_; name=nothing) + if tf.in_eager_mode() + identity_eager(input_; name=name) + else + identity_graph(input_; name=name) + end end - end end @@ -25306,30 +25306,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "Print") do - desc = tf.NodeDescription("Print") - input_ = convert(Tensor{Any}, input_) - data_ = [convert(Tensor{Any}, x) for x = data_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, data_) - if U !== nothing - desc["U"] = map(Base.identity, U) - end - if message !== nothing - desc["message"] = Base.String(message) - end - if first_n !== nothing - desc["first_n"] = Base.Int(first_n) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - end - tf.Tensor(tf.Operation(desc)) + function print_graph(input_, data_; 
name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Print") do + desc = tf.NodeDescription("Print") + input_ = convert(Tensor{Any}, input_) + data_ = [convert(Tensor{Any}, x) for x = data_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, data_) + if U !== nothing + desc["U"] = map(Base.identity, U) + end + if message !== nothing + desc["message"] = Base.String(message) + end + if first_n !== nothing + desc["first_n"] = Base.Int(first_n) + end + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end end + tf.Tensor(tf.Operation(desc)) + end function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) desc = tf.EagerOp("Print") input_ = convert(tf.EagerTensor, input_) @@ -25356,13 +25356,13 @@ begin return res[1] end end - function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - if tf.in_eager_mode() - print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) - else - print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + if tf.in_eager_mode() + print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + else + print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + end end - end end @@ -25372,28 +25372,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - local desc - tf.with_op_name(name, "CollectiveBcastSend") do - desc = tf.NodeDescription("CollectiveBcastSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastSend") do + desc = tf.NodeDescription("CollectiveBcastSend") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) desc = tf.EagerOp("CollectiveBcastSend") input_ = convert(tf.EagerTensor, input_) @@ -25418,13 +25418,13 @@ begin return res[1] end end - function collective_bcast_send(input_; name=nothing, group_size=nothing, 
group_key=nothing, instance_key=nothing, shape=nothing) - if tf.in_eager_mode() - collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - else - collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.in_eager_mode() + collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end end - end end @@ -25434,26 +25434,26 @@ end Converts a list of tensors to an array of tensors. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ListToArray") do - desc = tf.NodeDescription("_ListToArray") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if N !== nothing - desc["N"] = Base.Int(N) - end + function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ListToArray") do + desc = tf.NodeDescription("_ListToArray") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) + if N !== nothing + desc["N"] = Base.Int(N) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing) desc = tf.EagerOp("_ListToArray") input_ = convert(tf.EagerTensor, input_) @@ -25471,13 +25471,13 @@ begin return res end end - function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) - if tf.in_eager_mode() - _list_to_array_eager(input_; name=name, Tin=Tin, N=N) - else - _list_to_array_graph(input_; name=name, Tin=Tin, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) + if tf.in_eager_mode() + _list_to_array_eager(input_; name=name, Tin=Tin, N=N) + else + _list_to_array_graph(input_; name=name, Tin=Tin, N=N) + end end - end end @@ -25487,29 +25487,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) - local desc - tf.with_op_name(name, "NegTrain") do - desc = tf.NodeDescription("NegTrain") - w_in_ = convert(Tensor{Float32}, w_in_) - w_out_ = convert(Tensor{Float32}, w_out_) - examples_ = convert(Tensor{Int32}, examples_) - labels_ = convert(Tensor{Int32}, labels_) - lr_ = convert(Tensor{Float32}, lr_) - tf.add_input(desc, w_in_) - tf.add_input(desc, w_out_) - tf.add_input(desc, examples_) - tf.add_input(desc, labels_) - tf.add_input(desc, lr_) - if vocab_count !== nothing - desc["vocab_count"] = map(Base.identity, vocab_count) - 
end - if num_negative_samples !== nothing - desc["num_negative_samples"] = Base.Int(num_negative_samples) - end + function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + local desc + tf.with_op_name(name, "NegTrain") do + desc = tf.NodeDescription("NegTrain") + w_in_ = convert(Tensor{Float32}, w_in_) + w_out_ = convert(Tensor{Float32}, w_out_) + examples_ = convert(Tensor{Int32}, examples_) + labels_ = convert(Tensor{Int32}, labels_) + lr_ = convert(Tensor{Float32}, lr_) + tf.add_input(desc, w_in_) + tf.add_input(desc, w_out_) + tf.add_input(desc, examples_) + tf.add_input(desc, labels_) + tf.add_input(desc, lr_) + if vocab_count !== nothing + desc["vocab_count"] = map(Base.identity, vocab_count) + end + if num_negative_samples !== nothing + desc["num_negative_samples"] = Base.Int(num_negative_samples) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) desc = tf.EagerOp("NegTrain") w_in_ = convert(tf.EagerTensor, w_in_) @@ -25535,13 +25535,13 @@ begin return res[1] end end - function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) - if tf.in_eager_mode() - neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) - else - neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + if tf.in_eager_mode() + neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + else + neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + end end - end end @@ -25551,20 +25551,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) - local desc - tf.with_op_name(name, "MergeV2Checkpoints") do - desc = tf.NodeDescription("MergeV2Checkpoints") - checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_) - destination_prefix_ = convert(Tensor{String}, destination_prefix_) - tf.add_input(desc, checkpoint_prefixes_) - tf.add_input(desc, destination_prefix_) - if delete_old_dirs !== nothing - desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) - end + function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + local desc + tf.with_op_name(name, "MergeV2Checkpoints") do + desc = tf.NodeDescription("MergeV2Checkpoints") + checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_) + destination_prefix_ = convert(Tensor{String}, destination_prefix_) + tf.add_input(desc, checkpoint_prefixes_) + tf.add_input(desc, destination_prefix_) + if delete_old_dirs !== nothing + desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; 
name=nothing, delete_old_dirs=nothing) desc = tf.EagerOp("MergeV2Checkpoints") checkpoint_prefixes_ = convert(tf.EagerTensor, checkpoint_prefixes_) @@ -25581,13 +25581,13 @@ begin return res[1] end end - function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) - if tf.in_eager_mode() - merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) - else - merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + if tf.in_eager_mode() + merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + else + merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + end end - end end @@ -25597,15 +25597,15 @@ end Worker heartbeat op. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function worker_heartbeat_graph(request_; name=nothing) - local desc - tf.with_op_name(name, "WorkerHeartbeat") do - desc = tf.NodeDescription("WorkerHeartbeat") - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, request_) - end - tf.Tensor(tf.Operation(desc)) + function worker_heartbeat_graph(request_; name=nothing) + local desc + tf.with_op_name(name, "WorkerHeartbeat") do + desc = tf.NodeDescription("WorkerHeartbeat") + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, request_) end + tf.Tensor(tf.Operation(desc)) + end function worker_heartbeat_eager(request_; name=nothing) desc = tf.EagerOp("WorkerHeartbeat") request_ = convert(tf.EagerTensor, request_) @@ -25617,13 +25617,13 @@ begin return res[1] end end - function worker_heartbeat(request_; name=nothing) - if tf.in_eager_mode() - worker_heartbeat_eager(request_; name=name) - else - worker_heartbeat_graph(request_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function worker_heartbeat(request_; name=nothing) + if tf.in_eager_mode() + worker_heartbeat_eager(request_; name=name) + else + worker_heartbeat_graph(request_; name=name) + end end - end end @@ -25633,18 +25633,18 @@ end An Op to permute tensors across replicated TPU instances. 
Each instance """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function collective_permute_graph(input_, source_target_pairs_; name=nothing) - local desc - tf.with_op_name(name, "CollectivePermute") do - desc = tf.NodeDescription("CollectivePermute") - input_ = convert(Tensor{Any}, input_) - source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, source_target_pairs_) - end - tf.Tensor(tf.Operation(desc)) + function collective_permute_graph(input_, source_target_pairs_; name=nothing) + local desc + tf.with_op_name(name, "CollectivePermute") do + desc = tf.NodeDescription("CollectivePermute") + input_ = convert(Tensor{Any}, input_) + source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, source_target_pairs_) end + tf.Tensor(tf.Operation(desc)) + end function collective_permute_eager(input_, source_target_pairs_; name=nothing) desc = tf.EagerOp("CollectivePermute") input_ = convert(tf.EagerTensor, input_) @@ -25659,13 +25659,13 @@ begin return res[1] end end - function collective_permute(input_, source_target_pairs_; name=nothing) - if tf.in_eager_mode() - collective_permute_eager(input_, source_target_pairs_; name=name) - else - collective_permute_graph(input_, source_target_pairs_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing) + if tf.in_eager_mode() + collective_permute_eager(input_, source_target_pairs_; name=name) + else + collective_permute_graph(input_, source_target_pairs_; name=name) + end end - end end @@ -25675,28 +25675,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantizeV3") do - desc = tf.NodeDescription("QuantizeAndDequantizeV3") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Any}, input_min_) - input_max_ = convert(Tensor{Any}, input_max_) - num_bits_ = convert(Tensor{Int32}, num_bits_) - (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, num_bits_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end + function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV3") do + desc = tf.NodeDescription("QuantizeAndDequantizeV3") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Any}, input_min_) + input_max_ = convert(Tensor{Any}, input_max_) + num_bits_ = convert(Tensor{Int32}, num_bits_) + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, num_bits_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if range_given !== nothing + desc["range_given"] = 
Base.Bool(range_given) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) desc = tf.EagerOp("QuantizeAndDequantizeV3") input_ = convert(tf.EagerTensor, input_) @@ -25723,13 +25723,13 @@ begin return res[1] end end - function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) - else - quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + else + quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + end end - end end @@ -25739,28 +25739,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "HashTable") do - desc = tf.NodeDescription("HashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end + function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTable") do + desc = tf.NodeDescription("HashTable") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("HashTable") if container !== nothing @@ -25785,13 +25785,13 @@ begin return res[1] end end - function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, 
value_dtype=value_dtype) - else - hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - end end @@ -25801,18 +25801,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softplus_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "SoftplusGrad") do - desc = tf.NodeDescription("SoftplusGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function softplus_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftplusGrad") do + desc = tf.NodeDescription("SoftplusGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function softplus_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftplusGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -25828,13 +25828,13 @@ begin return res[1] end end - function softplus_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - softplus_grad_eager(gradients_, features_; name=name) - else - softplus_grad_graph(gradients_, features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + softplus_grad_eager(gradients_, features_; name=name) + else + softplus_grad_graph(gradients_, features_; name=name) + end end - end end @@ -25844,31 +25844,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordReader") do - desc = tf.NodeDescription("FixedLengthRecordReader") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) - end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) - end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, 
record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordReader") do + desc = tf.NodeDescription("FixedLengthRecordReader") + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function fixed_length_record_reader_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FixedLengthRecordReader") if header_bytes !== nothing @@ -25896,13 +25896,13 @@ begin return res[1] end end - function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) - else - fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + else + fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + end end - end end @@ -25912,22 +25912,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatterV2") do - desc = tf.NodeDescription("TensorArrayScatterV2") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV2") do + desc = tf.NodeDescription("TensorArrayScatterV2") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + 
tf.add_input(desc, value_)
+            tf.add_input(desc, flow_in_)
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing)
         desc = tf.EagerOp("TensorArrayScatterV2")
         handle_ = convert(tf.EagerTensor, handle_)
@@ -25946,13 +25946,13 @@ begin
             return res[1]
         end
     end
-    function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing)
-        if tf.in_eager_mode()
-            tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name)
-        else
-            tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing)
+        if tf.in_eager_mode()
+            tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name)
+        else
+            tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name)
+        end
     end
-    end
 end
@@ -25962,15 +25962,15 @@ end
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_json_example_graph(json_examples_; name=nothing)
-        local desc
-        tf.with_op_name(name, "DecodeJSONExample") do
-            desc = tf.NodeDescription("DecodeJSONExample")
-            json_examples_ = convert(Tensor{String}, json_examples_)
-            tf.add_input(desc, json_examples_)
-        end
-        tf.Tensor(tf.Operation(desc))
+    function decode_json_example_graph(json_examples_; name=nothing)
+        local desc
+        tf.with_op_name(name, "DecodeJSONExample") do
+            desc = tf.NodeDescription("DecodeJSONExample")
+            json_examples_ = convert(Tensor{String}, json_examples_)
+            tf.add_input(desc, json_examples_)
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function decode_json_example_eager(json_examples_; name=nothing)
         desc = tf.EagerOp("DecodeJSONExample")
         json_examples_ = convert(tf.EagerTensor, json_examples_)
@@ -25982,58 +25982,58 @@ begin
             return res[1]
         end
     end
-    function decode_json_example(json_examples_; name=nothing)
-        if tf.in_eager_mode()
-            decode_json_example_eager(json_examples_; name=name)
-        else
-            decode_json_example_graph(json_examples_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_json_example(json_examples_; name=nothing)
+        if tf.in_eager_mode()
+            decode_json_example_eager(json_examples_; name=name)
+        else
+            decode_json_example_graph(json_examples_; name=name)
+        end
     end
-    end
 end
 """
-    fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true)
+    fused_batch_norm_grad_v2(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true)
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
-        local desc
-        tf.with_op_name(name, "FusedBatchNormGradV2") do
-            desc = tf.NodeDescription("FusedBatchNormGradV2")
-            y_backprop_ = convert(Tensor{Any}, y_backprop_)
-            x_ = convert(Tensor{Any}, x_)
-            scale_ = convert(Tensor{Float32}, scale_)
-            reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_)
-            reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_)
-            (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_)
-            (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_)
-            tf.add_input(desc, y_backprop_)
-            tf.add_input(desc, x_)
-            tf.add_input(desc, scale_)
-            tf.add_input(desc, reserve_space_1_)
-            tf.add_input(desc, reserve_space_2_)
-            if U !== nothing
-                desc["U"] = Base.identity(U)
-            end
-            if epsilon !== nothing
-                desc["epsilon"] = Base.identity(epsilon)
-            end
-            if data_format !== nothing
-                desc["data_format"] = Base.String(data_format)
-            end
-            if is_training !== nothing
-                desc["is_training"] = Base.Bool(is_training)
-            end
+    function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+        local desc
+        tf.with_op_name(name, "FusedBatchNormGradV2") do
+            desc = tf.NodeDescription("FusedBatchNormGradV2")
+            y_backprop_ = convert(Tensor{Any}, y_backprop_)
+            x_ = convert(Tensor{Any}, x_)
+            scale_ = convert(Tensor{Float32}, scale_)
+            reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_)
+            reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_)
+            (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_)
+            (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_)
+            tf.add_input(desc, y_backprop_)
+            tf.add_input(desc, x_)
+            tf.add_input(desc, scale_)
+            tf.add_input(desc, reserve_space_1_)
+            tf.add_input(desc, reserve_space_2_)
+            if U !== nothing
+                desc["U"] = Base.identity(U)
+            end
+            if epsilon !== nothing
+                desc["epsilon"] = Base.identity(epsilon)
             end
-        out = tf.Tensor[]
-        op = tf.Operation(desc)
-        for out_idx = 1:5
-            push!(out, tf.Tensor(op, out_idx))
+            if data_format !== nothing
+                desc["data_format"] = Base.String(data_format)
             end
-        out
+            if is_training !== nothing
+                desc["is_training"] = Base.Bool(is_training)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:5
+            push!(out, tf.Tensor(op, out_idx))
         end
+        out
+    end
     function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
         desc = tf.EagerOp("FusedBatchNormGradV2")
         y_backprop_ = convert(tf.EagerTensor, y_backprop_)
@@ -26069,13 +26069,13 @@ begin
             return res
         end
     end
-    function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
-        if tf.in_eager_mode()
-            fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
-        else
-            fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing)
+        if tf.in_eager_mode()
+            fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+        else
+            fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training)
+        end
     end
-    end
 end
@@ -26085,25 +26085,25 @@ end
     Cast x of type SrcT to y of DstT.
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
-        local desc
-        tf.with_op_name(name, "_HostCast") do
-            desc = tf.NodeDescription("_HostCast")
-            x_ = convert(Tensor{Any}, x_)
-            (x_,) = tf.tf_promote(x_)
-            tf.add_input(desc, x_)
-            if SrcT !== nothing
-                desc["SrcT"] = Base.identity(SrcT)
-            end
-            if DstT !== nothing
-                desc["DstT"] = Base.identity(DstT)
-            end
-            if Truncate !== nothing
-                desc["Truncate"] = Base.Bool(Truncate)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
+    function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+        local desc
+        tf.with_op_name(name, "_HostCast") do
+            desc = tf.NodeDescription("_HostCast")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
+            if SrcT !== nothing
+                desc["SrcT"] = Base.identity(SrcT)
+            end
+            if DstT !== nothing
+                desc["DstT"] = Base.identity(DstT)
+            end
+            if Truncate !== nothing
+                desc["Truncate"] = Base.Bool(Truncate)
+            end
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
         desc = tf.EagerOp("_HostCast")
         x_ = convert(tf.EagerTensor, x_)
@@ -26125,13 +26125,13 @@ begin
             return res[1]
         end
     end
-    function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
-        if tf.in_eager_mode()
-            _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
-        else
-            _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing)
+        if tf.in_eager_mode()
+            _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+        else
+            _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate)
+        end
     end
-    end
 end
@@ -26141,22 +26141,22 @@ end
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
-        local desc
-        tf.with_op_name(name, "TFRecordReader") do
-            desc = tf.NodeDescription("TFRecordReader")
-            if container !== nothing
-                desc["container"] = Base.String(container)
-            end
-            if shared_name !== nothing
-                desc["shared_name"] = Base.String(shared_name)
-            end
-            if compression_type !== nothing
-                desc["compression_type"] = Base.String(compression_type)
-            end
+    function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
+        local desc
+        tf.with_op_name(name, "TFRecordReader") do
+            desc = tf.NodeDescription("TFRecordReader")
+            if container !== nothing
+                desc["container"] = Base.String(container)
+            end
+            if shared_name !== nothing
+                desc["shared_name"] = Base.String(shared_name)
+            end
+            if compression_type !== nothing
+                desc["compression_type"] = Base.String(compression_type)
             end
-        tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function tf_record_reader_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
         desc = tf.EagerOp("TFRecordReader")
         if container !== nothing
@@ -26175,13 +26175,13 @@ begin
            return res[1]
        end
    end
-    function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing)
-        if tf.in_eager_mode()
-            tf_record_reader_eager(; name=name, container=container, shared_name=shared_name,
compression_type=compression_type) - else - tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.in_eager_mode() + tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end end - end end @@ -26191,27 +26191,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "While") do - desc = tf.NodeDescription("While") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "While") do + desc = tf.NodeDescription("While") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) desc = tf.EagerOp("While") input_ = convert(tf.EagerTensor, input_) @@ -26235,13 +26235,13 @@ begin return res[1] end end - function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) - if tf.in_eager_mode() - while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) - else - while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) + if tf.in_eager_mode() + while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) + else + while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) + end end - end end @@ -26251,24 +26251,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) - local desc - tf.with_op_name(name, "StatelessMultinomial") do - desc = tf.NodeDescription("StatelessMultinomial") - logits_ = convert(Tensor{Any}, logits_) - num_samples_ = convert(Tensor{Int32}, num_samples_) - seed_ = convert(Tensor{Int64}, seed_) - (logits_,) = tf.tf_promote(logits_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - tf.add_input(desc, seed_) - if 
output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) - end + function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + local desc + tf.with_op_name(name, "StatelessMultinomial") do + desc = tf.NodeDescription("StatelessMultinomial") + logits_ = convert(Tensor{Any}, logits_) + num_samples_ = convert(Tensor{Int32}, num_samples_) + seed_ = convert(Tensor{Int64}, seed_) + (logits_,) = tf.tf_promote(logits_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, logits_) + tf.add_input(desc, num_samples_) + tf.add_input(desc, seed_) + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) desc = tf.EagerOp("StatelessMultinomial") logits_ = convert(tf.EagerTensor, logits_) @@ -26289,13 +26289,13 @@ begin return res[1] end end - function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) - if tf.in_eager_mode() - stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) - else - stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + if tf.in_eager_mode() + stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + else + stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + end end - end end @@ -26305,25 +26305,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterAdd") do - desc = tf.NodeDescription("ScatterAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterAdd") do + desc = tf.NodeDescription("ScatterAdd") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterAdd") ref_ = convert(tf.EagerTensor, ref_) @@ -26345,13 +26345,13 @@ begin return res[1] end end - function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - 
scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -26361,16 +26361,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conj_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Conj") do - desc = tf.NodeDescription("Conj") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function conj_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Conj") do + desc = tf.NodeDescription("Conj") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function conj_eager(input_; name=nothing) desc = tf.EagerOp("Conj") input_ = convert(tf.EagerTensor, input_) @@ -26383,13 +26383,13 @@ begin return res[1] end end - function conj(input_; name=nothing) - if tf.in_eager_mode() - conj_eager(input_; name=name) - else - conj_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conj(input_; name=nothing) + if tf.in_eager_mode() + conj_eager(input_; name=name) + else + conj_graph(input_; name=name) + end end - end end @@ -26399,21 +26399,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ParallelDynamicStitch") do - desc = tf.NodeDescription("ParallelDynamicStitch") - indices_ = [convert(Tensor{Int32}, x) for x = indices_] - data_ = [convert(Tensor{Any}, x) for x = data_] - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ParallelDynamicStitch") do + desc = tf.NodeDescription("ParallelDynamicStitch") + indices_ = [convert(Tensor{Int32}, x) for x = indices_] + data_ = [convert(Tensor{Any}, x) for x = data_] + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, indices_) + tf.add_input(desc, data_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) desc = tf.EagerOp("ParallelDynamicStitch") indices_ = convert(tf.EagerTensor, indices_) @@ -26431,13 +26431,13 @@ begin return res[1] end end - function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) - if tf.in_eager_mode() - parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N) - else - parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) + if tf.in_eager_mode() + parallel_dynamic_stitch_eager(indices_, data_; 
name=name, N=N) + else + parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N) + end end - end end @@ -26447,17 +26447,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function make_iterator_graph(dataset_, iterator_; name=nothing) - local desc - tf.with_op_name(name, "MakeIterator") do - desc = tf.NodeDescription("MakeIterator") - dataset_ = convert(Tensor{Any}, dataset_) - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, dataset_) - tf.add_input(desc, iterator_) - end - tf.Tensor(tf.Operation(desc)) + function make_iterator_graph(dataset_, iterator_; name=nothing) + local desc + tf.with_op_name(name, "MakeIterator") do + desc = tf.NodeDescription("MakeIterator") + dataset_ = convert(Tensor{Any}, dataset_) + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, dataset_) + tf.add_input(desc, iterator_) end + tf.Tensor(tf.Operation(desc)) + end function make_iterator_eager(dataset_, iterator_; name=nothing) desc = tf.EagerOp("MakeIterator") dataset_ = convert(tf.EagerTensor, dataset_) @@ -26471,13 +26471,13 @@ begin return res[1] end end - function make_iterator(dataset_, iterator_; name=nothing) - if tf.in_eager_mode() - make_iterator_eager(dataset_, iterator_; name=name) - else - make_iterator_graph(dataset_, iterator_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing) + if tf.in_eager_mode() + make_iterator_eager(dataset_, iterator_; name=name) + else + make_iterator_graph(dataset_, iterator_; name=name) + end end - end end @@ -26487,17 +26487,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft3d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT3D") do - desc = tf.NodeDescription("RFFT3D") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function rfft3d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT3D") do + desc = tf.NodeDescription("RFFT3D") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function rfft3d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT3D") input_ = convert(tf.EagerTensor, input_) @@ -26511,13 +26511,13 @@ begin return res[1] end end - function rfft3d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - rfft3d_eager(input_, fft_length_; name=name) - else - rfft3d_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft3d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft3d_eager(input_, fft_length_; name=name) + else + rfft3d_graph(input_, fft_length_; name=name) + end end - end end @@ -26527,30 +26527,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceSumSparse") do - desc = tf.NodeDescription("SparseReduceSumSparse") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, 
input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSumSparse") do + desc = tf.NodeDescription("SparseReduceSumSparse") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceSumSparse") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -26572,13 +26572,13 @@ begin return res end end - function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end end @@ -26588,28 +26588,28 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocator") do - desc = tf.NodeDescription("_ScopedAllocator") - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if expected_call_count !== nothing - desc["expected_call_count"] = Base.Int(expected_call_count) - end - end - tf.Tensor(tf.Operation(desc)) + function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, 
sa_name=nothing, id=nothing, expected_call_count=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocator") do + desc = tf.NodeDescription("_ScopedAllocator") + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if expected_call_count !== nothing + desc["expected_call_count"] = Base.Int(expected_call_count) + end end + tf.Tensor(tf.Operation(desc)) + end function _scoped_allocator_eager(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) desc = tf.EagerOp("_ScopedAllocator") if shapes !== nothing @@ -26634,13 +26634,13 @@ begin return res[1] end end - function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) - if tf.in_eager_mode() - _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) - else - _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + if tf.in_eager_mode() + _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + else + _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + end end - end end @@ -26650,31 +26650,31 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - updates_ = convert(Tensor{Float32}, updates_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + updates_ = convert(Tensor{Float32}, updates_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, updates_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -26702,13 +26702,13 @@ begin return res[1] end end - function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -26718,34 +26718,34 
@@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) - local desc - tf.with_op_name(name, "SparseAdd") do - desc = tf.NodeDescription("SparseAdd") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - thresh_ = convert(Tensor{Any}, thresh_) - (thresh_,) = tf.tf_promote(thresh_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - tf.add_input(desc, thresh_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end + function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + local desc + tf.with_op_name(name, "SparseAdd") do + desc = tf.NodeDescription("SparseAdd") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + thresh_ = convert(Tensor{Any}, thresh_) + (thresh_,) = tf.tf_promote(thresh_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + tf.add_input(desc, thresh_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) desc = tf.EagerOp("SparseAdd") a_indices_ = convert(tf.EagerTensor, a_indices_) @@ -26772,13 +26772,13 @@ begin return res end end - function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) - if tf.in_eager_mode() - sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) - else - sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + if tf.in_eager_mode() + sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + else + sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + end end - end end @@ -26788,25 +26788,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) - local desc - tf.with_op_name(name, "CTCGreedyDecoder") do - desc = tf.NodeDescription("CTCGreedyDecoder") - inputs_ = convert(Tensor{Float32}, inputs_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - 
tf.add_input(desc, inputs_) - tf.add_input(desc, sequence_length_) - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) + function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCGreedyDecoder") do + desc = tf.NodeDescription("CTCGreedyDecoder") + inputs_ = convert(Tensor{Float32}, inputs_) + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + tf.add_input(desc, inputs_) + tf.add_input(desc, sequence_length_) + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) desc = tf.EagerOp("CTCGreedyDecoder") inputs_ = convert(tf.EagerTensor, inputs_) @@ -26823,13 +26823,13 @@ begin return res end end - function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) - if tf.in_eager_mode() - ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) - else - ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + if tf.in_eager_mode() + ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + else + ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + end end - end end @@ -26839,22 +26839,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) - local desc - tf.with_op_name(name, "ImmutableConst") do - desc = tf.NodeDescription("ImmutableConst") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if memory_region_name !== nothing - desc["memory_region_name"] = Base.String(memory_region_name) - end + function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + local desc + tf.with_op_name(name, "ImmutableConst") do + desc = tf.NodeDescription("ImmutableConst") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if memory_region_name !== nothing + desc["memory_region_name"] = Base.String(memory_region_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function immutable_const_eager(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) desc = tf.EagerOp("ImmutableConst") if dtype !== nothing @@ -26873,13 +26873,13 @@ begin return res[1] end end - function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) - if tf.in_eager_mode() - immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) - else - immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 
=# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + if tf.in_eager_mode() + immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + else + immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + end end - end end @@ -26889,15 +26889,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function consume_mutex_lock_graph(mutex_lock_; name=nothing) - local desc - tf.with_op_name(name, "ConsumeMutexLock") do - desc = tf.NodeDescription("ConsumeMutexLock") - mutex_lock_ = convert(Tensor{Any}, mutex_lock_) - tf.add_input(desc, mutex_lock_) - end - tf.Tensor(tf.Operation(desc)) + function consume_mutex_lock_graph(mutex_lock_; name=nothing) + local desc + tf.with_op_name(name, "ConsumeMutexLock") do + desc = tf.NodeDescription("ConsumeMutexLock") + mutex_lock_ = convert(Tensor{Any}, mutex_lock_) + tf.add_input(desc, mutex_lock_) end + tf.Tensor(tf.Operation(desc)) + end function consume_mutex_lock_eager(mutex_lock_; name=nothing) desc = tf.EagerOp("ConsumeMutexLock") mutex_lock_ = convert(tf.EagerTensor, mutex_lock_) @@ -26909,13 +26909,13 @@ begin return res[1] end end - function consume_mutex_lock(mutex_lock_; name=nothing) - if tf.in_eager_mode() - consume_mutex_lock_eager(mutex_lock_; name=name) - else - consume_mutex_lock_graph(mutex_lock_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing) + if tf.in_eager_mode() + consume_mutex_lock_eager(mutex_lock_; name=name) + else + consume_mutex_lock_graph(mutex_lock_; name=name) + end end - end end @@ -26925,18 +26925,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function greater_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "GreaterEqual") do - desc = tf.NodeDescription("GreaterEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function greater_equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "GreaterEqual") do + desc = tf.NodeDescription("GreaterEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function greater_equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("GreaterEqual") x_ = convert(tf.EagerTensor, x_) @@ -26952,45 +26952,45 @@ begin return res[1] end end - function greater_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - greater_equal_eager(x_, y_; name=name) - else - greater_equal_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + greater_equal_eager(x_, y_; name=name) + else + greater_equal_graph(x_, y_; name=name) + end end - end end """ - initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter= ) + initialize_table_from_text_file_v2(table_handle, filename; vocab_size=-1, delimiter=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, 
vocab_size=nothing, delimiter=nothing) - local desc - tf.with_op_name(name, "InitializeTableFromTextFileV2") do - desc = tf.NodeDescription("InitializeTableFromTextFileV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, filename_) - if key_index !== nothing - desc["key_index"] = Base.Int(key_index) - end - if value_index !== nothing - desc["value_index"] = Base.Int(value_index) - end - if vocab_size !== nothing - desc["vocab_size"] = Base.Int(vocab_size) - end - if delimiter !== nothing - desc["delimiter"] = Base.String(delimiter) - end + function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + local desc + tf.with_op_name(name, "InitializeTableFromTextFileV2") do + desc = tf.NodeDescription("InitializeTableFromTextFileV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, filename_) + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) desc = tf.EagerOp("InitializeTableFromTextFileV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -27016,13 +27016,13 @@ begin return res[1] end end - function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - if tf.in_eager_mode() - initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) - else - initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + if tf.in_eager_mode() + initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + else + initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + end end - end end @@ -27032,21 +27032,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeue") do - desc = tf.NodeDescription("QueueDequeue") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - 
desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeue") do + desc = tf.NodeDescription("QueueDequeue") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeue") handle_ = convert(tf.EagerTensor, handle_) @@ -27064,13 +27064,13 @@ begin return res[1] end end - function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -27080,18 +27080,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Equal") do - desc = tf.NodeDescription("Equal") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Equal") do + desc = tf.NodeDescription("Equal") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function equal_eager(x_, y_; name=nothing) desc = tf.EagerOp("Equal") x_ = convert(tf.EagerTensor, x_) @@ -27107,13 +27107,13 @@ begin return res[1] end end - function equal(x_, y_; name=nothing) - if tf.in_eager_mode() - equal_eager(x_, y_; name=name) - else - equal_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function equal(x_, y_; name=nothing) + if tf.in_eager_mode() + equal_eager(x_, y_; name=name) + else + equal_graph(x_, y_; name=name) + end end - end end @@ -27123,21 +27123,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorFromStringHandle") do - desc = tf.NodeDescription("IteratorFromStringHandle") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_from_string_handle_graph(string_handle_; name=nothing, 
output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandle") do + desc = tf.NodeDescription("IteratorFromStringHandle") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandle") string_handle_ = convert(tf.EagerTensor, string_handle_) @@ -27155,13 +27155,13 @@ begin return res[1] end end - function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -27171,27 +27171,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListSplit") do - desc = tf.NodeDescription("TensorListSplit") - tensor_ = convert(Tensor{Any}, tensor_) - element_shape_ = convert(Tensor{Any}, element_shape_) - lengths_ = convert(Tensor{Int64}, lengths_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, lengths_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListSplit") do + desc = tf.NodeDescription("TensorListSplit") + tensor_ = convert(Tensor{Any}, tensor_) + element_shape_ = convert(Tensor{Any}, element_shape_) + lengths_ = convert(Tensor{Int64}, lengths_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, lengths_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListSplit") tensor_ = convert(tf.EagerTensor, tensor_) @@ -27215,13 
+27215,13 @@ begin return res[1] end end - function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end end @@ -27231,39 +27231,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FractionalMaxPool") do - desc = tf.NodeDescription("FractionalMaxPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalMaxPool") do + desc = tf.NodeDescription("FractionalMaxPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FractionalMaxPool") value_ = convert(tf.EagerTensor, value_) @@ -27294,13 +27294,13 @@ begin return res end end - function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - 
fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) - else - fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + else + fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + end end - end end @@ -27310,22 +27310,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_graph(indices_, updates_, shape_; name=nothing) - local desc - tf.with_op_name(name, "ScatterNd") do - desc = tf.NodeDescription("ScatterNd") - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - shape_ = convert(Tensor{Any}, shape_) - (updates_,) = tf.tf_promote(updates_) - (indices_, shape_) = tf.tf_promote(indices_, shape_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - tf.add_input(desc, shape_) - end - tf.Tensor(tf.Operation(desc)) + function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNd") do + desc = tf.NodeDescription("ScatterNd") + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + shape_ = convert(Tensor{Any}, shape_) + (updates_,) = tf.tf_promote(updates_) + (indices_, shape_) = tf.tf_promote(indices_, shape_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + tf.add_input(desc, shape_) end + tf.Tensor(tf.Operation(desc)) + end function scatter_nd_eager(indices_, updates_, shape_; name=nothing) desc = tf.EagerOp("ScatterNd") indices_ = convert(tf.EagerTensor, indices_) @@ -27344,13 +27344,13 @@ begin return res[1] end end - function scatter_nd(indices_, updates_, shape_; name=nothing) - if tf.in_eager_mode() - scatter_nd_eager(indices_, updates_, shape_; name=name) - else - scatter_nd_graph(indices_, updates_, shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing) + if tf.in_eager_mode() + scatter_nd_eager(indices_, updates_, shape_; name=name) + else + scatter_nd_graph(indices_, updates_, shape_; name=name) + end end - end end @@ -27360,20 +27360,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function select_graph(condition_, t_, e_; name=nothing) - local desc - tf.with_op_name(name, "Select") do - desc = tf.NodeDescription("Select") - condition_ = convert(Tensor{Bool}, condition_) - t_ = convert(Tensor{Any}, t_) - e_ = convert(Tensor{Any}, e_) - (t_, e_) = tf.tf_promote(t_, e_) - tf.add_input(desc, condition_) - tf.add_input(desc, t_) - tf.add_input(desc, e_) - end - 
tf.Tensor(tf.Operation(desc)) + function select_graph(condition_, t_, e_; name=nothing) + local desc + tf.with_op_name(name, "Select") do + desc = tf.NodeDescription("Select") + condition_ = convert(Tensor{Bool}, condition_) + t_ = convert(Tensor{Any}, t_) + e_ = convert(Tensor{Any}, e_) + (t_, e_) = tf.tf_promote(t_, e_) + tf.add_input(desc, condition_) + tf.add_input(desc, t_) + tf.add_input(desc, e_) end + tf.Tensor(tf.Operation(desc)) + end function select_eager(condition_, t_, e_; name=nothing) desc = tf.EagerOp("Select") condition_ = convert(tf.EagerTensor, condition_) @@ -27391,13 +27391,13 @@ begin return res[1] end end - function select(condition_, t_, e_; name=nothing) - if tf.in_eager_mode() - select_eager(condition_, t_, e_; name=name) - else - select_graph(condition_, t_, e_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function select(condition_, t_, e_; name=nothing) + if tf.in_eager_mode() + select_eager(condition_, t_, e_; name=name) + else + select_graph(condition_, t_, e_; name=name) + end end - end end @@ -27407,23 +27407,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Min") do - desc = tf.NodeDescription("Min") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Min") do + desc = tf.NodeDescription("Min") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Min") input_ = convert(tf.EagerTensor, input_) @@ -27442,13 +27442,13 @@ begin return res[1] end end - function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -27458,32 +27458,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - 
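# Why scatter_nd_graph and min_graph subtract one from their index inputs:
# TensorFlow kernels index from 0 while Julia callers index from 1, so the
# graph wrappers shift index-like tensors down before wiring them in, i.e.
#     indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
# The same convention in plain Julia, with made-up values:
julia_indices = [1, 3, 5]            # 1-based, as written by a Julia caller
tf_indices    = julia_indices .- 1   # 0-based, as the TF kernel expects
@assert tf_indices == [0, 2, 4]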
local desc - tf.with_op_name(name, "LRNGrad") do - desc = tf.NodeDescription("LRNGrad") - input_grads_ = convert(Tensor{Float32}, input_grads_) - input_image_ = convert(Tensor{Float32}, input_image_) - output_image_ = convert(Tensor{Float32}, output_image_) - (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_) - tf.add_input(desc, input_grads_) - tf.add_input(desc, input_image_) - tf.add_input(desc, output_image_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) - end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - end - tf.Tensor(tf.Operation(desc)) + function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRNGrad") do + desc = tf.NodeDescription("LRNGrad") + input_grads_ = convert(Tensor{Float32}, input_grads_) + input_image_ = convert(Tensor{Float32}, input_image_) + output_image_ = convert(Tensor{Float32}, output_image_) + (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_) + tf.add_input(desc, input_grads_) + tf.add_input(desc, input_image_) + tf.add_input(desc, output_image_) + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + if beta !== nothing + desc["beta"] = Base.identity(beta) + end end + tf.Tensor(tf.Operation(desc)) + end function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) desc = tf.EagerOp("LRNGrad") input_grads_ = convert(tf.EagerTensor, input_grads_) @@ -27514,13 +27514,13 @@ begin return res[1] end end - function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - if tf.in_eager_mode() - lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - else - lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.in_eager_mode() + lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end end - end end @@ -27530,34 +27530,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomPoissonV2") do - desc = tf.NodeDescription("RandomPoissonV2") - shape_ = convert(Tensor{Any}, shape_) - rate_ = convert(Tensor{Float64}, rate_) - (shape_,) = tf.tf_promote(shape_) - (rate_,) = tf.tf_promote(rate_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - 
if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end - if R !== nothing - desc["R"] = Base.identity(R) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) + function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoissonV2") do + desc = tf.NodeDescription("RandomPoissonV2") + shape_ = convert(Tensor{Any}, shape_) + rate_ = convert(Tensor{Float64}, rate_) + (shape_,) = tf.tf_promote(shape_) + (rate_,) = tf.tf_promote(rate_) + tf.add_input(desc, shape_) + tf.add_input(desc, rate_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) + end + if R !== nothing + desc["R"] = Base.identity(R) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end end + tf.Tensor(tf.Operation(desc)) + end function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) desc = tf.EagerOp("RandomPoissonV2") shape_ = convert(tf.EagerTensor, shape_) @@ -27588,13 +27588,13 @@ begin return res[1] end end - function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) - if tf.in_eager_mode() - random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) - else - random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + if tf.in_eager_mode() + random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + else + random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + end end - end end @@ -27604,28 +27604,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FIFOQueue") do - desc = tf.NodeDescription("FIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueue") do + desc = tf.NodeDescription("FIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== 
nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("FIFOQueue") if component_types !== nothing @@ -27650,13 +27650,13 @@ begin return res[1] end end - function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -27666,31 +27666,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do - desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (alpha_, l1_, l2_, grad_) = tf.tf_promote(alpha_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (alpha_, l1_, l2_, grad_) = tf.tf_promote(alpha_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function 
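# How input-less ops such as FIFOQueue are configured: everything flows through
# node attributes, each written only when the caller supplied it, and coerced
# to a protobuf-friendly type (Base.Int / Base.Bool / Base.String for scalars,
# map(Base.identity, xs) to materialize list attributes). A condensed stand-in
# using a Dict in place of tf.NodeDescription, just to show the control flow:
function fifo_queue_attrs(; component_types=nothing, capacity=nothing, shared_name=nothing)
    desc = Dict{String,Any}()
    if component_types !== nothing
        desc["component_types"] = map(Base.identity, component_types)  # list attr
    end
    if capacity !== nothing
        desc["capacity"] = Base.Int(capacity)                          # scalar attr
    end
    if shared_name !== nothing
        desc["shared_name"] = Base.String(shared_name)                 # scalar attr
    end
    return desc
end
fifo_queue_attrs(capacity=10, shared_name="q")  # only the attrs actually passed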
resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -27720,13 +27720,13 @@ begin return res[1] end end - function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -27736,21 +27736,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalNonSerializableDataset") do - desc = tf.NodeDescription("ExperimentalNonSerializableDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalNonSerializableDataset") do + desc = tf.NodeDescription("ExperimentalNonSerializableDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalNonSerializableDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -27768,13 +27768,13 @@ begin return res[1] end end - function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_non_serializable_dataset_eager(input_dataset_; 
name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -27784,29 +27784,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2DBackpropFilter") do - desc = tf.NodeDescription("Dilation2DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2DBackpropFilter") do + desc = tf.NodeDescription("Dilation2DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2DBackpropFilter") input_ = convert(tf.EagerTensor, input_) @@ -27834,13 +27834,13 @@ begin return res[1] end end - function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end end - end end @@ -27850,23 +27850,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") 
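# What the *_eager variants have in common: every input is first lifted to a
# tf.EagerTensor via convert, so plain Julia arrays, strings, and scalars can
# be passed straight through. A minimal sketch, assuming the package is loaded
# and an eager context is already active:
import TensorFlow
const tf = TensorFlow
x = convert(tf.EagerTensor, [1.0, 2.0, 3.0])   # Julia array -> eager tensor handle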
do - desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tag_ = convert(Tensor{String}, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do + desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tag_ = convert(Tensor{String}, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -27886,13 +27886,13 @@ begin return res[1] end end - function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -27902,30 +27902,30 @@ end output = cond ? 
then_branch(input) : else_branch(input) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - local desc - tf.with_op_name(name, "_If") do - desc = tf.NodeDescription("_If") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - end - tf.Tensor(tf.Operation(desc)) + function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "_If") do + desc = tf.NodeDescription("_If") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end end + tf.Tensor(tf.Operation(desc)) + end function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) desc = tf.EagerOp("_If") cond_ = convert(tf.EagerTensor, cond_) @@ -27952,35 +27952,35 @@ begin return res[1] end end - function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - if tf.in_eager_mode() - _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - else - _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.in_eager_mode() + _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end end - end end """ - bias_add_grad(out_backprop; data_format=NHWC) + bias_add_grad(out_backprop; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "BiasAddGrad") do - desc = tf.NodeDescription("BiasAddGrad") - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (out_backprop_,) = tf.tf_promote(out_backprop_) - tf.add_input(desc, out_backprop_) - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAddGrad") do + desc = tf.NodeDescription("BiasAddGrad") + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (out_backprop_,) = tf.tf_promote(out_backprop_) + tf.add_input(desc, out_backprop_) + if 
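# The _If op above carries its branches as function-valued attributes
# (then_branch / else_branch) plus Tin/Tout type lists; its contract is the
# docstring's `output = cond ? then_branch(input) : else_branch(input)`.
# A plain-Julia model of that contract, with hypothetical branches:
then_branch(x) = x .+ 1
else_branch(x) = x .- 1
if_model(cond, x) = cond ? then_branch(x) : else_branch(x)
@assert if_model(true,  [1, 2]) == [2, 3]
@assert if_model(false, [1, 2]) == [0, 1]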
data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAddGrad") out_backprop_ = convert(tf.EagerTensor, out_backprop_) @@ -27996,13 +27996,13 @@ begin return res[1] end end - function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) - if tf.in_eager_mode() - bias_add_grad_eager(out_backprop_; name=name, data_format=data_format) - else - bias_add_grad_graph(out_backprop_; name=name, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) + if tf.in_eager_mode() + bias_add_grad_eager(out_backprop_; name=name, data_format=data_format) + else + bias_add_grad_graph(out_backprop_; name=name, data_format=data_format) + end end - end end @@ -28012,15 +28012,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_serialize_state_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderSerializeStateV2") do - desc = tf.NodeDescription("ReaderSerializeStateV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeStateV2") do + desc = tf.NodeDescription("ReaderSerializeStateV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_serialize_state_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderSerializeStateV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -28032,13 +28032,13 @@ begin return res[1] end end - function reader_serialize_state_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_serialize_state_v2_eager(reader_handle_; name=name) - else - reader_serialize_state_v2_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_serialize_state_v2_eager(reader_handle_; name=name) + else + reader_serialize_state_v2_graph(reader_handle_; name=name) + end end - end end @@ -28048,15 +28048,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function wrap_dataset_variant_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "WrapDatasetVariant") do - desc = tf.NodeDescription("WrapDatasetVariant") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - end - tf.Tensor(tf.Operation(desc)) + function wrap_dataset_variant_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "WrapDatasetVariant") do + desc = tf.NodeDescription("WrapDatasetVariant") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) end + tf.Tensor(tf.Operation(desc)) + end function wrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("WrapDatasetVariant") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -28068,13 +28068,13 @@ begin return res[1] end end - function wrap_dataset_variant(input_handle_; name=nothing) - if tf.in_eager_mode() - 
wrap_dataset_variant_eager(input_handle_; name=name) - else - wrap_dataset_variant_graph(input_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing) + if tf.in_eager_mode() + wrap_dataset_variant_eager(input_handle_; name=name) + else + wrap_dataset_variant_graph(input_handle_; name=name) + end end - end end @@ -28084,38 +28084,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - local desc - tf.with_op_name(name, "ParallelInterleaveDatasetV2") do - desc = tf.NodeDescription("ParallelInterleaveDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - end - tf.Tensor(tf.Operation(desc)) + function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ParallelInterleaveDatasetV2") do + desc = tf.NodeDescription("ParallelInterleaveDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, num_parallel_calls_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end end + tf.Tensor(tf.Operation(desc)) + end function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) desc = tf.EagerOp("ParallelInterleaveDatasetV2") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -28150,48 +28150,48 @@ begin return res[1] end end - 
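# List-typed inputs, as in parallel_interleave_dataset_v2's other_arguments_:
# the wrapper converts element-wise with a comprehension before a single
# add_input call, i.e. `[convert(Tensor{Any}, x) for x = other_arguments_]`.
# The same elementwise-convert idiom in plain Julia:
raw_args  = Any[1, 2.5, 3]
converted = [convert(Float64, x) for x in raw_args]
@assert converted == [1.0, 2.5, 3.0]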
function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - if tf.in_eager_mode() - parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - else - parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.in_eager_mode() + parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end end - end end """ - depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native_backprop_input(input_sizes, filter, out_backprop; data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do - desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = 
map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") input_sizes_ = convert(tf.EagerTensor, input_sizes_) @@ -28221,13 +28221,13 @@ begin return res[1] end end - function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -28237,33 +28237,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyRMSProp") do - desc = tf.NodeDescription("ResourceApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyRMSProp") do + desc = tf.NodeDescription("ResourceApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = 
convert(Tensor{Any}, grad_) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -28297,13 +28297,13 @@ begin return res[1] end end - function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -28313,21 +28313,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalLMDBDataset") do - desc = tf.NodeDescription("ExperimentalLMDBDataset") - filenames_ = convert(Tensor{String}, filenames_) - tf.add_input(desc, filenames_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLMDBDataset") do + desc = tf.NodeDescription("ExperimentalLMDBDataset") + filenames_ = convert(Tensor{String}, filenames_) + tf.add_input(desc, filenames_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLMDBDataset") filenames_ = convert(tf.EagerTensor, filenames_) @@ -28345,13 +28345,13 @@ begin return res[1] end end - function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
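# Why resource_apply_rms_prop runs (lr_, rho_, momentum_, epsilon_, grad_)
# through tf.tf_promote: inputs that must share a dtype are promoted to a
# common type before being wired in, mirroring Julia's own promotion rules.
# A plain-Julia analogue of what that promotion decides:
lr   = 0.01f0                 # Float32 scalar
grad = [1.0, 2.0]             # Float64 vector
T = promote_type(typeof(lr), eltype(grad))
@assert T == Float64          # the mixed pair is unified as Float64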
tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -28361,25 +28361,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "SparseAccumulatorTakeGradient") do - desc = tf.NodeDescription("SparseAccumulatorTakeGradient") - handle_ = convert(Tensor{String}, handle_) - num_required_ = convert(Tensor{Int32}, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorTakeGradient") do + desc = tf.NodeDescription("SparseAccumulatorTakeGradient") + handle_ = convert(Tensor{String}, handle_) + num_required_ = convert(Tensor{Int32}, num_required_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("SparseAccumulatorTakeGradient") handle_ = convert(tf.EagerTensor, handle_) @@ -28396,13 +28396,13 @@ begin return res end end - function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) - else - sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end end - end end @@ -28412,15 +28412,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_close_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "StackCloseV2") do - desc = tf.NodeDescription("StackCloseV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function stack_close_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackCloseV2") do + desc = tf.NodeDescription("StackCloseV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function stack_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("StackCloseV2") handle_ = convert(tf.EagerTensor, handle_) @@ -28432,13 +28432,13 @@ begin return res[1] end end - function stack_close_v2(handle_; name=nothing) - if tf.in_eager_mode() 
- stack_close_v2_eager(handle_; name=name) - else - stack_close_v2_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close_v2(handle_; name=nothing) + if tf.in_eager_mode() + stack_close_v2_eager(handle_; name=name) + else + stack_close_v2_graph(handle_; name=name) + end end - end end @@ -28448,28 +28448,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapSize") do - desc = tf.NodeDescription("MapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapSize") do + desc = tf.NodeDescription("MapSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapSize") if capacity !== nothing @@ -28494,13 +28494,13 @@ begin return res[1] end end - function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -28510,33 +28510,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdagradDA") do - desc = tf.NodeDescription("ResourceApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - 
gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdagradDA") do + desc = tf.NodeDescription("ResourceApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdagradDA") var_ = convert(tf.EagerTensor, var_) @@ -28569,13 +28569,13 @@ begin return res[1] end end - function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end end @@ -28585,15 +28585,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_size_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, 
"TensorForestTreeSize") do - desc = tf.NodeDescription("TensorForestTreeSize") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSize") do + desc = tf.NodeDescription("TensorForestTreeSize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_size_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSize") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -28605,13 +28605,13 @@ begin return res[1] end end - function tensor_forest_tree_size(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_size_eager(tree_handle_; name=name) - else - tensor_forest_tree_size_graph(tree_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_size_eager(tree_handle_; name=name) + else + tensor_forest_tree_size_graph(tree_handle_; name=name) + end end - end end @@ -28621,16 +28621,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_diag_part_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDiagPart") do - desc = tf.NodeDescription("MatrixDiagPart") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_diag_part_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiagPart") do + desc = tf.NodeDescription("MatrixDiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDiagPart") input_ = convert(tf.EagerTensor, input_) @@ -28643,13 +28643,13 @@ begin return res[1] end end - function matrix_diag_part(input_; name=nothing) - if tf.in_eager_mode() - matrix_diag_part_eager(input_; name=name) - else - matrix_diag_part_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag_part(input_; name=nothing) + if tf.in_eager_mode() + matrix_diag_part_eager(input_; name=name) + else + matrix_diag_part_graph(input_; name=name) + end end - end end @@ -28659,15 +28659,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do - desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") 
reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -28679,13 +28679,13 @@ begin return res[1] end end - function reader_num_work_units_completed_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_work_units_completed_v2_eager(reader_handle_; name=name) - else - reader_num_work_units_completed_v2_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_work_units_completed_v2_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_v2_graph(reader_handle_; name=name) + end end - end end @@ -28695,22 +28695,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplitV3") do - desc = tf.NodeDescription("TensorArraySplitV3") - handle_ = convert(Tensor{Any}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV3") do + desc = tf.NodeDescription("TensorArraySplitV3") + handle_ = convert(Tensor{Any}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplitV3") handle_ = convert(tf.EagerTensor, handle_) @@ -28729,13 +28729,13 @@ begin return res[1] end end - function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end end @@ -28745,28 +28745,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "SparseToDense") do - desc = tf.NodeDescription("SparseToDense") - sparse_indices_ = convert(Tensor{Any}, sparse_indices_) - sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1) - output_shape_ = convert(Tensor{Any}, output_shape_) - output_shape_ = output_shape_ - convert(tf.Tensor{eltype(output_shape_)}, 1) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - default_value_ = 
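# [editorial note] The `sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1)`
# line in sparse_to_dense_graph shifts the incoming indices down by one before
# wiring them into the node, presumably to reconcile Julia's 1-based indexing
# with the 0-based convention of the underlying SparseToDense kernel; note the
# generator applies the same shift to output_shape_. So a Julia-side index
# such as (1, 1) reaches the kernel as (0, 0), and callers keep passing
# ordinary 1-based indices.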
convert(Tensor{Any}, default_value_) - (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_) - (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, output_shape_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, default_value_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end + function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SparseToDense") do + desc = tf.NodeDescription("SparseToDense") + sparse_indices_ = convert(Tensor{Any}, sparse_indices_) + sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1) + output_shape_ = convert(Tensor{Any}, output_shape_) + output_shape_ = output_shape_ - convert(tf.Tensor{eltype(output_shape_)}, 1) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + default_value_ = convert(Tensor{Any}, default_value_) + (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_) + (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, output_shape_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, default_value_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) desc = tf.EagerOp("SparseToDense") sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) @@ -28791,13 +28791,13 @@ begin return res[1] end end - function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) - if tf.in_eager_mode() - sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) - else - sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + if tf.in_eager_mode() + sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + else + sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + end end - end end @@ -28807,19 +28807,19 @@ end Operator that connects N unreplicated inputs to an N-way replicated TPU computation. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "TPUReplicatedInput") do - desc = tf.NodeDescription("TPUReplicatedInput") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "TPUReplicatedInput") do + desc = tf.NodeDescription("TPUReplicatedInput") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("TPUReplicatedInput") inputs_ = convert(tf.EagerTensor, inputs_) @@ -28835,13 +28835,13 @@ begin return res[1] end end - function tpu_replicated_input(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - tpu_replicated_input_eager(inputs_; name=name, N=N) - else - tpu_replicated_input_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_input(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + tpu_replicated_input_eager(inputs_; name=name, N=N) + else + tpu_replicated_input_graph(inputs_; name=name, N=N) + end end - end end @@ -28851,15 +28851,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_close_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "StackClose") do - desc = tf.NodeDescription("StackClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function stack_close_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackClose") do + desc = tf.NodeDescription("StackClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function stack_close_eager(handle_; name=nothing) desc = tf.EagerOp("StackClose") handle_ = convert(tf.EagerTensor, handle_) @@ -28871,13 +28871,13 @@ begin return res[1] end end - function stack_close(handle_; name=nothing) - if tf.in_eager_mode() - stack_close_eager(handle_; name=name) - else - stack_close_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close(handle_; name=nothing) + if tf.in_eager_mode() + stack_close_eager(handle_; name=name) + else + stack_close_graph(handle_; name=name) + end end - end end @@ -28887,23 +28887,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DeserializeManySparse") do - desc = tf.NodeDescription("DeserializeManySparse") - serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + local desc + 
tf.with_op_name(name, "DeserializeManySparse") do + desc = tf.NodeDescription("DeserializeManySparse") + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeManySparse") serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) @@ -28918,13 +28918,13 @@ begin return res end end - function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype) - else - deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype) + else + deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + end end - end end @@ -28934,25 +28934,25 @@ end Replacement node for NcclReduce. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclReduceRecv") do - desc = tf.NodeDescription("_NcclReduceRecv") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceRecv") do + desc = tf.NodeDescription("_NcclReduceRecv") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclReduceRecv") input_ = convert(tf.EagerTensor, input_) @@ -28974,13 +28974,13 @@ begin return res[1] end end - function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_reduce_recv_eager(input_; name=name, 
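# [editorial note] Multi-output ops such as DeserializeManySparse above (and
# QuantizedInstanceNorm and DenseToDenseSetOperation later in this patch)
# return a Vector of Tensors rather than a single Tensor: the graph path pushes
# tf.Tensor(op, out_idx) once per output index, and the eager path returns the
# whole result rather than res[1]. A hedged destructuring sketch (the variable
# names are illustrative only):
#
#     indices, values, shape = deserialize_many_sparse(serialized; dtype=Int64)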
reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end end @@ -28990,22 +28990,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "MirrorPadGrad") do - desc = tf.NodeDescription("MirrorPadGrad") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end + function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPadGrad") do + desc = tf.NodeDescription("MirrorPadGrad") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPadGrad") input_ = convert(tf.EagerTensor, input_) @@ -29024,13 +29024,13 @@ begin return res[1] end end - function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) - if tf.in_eager_mode() - mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode) - else - mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) + if tf.in_eager_mode() + mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode) + end end - end end @@ -29040,18 +29040,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function broadcast_args_graph(s0_, s1_; name=nothing) - local desc - tf.with_op_name(name, "BroadcastArgs") do - desc = tf.NodeDescription("BroadcastArgs") - s0_ = convert(Tensor{Int32}, s0_) - s1_ = convert(Tensor{Int32}, s1_) - (s0_, s1_) = tf.tf_promote(s0_, s1_) - tf.add_input(desc, s0_) - tf.add_input(desc, s1_) - end - tf.Tensor(tf.Operation(desc)) + function broadcast_args_graph(s0_, s1_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastArgs") do + desc = tf.NodeDescription("BroadcastArgs") + s0_ = convert(Tensor{Int32}, s0_) + s1_ = convert(Tensor{Int32}, s1_) + (s0_, s1_) = tf.tf_promote(s0_, s1_) + tf.add_input(desc, s0_) + tf.add_input(desc, s1_) end + tf.Tensor(tf.Operation(desc)) + end function broadcast_args_eager(s0_, s1_; name=nothing) desc = tf.EagerOp("BroadcastArgs") s0_ = convert(tf.EagerTensor, s0_) @@ -29067,13 +29067,13 @@ begin return res[1] end end - function broadcast_args(s0_, s1_; name=nothing) - if tf.in_eager_mode() - broadcast_args_eager(s0_, s1_; name=name) - else - broadcast_args_graph(s0_, s1_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_args(s0_, s1_; name=nothing) + if tf.in_eager_mode() + broadcast_args_eager(s0_, s1_; name=name) + else + broadcast_args_graph(s0_, s1_; 
name=name) + end end - end end @@ -29083,22 +29083,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessTruncatedNormal") do - desc = tf.NodeDescription("StatelessTruncatedNormal") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessTruncatedNormal") do + desc = tf.NodeDescription("StatelessTruncatedNormal") + shape_ = convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessTruncatedNormal") shape_ = convert(tf.EagerTensor, shape_) @@ -29117,13 +29117,13 @@ begin return res[1] end end - function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype) + end end - end end @@ -29133,17 +29133,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function regex_full_match_graph(input_, pattern_; name=nothing) - local desc - tf.with_op_name(name, "RegexFullMatch") do - desc = tf.NodeDescription("RegexFullMatch") - input_ = convert(Tensor{String}, input_) - pattern_ = convert(Tensor{String}, pattern_) - tf.add_input(desc, input_) - tf.add_input(desc, pattern_) - end - tf.Tensor(tf.Operation(desc)) + function regex_full_match_graph(input_, pattern_; name=nothing) + local desc + tf.with_op_name(name, "RegexFullMatch") do + desc = tf.NodeDescription("RegexFullMatch") + input_ = convert(Tensor{String}, input_) + pattern_ = convert(Tensor{String}, pattern_) + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) end + tf.Tensor(tf.Operation(desc)) + end function regex_full_match_eager(input_, pattern_; name=nothing) desc = tf.EagerOp("RegexFullMatch") input_ = convert(tf.EagerTensor, input_) @@ -29157,13 +29157,13 @@ begin return res[1] end end - function regex_full_match(input_, pattern_; name=nothing) - if tf.in_eager_mode() - regex_full_match_eager(input_, pattern_; name=name) - else - regex_full_match_graph(input_, pattern_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_full_match(input_, pattern_; name=nothing) + if tf.in_eager_mode() + regex_full_match_eager(input_, pattern_; name=name) + else + regex_full_match_graph(input_, pattern_; name=name) 
+ end end - end end @@ -29173,15 +29173,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unwrap_dataset_variant_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "UnwrapDatasetVariant") do - desc = tf.NodeDescription("UnwrapDatasetVariant") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - end - tf.Tensor(tf.Operation(desc)) + function unwrap_dataset_variant_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "UnwrapDatasetVariant") do + desc = tf.NodeDescription("UnwrapDatasetVariant") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) end + tf.Tensor(tf.Operation(desc)) + end function unwrap_dataset_variant_eager(input_handle_; name=nothing) desc = tf.EagerOp("UnwrapDatasetVariant") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -29193,13 +29193,13 @@ begin return res[1] end end - function unwrap_dataset_variant(input_handle_; name=nothing) - if tf.in_eager_mode() - unwrap_dataset_variant_eager(input_handle_; name=name) - else - unwrap_dataset_variant_graph(input_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing) + if tf.in_eager_mode() + unwrap_dataset_variant_eager(input_handle_; name=name) + else + unwrap_dataset_variant_graph(input_handle_; name=name) + end end - end end @@ -29209,21 +29209,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) - local desc - tf.with_op_name(name, "Empty") do - desc = tf.NodeDescription("Empty") - shape_ = convert(Tensor{Int32}, shape_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if init !== nothing - desc["init"] = Base.Bool(init) - end + function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + local desc + tf.with_op_name(name, "Empty") do + desc = tf.NodeDescription("Empty") + shape_ = convert(Tensor{Int32}, shape_) + tf.add_input(desc, shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if init !== nothing + desc["init"] = Base.Bool(init) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing) desc = tf.EagerOp("Empty") shape_ = convert(tf.EagerTensor, shape_) @@ -29241,13 +29241,13 @@ begin return res[1] end end - function empty(shape_; name=nothing, dtype=nothing, init=nothing) - if tf.in_eager_mode() - empty_eager(shape_; name=name, dtype=dtype, init=init) - else - empty_graph(shape_; name=name, dtype=dtype, init=init) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing) + if tf.in_eager_mode() + empty_eager(shape_; name=name, dtype=dtype, init=init) + else + empty_graph(shape_; name=name, dtype=dtype, init=init) + end end - end end @@ -29257,22 +29257,22 @@ end Retrieve multiple values that will be emitted by the computation as an XLA """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "OutfeedDequeueTuple") do - desc = tf.NodeDescription("OutfeedDequeueTuple") - if 
dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end + function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeueTuple") do + desc = tf.NodeDescription("OutfeedDequeueTuple") + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function outfeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) desc = tf.EagerOp("OutfeedDequeueTuple") if dtypes !== nothing @@ -29291,13 +29291,13 @@ begin return res[1] end end - function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) - else - outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + else + outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + end end - end end @@ -29307,18 +29307,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Div") do - desc = tf.NodeDescription("Div") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Div") do + desc = tf.NodeDescription("Div") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function div_eager(x_, y_; name=nothing) desc = tf.EagerOp("Div") x_ = convert(tf.EagerTensor, x_) @@ -29334,13 +29334,13 @@ begin return res[1] end end - function div(x_, y_; name=nothing) - if tf.in_eager_mode() - div_eager(x_, y_; name=name) - else - div_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div(x_, y_; name=nothing) + if tf.in_eager_mode() + div_eager(x_, y_; name=name) + else + div_graph(x_, y_; name=name) + end end - end end @@ -29350,28 +29350,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Barrier") do - desc = tf.NodeDescription("Barrier") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = 
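# [editorial note] Some ops, OutfeedDequeueTuple here and Barrier/Iterator
# below, take no tensor inputs at all; the node is configured entirely through
# attributes (dtypes, shapes, device_ordinal, ...), which is why both their
# graph and eager methods start with an empty positional argument list and only
# accept keywords.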
map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Barrier") do + desc = tf.NodeDescription("Barrier") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function barrier_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Barrier") if component_types !== nothing @@ -29396,13 +29396,13 @@ begin return res[1] end end - function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -29412,18 +29412,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncate_div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "TruncateDiv") do - desc = tf.NodeDescription("TruncateDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function truncate_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateDiv") do + desc = tf.NodeDescription("TruncateDiv") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function truncate_div_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateDiv") x_ = convert(tf.EagerTensor, x_) @@ -29439,42 +29439,42 @@ begin return res[1] end end - function truncate_div(x_, y_; name=nothing) - if tf.in_eager_mode() - truncate_div_eager(x_, y_; name=name) - else - truncate_div_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_div(x_, y_; name=nothing) + if tf.in_eager_mode() + truncate_div_eager(x_, y_; name=name) + else + 
truncate_div_graph(x_, y_; name=name) + end end - end end """ - unicode_encode(input_values, input_splits; errors=replace, replacement_char=65533) + unicode_encode(input_values, input_splits; errors=, replacement_char=65533) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) - local desc - tf.with_op_name(name, "UnicodeEncode") do - desc = tf.NodeDescription("UnicodeEncode") - input_values_ = convert(Tensor{Int32}, input_values_) - input_splits_ = convert(Tensor{Int64}, input_splits_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_splits_) - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if output_encoding !== nothing - desc["output_encoding"] = Base.String(output_encoding) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end + function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + local desc + tf.with_op_name(name, "UnicodeEncode") do + desc = tf.NodeDescription("UnicodeEncode") + input_values_ = convert(Tensor{Int32}, input_values_) + input_splits_ = convert(Tensor{Int64}, input_splits_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_splits_) + if errors !== nothing + desc["errors"] = Base.String(errors) + end + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) desc = tf.EagerOp("UnicodeEncode") input_values_ = convert(tf.EagerTensor, input_values_) @@ -29497,13 +29497,13 @@ begin return res[1] end end - function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) - if tf.in_eager_mode() - unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) - else - unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + if tf.in_eager_mode() + unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + else + unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + end end - end end @@ -29513,18 +29513,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function merge_summary_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "MergeSummary") do - desc = tf.NodeDescription("MergeSummary") - inputs_ = [convert(Tensor{String}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function merge_summary_graph(inputs_; name=nothing, 
N=nothing) + local desc + tf.with_op_name(name, "MergeSummary") do + desc = tf.NodeDescription("MergeSummary") + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function merge_summary_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("MergeSummary") inputs_ = convert(tf.EagerTensor, inputs_) @@ -29539,13 +29539,13 @@ begin return res[1] end end - function merge_summary(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - merge_summary_eager(inputs_; name=name, N=N) - else - merge_summary_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + merge_summary_eager(inputs_; name=name, N=N) + else + merge_summary_graph(inputs_; name=name, N=N) + end end - end end @@ -29555,15 +29555,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_queue_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "FakeQueue") do - desc = tf.NodeDescription("FakeQueue") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - end - tf.Tensor(tf.Operation(desc)) + function fake_queue_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "FakeQueue") do + desc = tf.NodeDescription("FakeQueue") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) end + tf.Tensor(tf.Operation(desc)) + end function fake_queue_eager(resource_; name=nothing) desc = tf.EagerOp("FakeQueue") resource_ = convert(tf.EagerTensor, resource_) @@ -29575,13 +29575,13 @@ begin return res[1] end end - function fake_queue(resource_; name=nothing) - if tf.in_eager_mode() - fake_queue_eager(resource_; name=name) - else - fake_queue_graph(resource_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_queue(resource_; name=nothing) + if tf.in_eager_mode() + fake_queue_eager(resource_; name=name) + else + fake_queue_graph(resource_; name=name) + end end - end end @@ -29591,16 +29591,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_cholesky_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchCholesky") do - desc = tf.NodeDescription("BatchCholesky") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_cholesky_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholesky") do + desc = tf.NodeDescription("BatchCholesky") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_cholesky_eager(input_; name=nothing) desc = tf.EagerOp("BatchCholesky") input_ = convert(tf.EagerTensor, input_) @@ -29613,13 +29613,13 @@ begin return res[1] end end - function batch_cholesky(input_; name=nothing) - if tf.in_eager_mode() - batch_cholesky_eager(input_; name=name) - else - batch_cholesky_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky(input_; name=nothing) + if tf.in_eager_mode() + batch_cholesky_eager(input_; name=name) + else + batch_cholesky_graph(input_; name=name) + end end - end 
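# [editorial note] List-valued inputs such as MergeSummary's `inputs` are
# converted element-wise ([convert(Tensor{String}, x) for x = inputs_]) and
# their count travels separately in the integer attribute `N`, which is written
# only when the caller supplies it. A hedged call sketch (s1, s2, s3 stand for
# summary string tensors and are illustrative):
#
#     merged = merge_summary([s1, s2, s3]; N=3)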
end @@ -29629,25 +29629,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "Iterator") do - desc = tf.NodeDescription("Iterator") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "Iterator") do + desc = tf.NodeDescription("Iterator") + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("Iterator") if shared_name !== nothing @@ -29669,13 +29669,13 @@ begin return res[1] end end - function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -29685,16 +29685,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bessel_i1e_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "BesselI1e") do - desc = tf.NodeDescription("BesselI1e") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function bessel_i1e_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI1e") do + desc = tf.NodeDescription("BesselI1e") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function bessel_i1e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI1e") x_ = convert(tf.EagerTensor, x_) @@ -29707,13 +29707,13 @@ begin return res[1] end end - function bessel_i1e(x_; name=nothing) - if tf.in_eager_mode() - bessel_i1e_eager(x_; name=name) - else - bessel_i1e_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
bessel_i1e(x_; name=nothing) + if tf.in_eager_mode() + bessel_i1e_eager(x_; name=name) + else + bessel_i1e_graph(x_; name=name) + end end - end end @@ -29723,17 +29723,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function import_event_graph(writer_, event_; name=nothing) - local desc - tf.with_op_name(name, "ImportEvent") do - desc = tf.NodeDescription("ImportEvent") - writer_ = convert(Tensor{Any}, writer_) - event_ = convert(Tensor{String}, event_) - tf.add_input(desc, writer_) - tf.add_input(desc, event_) - end - tf.Tensor(tf.Operation(desc)) + function import_event_graph(writer_, event_; name=nothing) + local desc + tf.with_op_name(name, "ImportEvent") do + desc = tf.NodeDescription("ImportEvent") + writer_ = convert(Tensor{Any}, writer_) + event_ = convert(Tensor{String}, event_) + tf.add_input(desc, writer_) + tf.add_input(desc, event_) end + tf.Tensor(tf.Operation(desc)) + end function import_event_eager(writer_, event_; name=nothing) desc = tf.EagerOp("ImportEvent") writer_ = convert(tf.EagerTensor, writer_) @@ -29747,13 +29747,13 @@ begin return res[1] end end - function import_event(writer_, event_; name=nothing) - if tf.in_eager_mode() - import_event_eager(writer_, event_; name=name) - else - import_event_graph(writer_, event_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function import_event(writer_, event_; name=nothing) + if tf.in_eager_mode() + import_event_eager(writer_, event_; name=name) + else + import_event_graph(writer_, event_; name=name) + end end - end end @@ -29763,40 +29763,40 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) - local desc - tf.with_op_name(name, "QuantizedInstanceNorm") do - desc = tf.NodeDescription("QuantizedInstanceNorm") - x_ = convert(Tensor{Any}, x_) - x_min_ = convert(Tensor{Float32}, x_min_) - x_max_ = convert(Tensor{Float32}, x_max_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, x_min_) - tf.add_input(desc, x_max_) - if output_range_given !== nothing - desc["output_range_given"] = Base.Bool(output_range_given) - end - if given_y_min !== nothing - desc["given_y_min"] = Base.identity(given_y_min) - end - if given_y_max !== nothing - desc["given_y_max"] = Base.identity(given_y_max) - end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if min_separation !== nothing - desc["min_separation"] = Base.identity(min_separation) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + local desc + tf.with_op_name(name, "QuantizedInstanceNorm") do + desc = tf.NodeDescription("QuantizedInstanceNorm") + x_ = convert(Tensor{Any}, x_) + x_min_ = convert(Tensor{Float32}, x_min_) + x_max_ = convert(Tensor{Float32}, x_max_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, x_min_) + tf.add_input(desc, x_max_) + if output_range_given !== nothing + desc["output_range_given"] = Base.Bool(output_range_given) + end + if given_y_min !== nothing + 
desc["given_y_min"] = Base.identity(given_y_min) + end + if given_y_max !== nothing + desc["given_y_max"] = Base.identity(given_y_max) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if min_separation !== nothing + desc["min_separation"] = Base.identity(min_separation) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) desc = tf.EagerOp("QuantizedInstanceNorm") x_ = convert(tf.EagerTensor, x_) @@ -29828,13 +29828,13 @@ begin return res end end - function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) - if tf.in_eager_mode() - quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) - else - quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + if tf.in_eager_mode() + quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + else + quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + end end - end end @@ -29844,22 +29844,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWriteV3") do - desc = tf.NodeDescription("TensorArrayWriteV3") - handle_ = convert(Tensor{Any}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV3") do + desc = tf.NodeDescription("TensorArrayWriteV3") + handle_ = convert(Tensor{Any}, handle_) + index_ = convert(Tensor{Int32}, index_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) desc = 
tf.EagerOp("TensorArrayWriteV3") handle_ = convert(tf.EagerTensor, handle_) @@ -29878,13 +29878,13 @@ begin return res[1] end end - function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + end end - end end @@ -29894,29 +29894,29 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -29942,13 +29942,13 @@ begin return res[1] end end - function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -29958,29 +29958,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "DenseToDenseSetOperation") do - desc = tf.NodeDescription("DenseToDenseSetOperation") - set1_ = convert(Tensor{Any}, set1_) - set2_ = convert(Tensor{Any}, set2_) - (set1_, set2_) = tf.tf_promote(set1_, set2_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end + function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "DenseToDenseSetOperation") do + desc = tf.NodeDescription("DenseToDenseSetOperation") + set1_ = convert(Tensor{Any}, set1_) + set2_ = convert(Tensor{Any}, set2_) + (set1_, set2_) = tf.tf_promote(set1_, set2_) + tf.add_input(desc, set1_) + tf.add_input(desc, set2_) + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) desc = tf.EagerOp("DenseToDenseSetOperation") set1_ = convert(tf.EagerTensor, set1_) @@ -30002,58 +30002,58 @@ begin return res end end - function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - end end """ - encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, chroma_downsampling=true, density_unit=in, x_density=300, y_density=300, xmp_metadata=) + encode_jpeg(image; format=, quality=95, progressive=false, optimize_size=false, 
chroma_downsampling=true, density_unit=, x_density=300, y_density=300, xmp_metadata=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - local desc - tf.with_op_name(name, "EncodeJpeg") do - desc = tf.NodeDescription("EncodeJpeg") - image_ = convert(Tensor{UInt8}, image_) - tf.add_input(desc, image_) - if format !== nothing - desc["format"] = Base.String(format) - end - if quality !== nothing - desc["quality"] = Base.Int(quality) - end - if progressive !== nothing - desc["progressive"] = Base.Bool(progressive) - end - if optimize_size !== nothing - desc["optimize_size"] = Base.Bool(optimize_size) - end - if chroma_downsampling !== nothing - desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) - end - if density_unit !== nothing - desc["density_unit"] = Base.String(density_unit) - end - if x_density !== nothing - desc["x_density"] = Base.Int(x_density) - end - if y_density !== nothing - desc["y_density"] = Base.Int(y_density) - end - if xmp_metadata !== nothing - desc["xmp_metadata"] = Base.String(xmp_metadata) - end + function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + local desc + tf.with_op_name(name, "EncodeJpeg") do + desc = tf.NodeDescription("EncodeJpeg") + image_ = convert(Tensor{UInt8}, image_) + tf.add_input(desc, image_) + if format !== nothing + desc["format"] = Base.String(format) + end + if quality !== nothing + desc["quality"] = Base.Int(quality) + end + if progressive !== nothing + desc["progressive"] = Base.Bool(progressive) + end + if optimize_size !== nothing + desc["optimize_size"] = Base.Bool(optimize_size) + end + if chroma_downsampling !== nothing + desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) + end + if density_unit !== nothing + desc["density_unit"] = Base.String(density_unit) + end + if x_density !== nothing + desc["x_density"] = Base.Int(x_density) + end + if y_density !== nothing + desc["y_density"] = Base.Int(y_density) + end + if xmp_metadata !== nothing + desc["xmp_metadata"] = Base.String(xmp_metadata) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) desc = tf.EagerOp("EncodeJpeg") image_ = convert(tf.EagerTensor, image_) @@ -30092,13 +30092,13 @@ begin return res[1] end end - function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - if tf.in_eager_mode() - encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) - else - encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, 
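# [editorial note] Attribute-heavy ops like EncodeJpeg expose every op
# attribute as an optional keyword and only write the non-`nothing` ones into
# the description, so unspecified attributes fall back to the kernel defaults
# listed in the docstring above. A hedged call sketch (image_ is assumed to be
# a UInt8 image tensor):
#
#     jpeg_bytes = encode_jpeg(image_; quality=90, progressive=true)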
chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + if tf.in_eager_mode() + encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + else + encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + end end - end end @@ -30108,29 +30108,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "FusedPadConv2D") do - desc = tf.NodeDescription("FusedPadConv2D") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedPadConv2D") do + desc = tf.NodeDescription("FusedPadConv2D") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + tf.add_input(desc, filter_) + if mode !== nothing + desc["mode"] = Base.String(mode) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("FusedPadConv2D") input_ = convert(tf.EagerTensor, input_) @@ -30157,13 +30157,13 @@ begin return res[1] end end - function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) - else - fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, 
mode=mode, strides=strides, padding=padding) + else + fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + end end - end end @@ -30173,20 +30173,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_update_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceUpdate") do - desc = tf.NodeDescription("InplaceUpdate") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) + function inplace_update_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceUpdate") do + desc = tf.NodeDescription("InplaceUpdate") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) end + tf.Tensor(tf.Operation(desc)) + end function inplace_update_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceUpdate") x_ = convert(tf.EagerTensor, x_) @@ -30204,13 +30204,13 @@ begin return res[1] end end - function inplace_update(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_update_eager(x_, i_, v_; name=name) - else - inplace_update_graph(x_, i_, v_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_update(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_update_eager(x_, i_, v_; name=name) + else + inplace_update_graph(x_, i_, v_; name=name) + end end - end end @@ -30220,28 +30220,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedRelu") do - desc = tf.NodeDescription("QuantizedRelu") - features_ = convert(Tensor{Any}, features_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu") do + desc = tf.NodeDescription("QuantizedRelu") + features_ = convert(Tensor{Any}, features_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu") features_ = convert(tf.EagerTensor, features_) @@ -30261,13 +30261,13 @@ begin return res end end - function quantized_relu(features_, 
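# Ops with several outputs, like QuantizedRelu above, return a Vector of
# tensors: the graph path pushes tf.Tensor(op, out_idx) for out_idx = 1:3.
# A hedged sketch of unpacking them; features, min_f, and max_f are assumed
# pre-built tensors of the op's expected quantized dtype, and the output names
# follow the upstream QuantizedRelu documentation, not this patch:
out = quantized_relu(features, min_f, max_f)
activations, min_activations, max_activations = out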
min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end end - end end @@ -30277,20 +30277,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_nd_graph(params_, indices_; name=nothing) - local desc - tf.with_op_name(name, "GatherNd") do - desc = tf.NodeDescription("GatherNd") - params_ = convert(Tensor{Any}, params_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - end - tf.Tensor(tf.Operation(desc)) + function gather_nd_graph(params_, indices_; name=nothing) + local desc + tf.with_op_name(name, "GatherNd") do + desc = tf.NodeDescription("GatherNd") + params_ = convert(Tensor{Any}, params_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (params_,) = tf.tf_promote(params_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, params_) + tf.add_input(desc, indices_) end + tf.Tensor(tf.Operation(desc)) + end function gather_nd_eager(params_, indices_; name=nothing) desc = tf.EagerOp("GatherNd") params_ = convert(tf.EagerTensor, params_) @@ -30306,13 +30306,13 @@ begin return res[1] end end - function gather_nd(params_, indices_; name=nothing) - if tf.in_eager_mode() - gather_nd_eager(params_, indices_; name=name) - else - gather_nd_graph(params_, indices_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_nd(params_, indices_; name=nothing) + if tf.in_eager_mode() + gather_nd_eager(params_, indices_; name=name) + else + gather_nd_graph(params_, indices_; name=name) + end end - end end @@ -30322,19 +30322,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "Placeholder") do - desc = tf.NodeDescription("Placeholder") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "Placeholder") do + desc = tf.NodeDescription("Placeholder") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function placeholder_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("Placeholder") if dtype !== nothing @@ -30350,13 +30350,13 @@ begin return res[1] end end - function placeholder(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_eager(; name=name, dtype=dtype, 
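# gather_nd_graph above shifts `indices` down by one before emitting the op,
# so the Julia surface stays 1-based while GatherNd itself sees 0-based
# indices. A hedged worked sketch (graph mode, made-up values):
params = constant([10, 20, 30])
idx    = constant(reshape([3, 1], 2, 1))   # 1-based element positions
picked = gather_nd(params, idx)            # expected value: [30, 10]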
shape=shape) - else - placeholder_graph(; name=name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_graph(; name=name, dtype=dtype, shape=shape) + end end - end end @@ -30366,21 +30366,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "FilterByLastComponentDataset") do - desc = tf.NodeDescription("FilterByLastComponentDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterByLastComponentDataset") do + desc = tf.NodeDescription("FilterByLastComponentDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FilterByLastComponentDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -30398,13 +30398,13 @@ begin return res[1] end end - function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -30414,20 +30414,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) - local desc - tf.with_op_name(name, "ClipByValue") do - desc = tf.NodeDescription("ClipByValue") - t_ = convert(Tensor{Any}, t_) - clip_value_min_ = convert(Tensor{Any}, clip_value_min_) - clip_value_max_ = convert(Tensor{Any}, clip_value_max_) - (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_) - tf.add_input(desc, t_) - tf.add_input(desc, clip_value_min_) - tf.add_input(desc, clip_value_max_) - end - tf.Tensor(tf.Operation(desc)) + function 
clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + local desc + tf.with_op_name(name, "ClipByValue") do + desc = tf.NodeDescription("ClipByValue") + t_ = convert(Tensor{Any}, t_) + clip_value_min_ = convert(Tensor{Any}, clip_value_min_) + clip_value_max_ = convert(Tensor{Any}, clip_value_max_) + (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_) + tf.add_input(desc, t_) + tf.add_input(desc, clip_value_min_) + tf.add_input(desc, clip_value_max_) end + tf.Tensor(tf.Operation(desc)) + end function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing) desc = tf.EagerOp("ClipByValue") t_ = convert(tf.EagerTensor, t_) @@ -30446,13 +30446,13 @@ begin return res[1] end end - function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) - if tf.in_eager_mode() - clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name) - else - clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) + if tf.in_eager_mode() + clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name) + else + clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name) + end end - end end @@ -30462,24 +30462,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - local desc - tf.with_op_name(name, "ImageSummary") do - desc = tf.NodeDescription("ImageSummary") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end - if bad_color !== nothing - desc["bad_color"] = TensorFlow.RawTensor(bad_color) - end + function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + local desc + tf.with_op_name(name, "ImageSummary") do + desc = tf.NodeDescription("ImageSummary") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + if bad_color !== nothing + desc["bad_color"] = TensorFlow.RawTensor(bad_color) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) desc = tf.EagerOp("ImageSummary") tag_ = convert(tf.EagerTensor, tag_) @@ -30500,13 +30500,13 @@ begin return res[1] end end - function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - if tf.in_eager_mode() - image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) - else - image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + if tf.in_eager_mode() + image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + else + image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + end 
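# ClipByValue above is the elementwise clamp min(max(t, lo), hi). A small
# worked sketch under the same graph-mode assumptions as the other examples:
t       = constant([-2.0, 0.5, 3.0])
clipped = clip_by_value(t, constant(0.0), constant(1.0))
# expected value after running: [0.0, 0.5, 1.0]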
end - end end @@ -30516,30 +30516,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_adadelta_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters") if table_id !== nothing @@ -30561,13 +30561,13 @@ begin return res end end - function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -30577,21 +30577,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) - local desc - tf.with_op_name(name, "StringJoin") do - desc = tf.NodeDescription("StringJoin") - inputs_ = [convert(Tensor{String}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if separator !== nothing - desc["separator"] = Base.String(separator) - end + function string_join_graph(inputs_; name=nothing, N=nothing, 
separator=nothing) + local desc + tf.with_op_name(name, "StringJoin") do + desc = tf.NodeDescription("StringJoin") + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if separator !== nothing + desc["separator"] = Base.String(separator) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing) desc = tf.EagerOp("StringJoin") inputs_ = convert(tf.EagerTensor, inputs_) @@ -30609,13 +30609,13 @@ begin return res[1] end end - function string_join(inputs_; name=nothing, N=nothing, separator=nothing) - if tf.in_eager_mode() - string_join_eager(inputs_; name=name, N=N, separator=separator) - else - string_join_graph(inputs_; name=name, N=N, separator=separator) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing) + if tf.in_eager_mode() + string_join_eager(inputs_; name=name, N=N, separator=separator) + else + string_join_graph(inputs_; name=name, N=N, separator=separator) + end end - end end @@ -30625,25 +30625,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceScatterNdAdd") do - desc = tf.NodeDescription("ResourceScatterNdAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdAdd") do + desc = tf.NodeDescription("ResourceScatterNdAdd") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdAdd") ref_ = convert(tf.EagerTensor, ref_) @@ -30664,13 +30664,13 @@ begin return res[1] end end - function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + 
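# The StringJoin wrapper above joins N string tensors elementwise with the
# `separator` attribute. A hedged sketch; N is passed explicitly here, since
# whether it can be inferred from the input list is not shown by this hunk:
a = constant(["foo", "bar"])
b = constant(["1", "2"])
joined = string_join([a, b]; N=2, separator="-")   # expected: ["foo-1", "bar-2"]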
resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -30680,20 +30680,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, bucket_boundaries_) - if num_streams !== nothing - desc["num_streams"] = Base.Int(num_streams) - end + function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, bucket_boundaries_) + if num_streams !== nothing + desc["num_streams"] = Base.Int(num_streams) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -30710,13 +30710,13 @@ begin return res[1] end end - function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) - else - boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + else + boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + end end - end end @@ -30726,18 +30726,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function left_shift_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LeftShift") do - desc = tf.NodeDescription("LeftShift") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function left_shift_graph(x_, 
y_; name=nothing) + local desc + tf.with_op_name(name, "LeftShift") do + desc = tf.NodeDescription("LeftShift") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function left_shift_eager(x_, y_; name=nothing) desc = tf.EagerOp("LeftShift") x_ = convert(tf.EagerTensor, x_) @@ -30753,13 +30753,13 @@ begin return res[1] end end - function left_shift(x_, y_; name=nothing) - if tf.in_eager_mode() - left_shift_eager(x_, y_; name=name) - else - left_shift_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function left_shift(x_, y_; name=nothing) + if tf.in_eager_mode() + left_shift_eager(x_, y_; name=name) + else + left_shift_graph(x_, y_; name=name) + end end - end end @@ -30769,22 +30769,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterAdd") do - desc = tf.NodeDescription("TensorScatterAdd") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterAdd") do + desc = tf.NodeDescription("TensorScatterAdd") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterAdd") tensor_ = convert(tf.EagerTensor, tensor_) @@ -30803,13 +30803,13 @@ begin return res[1] end end - function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_add_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_add_graph(tensor_, indices_, updates_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_add_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_add_graph(tensor_, indices_, updates_; name=name) + end end - end end @@ -30819,33 +30819,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "_VarHandlesOp") do - desc = tf.NodeDescription("_VarHandlesOp") - if containers !== nothing - desc["containers"] = map(Base.identity, containers) - end - if shared_names !== nothing - desc["shared_names"] = map(Base.identity, shared_names) - end - if N !== nothing - 
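# TensorScatterAdd above returns a copy of `tensor` with `updates` added at
# `indices`; like GatherNd, the wrapper shifts indices so callers stay
# 1-based. A hedged worked sketch:
base = constant([1.0, 1.0, 1.0, 1.0])
idx  = constant(reshape([1, 4], 2, 1))     # 1-based positions
upd  = constant([10.0, 40.0])
res  = tensor_scatter_add(base, idx, upd)  # expected: [11.0, 1.0, 1.0, 41.0]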
desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end + function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_VarHandlesOp") do + desc = tf.NodeDescription("_VarHandlesOp") + if containers !== nothing + desc["containers"] = map(Base.identity, containers) + end + if shared_names !== nothing + desc["shared_names"] = map(Base.identity, shared_names) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _var_handles_op_eager(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) desc = tf.EagerOp("_VarHandlesOp") if containers !== nothing @@ -30870,13 +30870,13 @@ begin return res end end - function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) - if tf.in_eager_mode() - _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) - else - _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + if tf.in_eager_mode() + _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + else + _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + end end - end end @@ -30886,16 +30886,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "IFFT3D") do - desc = tf.NodeDescription("IFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function ifft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT3D") do + desc = tf.NodeDescription("IFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("IFFT3D") input_ = convert(tf.EagerTensor, input_) @@ -30908,13 +30908,13 @@ begin return res[1] end end - function ifft3d(input_; name=nothing) - if tf.in_eager_mode() - ifft3d_eager(input_; name=name) - else - ifft3d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft3d(input_; name=nothing) + if tf.in_eager_mode() + ifft3d_eager(input_; name=name) + else + ifft3d_graph(input_; name=name) + end end - end end @@ -30924,21 +30924,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
ref_select_graph(index_, inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "RefSelect") do - desc = tf.NodeDescription("RefSelect") - index_ = convert(Tensor{Int32}, index_) - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, index_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefSelect") do + desc = tf.NodeDescription("RefSelect") + index_ = convert(Tensor{Int32}, index_) + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, index_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function ref_select_eager(index_, inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefSelect") index_ = convert(tf.EagerTensor, index_) @@ -30956,13 +30956,13 @@ begin return res[1] end end - function ref_select(index_, inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - ref_select_eager(index_, inputs_; name=name, N=N) - else - ref_select_graph(index_, inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + ref_select_eager(index_, inputs_; name=name, N=N) + else + ref_select_graph(index_, inputs_; name=name, N=N) + end end - end end @@ -30972,20 +30972,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseTensorSliceDataset") do - desc = tf.NodeDescription("SparseTensorSliceDataset") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - dense_shape_ = convert(Tensor{Int64}, dense_shape_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorSliceDataset") do + desc = tf.NodeDescription("SparseTensorSliceDataset") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing) desc = tf.EagerOp("SparseTensorSliceDataset") indices_ = convert(tf.EagerTensor, indices_) @@ -31002,13 +31002,13 @@ begin return res[1] end end - function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) - if tf.in_eager_mode() - sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name) - else - sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) + if tf.in_eager_mode() + sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name) + else + 
sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name) + end end - end end @@ -31018,30 +31018,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") if table_id !== nothing @@ -31063,13 +31063,13 @@ begin return res end end - function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -31079,15 +31079,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT2D") do - desc = tf.NodeDescription("BatchIFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - 
tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_ifft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT2D") do + desc = tf.NodeDescription("BatchIFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT2D") input_ = convert(tf.EagerTensor, input_) @@ -31099,13 +31099,13 @@ begin return res[1] end end - function batch_ifft2d(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft2d_eager(input_; name=name) - else - batch_ifft2d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft2d(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft2d_eager(input_; name=name) + else + batch_ifft2d_graph(input_; name=name) + end end - end end @@ -31115,25 +31115,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGather") do - desc = tf.NodeDescription("TensorArrayGather") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end + function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGather") do + desc = tf.NodeDescription("TensorArrayGather") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGather") handle_ = convert(tf.EagerTensor, handle_) @@ -31155,13 +31155,13 @@ begin return res[1] end end - function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end end @@ -31171,25 +31171,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op 
function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentMeanWithNumSegments") data_ = convert(tf.EagerTensor, data_) @@ -31210,13 +31210,13 @@ begin return res[1] end end - function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) - else - sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end end - end end @@ -31226,19 +31226,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ensure_shape_graph(input_; name=nothing, shape=nothing) - local desc - tf.with_op_name(name, "EnsureShape") do - desc = tf.NodeDescription("EnsureShape") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function ensure_shape_graph(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "EnsureShape") do + desc = tf.NodeDescription("EnsureShape") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function 
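# SparseSegmentMeanWithNumSegments above averages the `indices`-selected rows
# of `data` per segment, emitting exactly `num_segments` output rows; note the
# wrapper shifts only `indices`, while segment ids pass through 0-based. A
# hedged worked sketch:
data = constant([1.0 2.0; 3.0 4.0; 5.0 6.0])
out  = sparse_segment_mean_with_num_segments(
    data, constant([1, 2]), constant([0, 0]), constant(3))
# expected: first output row [2.0 3.0]; the two empty segments come back zero.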
ensure_shape_eager(input_; name=nothing, shape=nothing) desc = tf.EagerOp("EnsureShape") input_ = convert(tf.EagerTensor, input_) @@ -31254,13 +31254,13 @@ begin return res[1] end end - function ensure_shape(input_; name=nothing, shape=nothing) - if tf.in_eager_mode() - ensure_shape_eager(input_; name=name, shape=shape) - else - ensure_shape_graph(input_; name=name, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing) + if tf.in_eager_mode() + ensure_shape_eager(input_; name=name, shape=shape) + else + ensure_shape_graph(input_; name=name, shape=shape) + end end - end end @@ -31270,27 +31270,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyProximalGradientDescent") do - desc = tf.NodeDescription("ApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - delta_ = convert(Tensor{Any}, delta_) - (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalGradientDescent") do + desc = tf.NodeDescription("ApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + delta_ = convert(Tensor{Any}, delta_) + (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -31318,13 +31318,13 @@ begin return res[1] end end - function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - else - apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end end - end end @@ -31334,34 +31334,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
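# ApplyProximalGradientDescent above applies the proximal update; restated in
# scalar Julia for clarity (hedged from the upstream op documentation, not
# from code in this patch):
function prox_step(var, alpha, l1, l2, delta)
    prox = var - alpha * delta
    sign(prox) * max(abs(prox) - alpha * l1, 0) / (1 + alpha * l2)
end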
collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) - local desc - tf.with_op_name(name, "CollectiveReduce") do - desc = tf.NodeDescription("CollectiveReduce") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if merge_op !== nothing - desc["merge_op"] = Base.String(merge_op) - end - if final_op !== nothing - desc["final_op"] = Base.String(final_op) - end - if subdiv_offsets !== nothing - desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) - end - end - tf.Tensor(tf.Operation(desc)) + function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) + local desc + tf.with_op_name(name, "CollectiveReduce") do + desc = tf.NodeDescription("CollectiveReduce") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + if merge_op !== nothing + desc["merge_op"] = Base.String(merge_op) + end + if final_op !== nothing + desc["final_op"] = Base.String(final_op) + end + if subdiv_offsets !== nothing + desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) + end end + tf.Tensor(tf.Operation(desc)) + end function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) desc = tf.EagerOp("CollectiveReduce") input_ = convert(tf.EagerTensor, input_) @@ -31392,13 +31392,13 @@ begin return res[1] end end - function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) - if tf.in_eager_mode() - collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) - else - collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) + if tf.in_eager_mode() + collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) + else + collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) + end end - end end @@ -31408,16 +31408,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_nan_graph(x_; name=nothing) - local desc - 
tf.with_op_name(name, "IsNan") do - desc = tf.NodeDescription("IsNan") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function is_nan_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsNan") do + desc = tf.NodeDescription("IsNan") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function is_nan_eager(x_; name=nothing) desc = tf.EagerOp("IsNan") x_ = convert(tf.EagerTensor, x_) @@ -31430,13 +31430,13 @@ begin return res[1] end end - function is_nan(x_; name=nothing) - if tf.in_eager_mode() - is_nan_eager(x_; name=name) - else - is_nan_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_nan(x_; name=nothing) + if tf.in_eager_mode() + is_nan_eager(x_; name=name) + else + is_nan_graph(x_; name=name) + end end - end end @@ -31446,35 +31446,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAdaMax") do - desc = tf.NodeDescription("ApplyAdaMax") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdaMax") do + desc = tf.NodeDescription("ApplyAdaMax") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdaMax") var_ = convert(tf.EagerTensor, var_) @@ -31514,13 +31514,13 @@ begin return res[1] end end - 
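# ApplyAdaMax above implements the AdaMax variant of Adam; in scalar form
# (hedged from the upstream op documentation, not from code in this patch):
function ada_max_step(var, m, v, beta1_power, lr, beta1, beta2, eps, grad)
    m   = beta1 * m + (1 - beta1) * grad       # first-moment estimate
    v   = max(beta2 * v, abs(grad))            # infinity-norm second moment
    var = var - (lr / (1 - beta1_power)) * m / (v + eps)
    return var, m, v
end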
function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -31530,35 +31530,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - local desc - tf.with_op_name(name, "DecodeAndCropJpeg") do - desc = tf.NodeDescription("DecodeAndCropJpeg") - contents_ = convert(Tensor{String}, contents_) - crop_window_ = convert(Tensor{Int32}, crop_window_) - tf.add_input(desc, contents_) - tf.add_input(desc, crop_window_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - end - tf.Tensor(tf.Operation(desc)) + function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeAndCropJpeg") do + desc = tf.NodeDescription("DecodeAndCropJpeg") + contents_ = convert(Tensor{String}, contents_) + crop_window_ = convert(Tensor{Int32}, crop_window_) + tf.add_input(desc, contents_) + tf.add_input(desc, crop_window_) + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end end + tf.Tensor(tf.Operation(desc)) + end function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) desc = tf.EagerOp("DecodeAndCropJpeg") contents_ = convert(tf.EagerTensor, contents_) @@ -31590,13 +31590,13 @@ begin return res[1] end end - function decode_and_crop_jpeg(contents_, 
crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - if tf.in_eager_mode() - decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - else - decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.in_eager_mode() + decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end end - end end @@ -31606,35 +31606,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyCenteredRMSProp") do - desc = tf.NodeDescription("ApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyCenteredRMSProp") do + desc = tf.NodeDescription("ApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) 
+ tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyCenteredRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -31674,48 +31674,48 @@ begin return res[1] end end - function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end """ - conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d_backprop_filter_v2(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropFilterV2") do - desc = tf.NodeDescription("Conv3DBackpropFilterV2") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilterV2") do + desc = tf.NodeDescription("Conv3DBackpropFilterV2") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if 
dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilterV2") input_ = convert(tf.EagerTensor, input_) @@ -31745,13 +31745,13 @@ begin return res[1] end end - function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -31761,24 +31761,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixTriangularSolve") do - desc = tf.NodeDescription("MatrixTriangularSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixTriangularSolve") do + desc = tf.NodeDescription("MatrixTriangularSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixTriangularSolve") matrix_ = convert(tf.EagerTensor, matrix_) @@ -31800,13 +31800,13 @@ begin return res[1] end end - function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - if tf.in_eager_mode() - matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - else - matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, 
lower=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + else + matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end end - end end @@ -31816,15 +31816,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_work_units_completed_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do - desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") + reader_handle_ = convert(Tensor{String}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_num_work_units_completed_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -31836,13 +31836,13 @@ begin return res[1] end end - function reader_num_work_units_completed(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_work_units_completed_eager(reader_handle_; name=name) - else - reader_num_work_units_completed_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_work_units_completed_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_graph(reader_handle_; name=name) + end end - end end @@ -31852,26 +31852,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - local desc - tf.with_op_name(name, "WriteAudioSummary") do - desc = tf.NodeDescription("WriteAudioSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - sample_rate_ = convert(Tensor{Float32}, sample_rate_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end + function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "WriteAudioSummary") do + desc = tf.NodeDescription("WriteAudioSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) desc = 
tf.EagerOp("WriteAudioSummary") writer_ = convert(tf.EagerTensor, writer_) @@ -31894,13 +31894,13 @@ begin return res[1] end end - function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - if tf.in_eager_mode() - write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - else - write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + if tf.in_eager_mode() + write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + else + write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + end end - end end @@ -31910,17 +31910,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sharded_filespec_graph(basename_, num_shards_; name=nothing) - local desc - tf.with_op_name(name, "ShardedFilespec") do - desc = tf.NodeDescription("ShardedFilespec") - basename_ = convert(Tensor{String}, basename_) - num_shards_ = convert(Tensor{Int32}, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, num_shards_) - end - tf.Tensor(tf.Operation(desc)) + function sharded_filespec_graph(basename_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilespec") do + desc = tf.NodeDescription("ShardedFilespec") + basename_ = convert(Tensor{String}, basename_) + num_shards_ = convert(Tensor{Int32}, num_shards_) + tf.add_input(desc, basename_) + tf.add_input(desc, num_shards_) end + tf.Tensor(tf.Operation(desc)) + end function sharded_filespec_eager(basename_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilespec") basename_ = convert(tf.EagerTensor, basename_) @@ -31934,13 +31934,13 @@ begin return res[1] end end - function sharded_filespec(basename_, num_shards_; name=nothing) - if tf.in_eager_mode() - sharded_filespec_eager(basename_, num_shards_; name=name) - else - sharded_filespec_graph(basename_, num_shards_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing) + if tf.in_eager_mode() + sharded_filespec_eager(basename_, num_shards_; name=name) + else + sharded_filespec_graph(basename_, num_shards_; name=name) + end end - end end @@ -31950,18 +31950,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function div_no_nan_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "DivNoNan") do - desc = tf.NodeDescription("DivNoNan") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function div_no_nan_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "DivNoNan") do + desc = tf.NodeDescription("DivNoNan") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function div_no_nan_eager(x_, y_; name=nothing) desc = tf.EagerOp("DivNoNan") x_ = convert(tf.EagerTensor, x_) @@ -31977,13 +31977,13 @@ begin return res[1] end end - function div_no_nan(x_, y_; 
name=nothing) - if tf.in_eager_mode() - div_no_nan_eager(x_, y_; name=name) - else - div_no_nan_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div_no_nan(x_, y_; name=nothing) + if tf.in_eager_mode() + div_no_nan_eager(x_, y_; name=name) + else + div_no_nan_graph(x_, y_; name=name) + end end - end end @@ -31993,30 +31993,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) - local desc - tf.with_op_name(name, "SparseAccumulatorApplyGradient") do - desc = tf.NodeDescription("SparseAccumulatorApplyGradient") - handle_ = convert(Tensor{String}, handle_) - local_step_ = convert(Tensor{Int64}, local_step_) - gradient_indices_ = convert(Tensor{Int64}, gradient_indices_) - gradient_values_ = convert(Tensor{Any}, gradient_values_) - gradient_shape_ = convert(Tensor{Int64}, gradient_shape_) - (gradient_values_,) = tf.tf_promote(gradient_values_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_indices_) - tf.add_input(desc, gradient_values_) - tf.add_input(desc, gradient_shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if has_known_shape !== nothing - desc["has_known_shape"] = Base.Bool(has_known_shape) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorApplyGradient") do + desc = tf.NodeDescription("SparseAccumulatorApplyGradient") + handle_ = convert(Tensor{String}, handle_) + local_step_ = convert(Tensor{Int64}, local_step_) + gradient_indices_ = convert(Tensor{Int64}, gradient_indices_) + gradient_values_ = convert(Tensor{Any}, gradient_values_) + gradient_shape_ = convert(Tensor{Int64}, gradient_shape_) + (gradient_values_,) = tf.tf_promote(gradient_values_) + tf.add_input(desc, handle_) + tf.add_input(desc, local_step_) + tf.add_input(desc, gradient_indices_) + tf.add_input(desc, gradient_values_) + tf.add_input(desc, gradient_shape_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if has_known_shape !== nothing + desc["has_known_shape"] = Base.Bool(has_known_shape) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) desc = tf.EagerOp("SparseAccumulatorApplyGradient") handle_ = convert(tf.EagerTensor, handle_) @@ -32043,13 +32043,13 @@ begin return res[1] end end - function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) - if tf.in_eager_mode() - sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) - else - sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + if tf.in_eager_mode() + sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + else + sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + end end - end end @@ -32059,26 +32059,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) - local desc - tf.with_op_name(name, "RaggedTensorToSparse") do - desc = tf.NodeDescription("RaggedTensorToSparse") - rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_] - rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_) - (rt_dense_values_,) = tf.tf_promote(rt_dense_values_) - tf.add_input(desc, rt_nested_splits_) - tf.add_input(desc, rt_dense_values_) - if RAGGED_RANK !== nothing - desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) - end + function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + local desc + tf.with_op_name(name, "RaggedTensorToSparse") do + desc = tf.NodeDescription("RaggedTensorToSparse") + rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_] + rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_) + (rt_dense_values_,) = tf.tf_promote(rt_dense_values_) + tf.add_input(desc, rt_nested_splits_) + tf.add_input(desc, rt_dense_values_) + if RAGGED_RANK !== nothing + desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedTensorToSparse") rt_nested_splits_ = convert(tf.EagerTensor, rt_nested_splits_) @@ -32096,13 +32096,13 @@ begin return res end end - function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) - if tf.in_eager_mode() - ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) - else - ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + if tf.in_eager_mode() + ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + else + ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + end end - end end @@ -32112,25 +32112,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "ExtractVolumePatches") do - desc = tf.NodeDescription("ExtractVolumePatches") - input_ = convert(Tensor{Any}, input_) - (input_,) = 
tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksizes !== nothing - desc["ksizes"] = map(Base.identity, ksizes) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "ExtractVolumePatches") do + desc = tf.NodeDescription("ExtractVolumePatches") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("ExtractVolumePatches") input_ = convert(tf.EagerTensor, input_) @@ -32152,13 +32152,13 @@ begin return res[1] end end - function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) - else - extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + else + extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + end end - end end @@ -32168,26 +32168,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) - local desc - tf.with_op_name(name, "BarrierInsertMany") do - desc = tf.NodeDescription("BarrierInsertMany") - handle_ = convert(Tensor{String}, handle_) - keys_ = convert(Tensor{String}, keys_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - if component_index !== nothing - component_index = Base.Int(component_index) - 1 - end - if component_index !== nothing - desc["component_index"] = Base.Int(component_index) - end + function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + local desc + tf.with_op_name(name, "BarrierInsertMany") do + desc = tf.NodeDescription("BarrierInsertMany") + handle_ = convert(Tensor{String}, handle_) + keys_ = convert(Tensor{String}, keys_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) + if component_index !== nothing + component_index = Base.Int(component_index) - 1 + end + if component_index !== nothing + desc["component_index"] = Base.Int(component_index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing) desc = tf.EagerOp("BarrierInsertMany") handle_ 
= convert(tf.EagerTensor, handle_) @@ -32210,13 +32210,13 @@ begin return res[1] end end - function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) - if tf.in_eager_mode() - barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index) - else - barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) + if tf.in_eager_mode() + barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index) + else + barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index) + end end - end end @@ -32226,19 +32226,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function const__graph(; name=nothing, value=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "Const") do - desc = tf.NodeDescription("Const") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function const__graph(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "Const") do + desc = tf.NodeDescription("Const") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function const__eager(; name=nothing, value=nothing, dtype=nothing) desc = tf.EagerOp("Const") if value !== nothing @@ -32254,13 +32254,13 @@ begin return res[1] end end - function const_(; name=nothing, value=nothing, dtype=nothing) - if tf.in_eager_mode() - const__eager(; name=name, value=value, dtype=dtype) - else - const__graph(; name=name, value=value, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function const_(; name=nothing, value=nothing, dtype=nothing) + if tf.in_eager_mode() + const__eager(; name=name, value=value, dtype=dtype) + else + const__graph(; name=name, value=value, dtype=dtype) + end end - end end @@ -32270,22 +32270,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) - local desc - tf.with_op_name(name, "SpaceToBatch") do - desc = tf.NodeDescription("SpaceToBatch") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end + function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "SpaceToBatch") do + desc = tf.NodeDescription("SpaceToBatch") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing) desc = 
tf.EagerOp("SpaceToBatch") input_ = convert(tf.EagerTensor, input_) @@ -32304,13 +32304,13 @@ begin return res[1] end end - function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) - if tf.in_eager_mode() - space_to_batch_eager(input_, paddings_; name=name, block_size=block_size) - else - space_to_batch_graph(input_, paddings_; name=name, block_size=block_size) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) + if tf.in_eager_mode() + space_to_batch_eager(input_, paddings_; name=name, block_size=block_size) + else + space_to_batch_graph(input_, paddings_; name=name, block_size=block_size) + end end - end end @@ -32320,28 +32320,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StageSize") do - desc = tf.NodeDescription("StageSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageSize") do + desc = tf.NodeDescription("StageSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stage_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StageSize") if capacity !== nothing @@ -32366,13 +32366,13 @@ begin return res[1] end end - function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -32382,24 +32382,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "EmptyTensorList") do - desc = tf.NodeDescription("EmptyTensorList") - element_shape_ = convert(Tensor{Any}, element_shape_) - max_num_elements_ = convert(Tensor{Int32}, max_num_elements_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, max_num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "EmptyTensorList") do + desc = tf.NodeDescription("EmptyTensorList") + element_shape_ = convert(Tensor{Any}, element_shape_) + max_num_elements_ = convert(Tensor{Int32}, max_num_elements_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, max_num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("EmptyTensorList") element_shape_ = convert(tf.EagerTensor, element_shape_) @@ -32420,13 +32420,13 @@ begin return res[1] end end - function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end end @@ -32436,24 +32436,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lu_graph(input_; name=nothing, output_idx_type=nothing) - local desc - tf.with_op_name(name, "Lu") do - desc = tf.NodeDescription("Lu") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if output_idx_type !== nothing - desc["output_idx_type"] = Base.identity(output_idx_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function lu_graph(input_; name=nothing, output_idx_type=nothing) + local desc + tf.with_op_name(name, "Lu") do + desc = tf.NodeDescription("Lu") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if output_idx_type !== nothing + desc["output_idx_type"] = Base.identity(output_idx_type) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + 
push!(out, tf.Tensor(op, out_idx)) + end + out + end function lu_eager(input_; name=nothing, output_idx_type=nothing) desc = tf.EagerOp("Lu") input_ = convert(tf.EagerTensor, input_) @@ -32469,13 +32469,13 @@ begin return res end end - function lu(input_; name=nothing, output_idx_type=nothing) - if tf.in_eager_mode() - lu_eager(input_; name=name, output_idx_type=output_idx_type) - else - lu_graph(input_; name=name, output_idx_type=output_idx_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing) + if tf.in_eager_mode() + lu_eager(input_; name=name, output_idx_type=output_idx_type) + else + lu_graph(input_; name=name, output_idx_type=output_idx_type) + end end - end end @@ -32485,18 +32485,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "DecodeCompressed") do - desc = tf.NodeDescription("DecodeCompressed") - bytes_ = convert(Tensor{String}, bytes_) - tf.add_input(desc, bytes_) - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end + function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "DecodeCompressed") do + desc = tf.NodeDescription("DecodeCompressed") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing) desc = tf.EagerOp("DecodeCompressed") bytes_ = convert(tf.EagerTensor, bytes_) @@ -32511,13 +32511,13 @@ begin return res[1] end end - function decode_compressed(bytes_; name=nothing, compression_type=nothing) - if tf.in_eager_mode() - decode_compressed_eager(bytes_; name=name, compression_type=compression_type) - else - decode_compressed_graph(bytes_; name=name, compression_type=compression_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing) + if tf.in_eager_mode() + decode_compressed_eager(bytes_; name=name, compression_type=compression_type) + else + decode_compressed_graph(bytes_; name=name, compression_type=compression_type) + end end - end end @@ -32527,18 +32527,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "GetSessionTensor") do - desc = tf.NodeDescription("GetSessionTensor") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "GetSessionTensor") do + desc = tf.NodeDescription("GetSessionTensor") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function get_session_tensor_eager(handle_; name=nothing, dtype=nothing) desc = tf.EagerOp("GetSessionTensor") handle_ = convert(tf.EagerTensor, handle_) @@ -32553,13 +32553,13 @@ begin 
return res[1] end end - function get_session_tensor(handle_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - get_session_tensor_eager(handle_; name=name, dtype=dtype) - else - get_session_tensor_graph(handle_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + get_session_tensor_eager(handle_; name=name, dtype=dtype) + else + get_session_tensor_graph(handle_; name=name, dtype=dtype) + end end - end end @@ -32569,25 +32569,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGatherV3") do - desc = tf.NodeDescription("TensorArrayGatherV3") - handle_ = convert(Tensor{Any}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end + function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV3") do + desc = tf.NodeDescription("TensorArrayGatherV3") + handle_ = convert(Tensor{Any}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV3") handle_ = convert(tf.EagerTensor, handle_) @@ -32609,13 +32609,13 @@ begin return res[1] end end - function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end end @@ -32625,18 +32625,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) - local desc - tf.with_op_name(name, "DestroyResourceOp") do - desc = tf.NodeDescription("DestroyResourceOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if ignore_lookup_error !== nothing - 
desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) - end + function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + local desc + tf.with_op_name(name, "DestroyResourceOp") do + desc = tf.NodeDescription("DestroyResourceOp") + resource_ = convert(Tensor{Any}, resource_) + tf.add_input(desc, resource_) + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) desc = tf.EagerOp("DestroyResourceOp") resource_ = convert(tf.EagerTensor, resource_) @@ -32651,13 +32651,13 @@ begin return res[1] end end - function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) - if tf.in_eager_mode() - destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) - else - destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) + if tf.in_eager_mode() + destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + else + destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + end end - end end @@ -32667,33 +32667,33 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - linears_ = convert(Tensor{Float32}, linears_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + linears_ = convert(Tensor{Float32}, linears_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + 
end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -32723,13 +32723,13 @@ begin return res[1] end end - function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -32739,22 +32739,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TextLineReader") do - desc = tf.NodeDescription("TextLineReader") - if skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReader") do + desc = tf.NodeDescription("TextLineReader") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function text_line_reader_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TextLineReader") if skip_header_lines !== nothing @@ -32773,13 +32773,13 @@ begin return res[1] end end - function text_line_reader(; 
name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - else - text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + else + text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + end end - end end @@ -32789,23 +32789,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) - local desc - tf.with_op_name(name, "CreateSummaryDbWriter") do - desc = tf.NodeDescription("CreateSummaryDbWriter") - writer_ = convert(Tensor{Any}, writer_) - db_uri_ = convert(Tensor{String}, db_uri_) - experiment_name_ = convert(Tensor{String}, experiment_name_) - run_name_ = convert(Tensor{String}, run_name_) - user_name_ = convert(Tensor{String}, user_name_) - tf.add_input(desc, writer_) - tf.add_input(desc, db_uri_) - tf.add_input(desc, experiment_name_) - tf.add_input(desc, run_name_) - tf.add_input(desc, user_name_) - end - tf.Tensor(tf.Operation(desc)) + function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + local desc + tf.with_op_name(name, "CreateSummaryDbWriter") do + desc = tf.NodeDescription("CreateSummaryDbWriter") + writer_ = convert(Tensor{Any}, writer_) + db_uri_ = convert(Tensor{String}, db_uri_) + experiment_name_ = convert(Tensor{String}, experiment_name_) + run_name_ = convert(Tensor{String}, run_name_) + user_name_ = convert(Tensor{String}, user_name_) + tf.add_input(desc, writer_) + tf.add_input(desc, db_uri_) + tf.add_input(desc, experiment_name_) + tf.add_input(desc, run_name_) + tf.add_input(desc, user_name_) end + tf.Tensor(tf.Operation(desc)) + end function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) desc = tf.EagerOp("CreateSummaryDbWriter") writer_ = convert(tf.EagerTensor, writer_) @@ -32825,13 +32825,13 @@ begin return res[1] end end - function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) - if tf.in_eager_mode() - create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) - else - create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + if tf.in_eager_mode() + create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + else + create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + end end - end end @@ -32841,18 +32841,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tanh_grad_graph(y_, dy_; name=nothing) - local desc - 
tf.with_op_name(name, "TanhGrad") do - desc = tf.NodeDescription("TanhGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function tanh_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "TanhGrad") do + desc = tf.NodeDescription("TanhGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function tanh_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("TanhGrad") y_ = convert(tf.EagerTensor, y_) @@ -32868,13 +32868,13 @@ begin return res[1] end end - function tanh_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - tanh_grad_eager(y_, dy_; name=name) - else - tanh_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + tanh_grad_eager(y_, dy_; name=name) + else + tanh_grad_graph(y_, dy_; name=name) + end end - end end @@ -32884,15 +32884,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_base64_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DecodeBase64") do - desc = tf.NodeDescription("DecodeBase64") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function decode_base64_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DecodeBase64") do + desc = tf.NodeDescription("DecodeBase64") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function decode_base64_eager(input_; name=nothing) desc = tf.EagerOp("DecodeBase64") input_ = convert(tf.EagerTensor, input_) @@ -32904,46 +32904,46 @@ begin return res[1] end end - function decode_base64(input_; name=nothing) - if tf.in_eager_mode() - decode_base64_eager(input_; name=name) - else - decode_base64_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_base64(input_; name=nothing) + if tf.in_eager_mode() + decode_base64_eager(input_; name=name) + else + decode_base64_graph(input_; name=name) + end end - end end """ - max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) + max_pool_grad_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradGradV2") do - desc = tf.NodeDescription("MaxPoolGradGradV2") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - grad_ = convert(Tensor{Any}, grad_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = 
Base.String(data_format) - end + function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradV2") do + desc = tf.NodeDescription("MaxPoolGradGradV2") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + grad_ = convert(Tensor{Any}, grad_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradGradV2") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -32972,13 +32972,13 @@ begin return res[1] end end - function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end end @@ -32988,22 +32988,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - local desc - tf.with_op_name(name, "AudioSummaryV2") do - desc = tf.NodeDescription("AudioSummaryV2") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - sample_rate_ = convert(Tensor{Float32}, sample_rate_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end + function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "AudioSummaryV2") do + desc = tf.NodeDescription("AudioSummaryV2") + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + tf.add_input(desc, sample_rate_) + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, 
max_outputs=nothing) desc = tf.EagerOp("AudioSummaryV2") tag_ = convert(tf.EagerTensor, tag_) @@ -33022,13 +33022,13 @@ begin return res[1] end end - function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - if tf.in_eager_mode() - audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - else - audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + if tf.in_eager_mode() + audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + else + audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + end end - end end @@ -33038,33 +33038,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - local desc - tf.with_op_name(name, "StatefulPartitionedCall") do - desc = tf.NodeDescription("StatefulPartitionedCall") - args_ = [convert(Tensor{Any}, x) for x = args_] - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) - end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) - end - end - tf.Tensor(tf.Operation(desc)) + function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "StatefulPartitionedCall") do + desc = tf.NodeDescription("StatefulPartitionedCall") + args_ = [convert(Tensor{Any}, x) for x = args_] + tf.add_input(desc, args_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if f !== nothing + desc["f"] = Base.identity(f) + end + if config !== nothing + desc["config"] = Base.String(config) + end + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end end + tf.Tensor(tf.Operation(desc)) + end function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) desc = tf.EagerOp("StatefulPartitionedCall") args_ = convert(tf.EagerTensor, args_) @@ -33094,13 +33094,13 @@ begin return res[1] end end - function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - if tf.in_eager_mode() - stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - else - stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + if tf.in_eager_mode() + stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + else + stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + end end - end end @@ -33110,33 +33110,33 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocatorConcat") do - desc = tf.NodeDescription("_ScopedAllocatorConcat") - backing_ = convert(Tensor{Any}, backing_) - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (backing_, inputs_) = tf.tf_promote(backing_, inputs_) - tf.add_input(desc, backing_) - tf.add_input(desc, inputs_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if reshape !== nothing - desc["reshape"] = Base.Bool(reshape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) + function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorConcat") do + desc = tf.NodeDescription("_ScopedAllocatorConcat") + backing_ = convert(Tensor{Any}, backing_) + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (backing_, inputs_) = tf.tf_promote(backing_, inputs_) + tf.add_input(desc, backing_) + tf.add_input(desc, inputs_) + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if reshape !== nothing + desc["reshape"] = Base.Bool(reshape) + end + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end end + tf.Tensor(tf.Operation(desc)) + end function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) desc = tf.EagerOp("_ScopedAllocatorConcat") backing_ = convert(tf.EagerTensor, backing_) @@ -33167,13 +33167,13 @@ begin return res[1] end end - function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - if tf.in_eager_mode() - _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) - else - _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + if tf.in_eager_mode() + _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) + else + _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, 
id=id, N=N) + end end - end end @@ -33183,29 +33183,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - if min !== nothing - desc["min"] = Base.identity(min) - end - if max !== nothing - desc["max"] = Base.identity(max) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") + gradients_ = convert(Tensor{Float32}, gradients_) + inputs_ = convert(Tensor{Float32}, inputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient") gradients_ = convert(tf.EagerTensor, gradients_) @@ -33231,13 +33231,13 @@ begin return res[1] end end - function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + end end - end end @@ -33247,27 +33247,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "BatchSvd") do - desc = tf.NodeDescription("BatchSvd") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - 
desc["full_matrices"] = Base.Bool(full_matrices) - end + function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "BatchSvd") do + desc = tf.NodeDescription("BatchSvd") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) desc = tf.EagerOp("BatchSvd") input_ = convert(tf.EagerTensor, input_) @@ -33286,13 +33286,13 @@ begin return res end end - function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.in_eager_mode() - batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - else - batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + if tf.in_eager_mode() + batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + else + batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + end end - end end @@ -33302,37 +33302,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapStage") do - desc = tf.NodeDescription("MapStage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapStage") do + desc = tf.NodeDescription("MapStage") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if fake_dtypes !== nothing + 
desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapStage") key_ = convert(tf.EagerTensor, key_) @@ -33366,13 +33366,13 @@ begin return res[1] end end - function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - else - map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + else + map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + end end - end end @@ -33382,37 +33382,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyFtrl") do - desc = tf.NodeDescription("ResourceSparseApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrl") do + desc = tf.NodeDescription("ResourceSparseApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + 
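# ---------------------------------------------------------------------------
# Editorial note, not in the patch: inputs go through a small normalization
# pipeline before being attached to the op. Each argument is first converted
# to a Tensor of the expected element type, related operands are promoted to
# a common dtype via `tf.tf_promote` (analogous to Base.promote, so the op's
# type attributes can be inferred from the operands), and index-like
# arguments are shifted by one to translate Julia's 1-based indices into the
# 0-based indices the TensorFlow kernels expect, as in the line just below:
#
#     indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
# ---------------------------------------------------------------------------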
indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyFtrl") var_ = convert(tf.EagerTensor, var_) @@ -33449,13 +33449,13 @@ begin return res[1] end end - function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -33465,21 +33465,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeNearestNeighbor") do - desc = tf.NodeDescription("ResizeNearestNeighbor") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighbor") do + desc = tf.NodeDescription("ResizeNearestNeighbor") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighbor") images_ = convert(tf.EagerTensor, images_) @@ -33497,13 +33497,13 @@ begin return res[1] end end - function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - 
resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners) + end end - end end @@ -33513,37 +33513,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalCSVDataset") do - desc = tf.NodeDescription("ExperimentalCSVDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - header_ = convert(Tensor{Bool}, header_) - field_delim_ = convert(Tensor{String}, field_delim_) - use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_) - na_value_ = convert(Tensor{String}, na_value_) - select_cols_ = convert(Tensor{Int64}, select_cols_) - record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, header_) - tf.add_input(desc, field_delim_) - tf.add_input(desc, use_quote_delim_) - tf.add_input(desc, na_value_) - tf.add_input(desc, select_cols_) - tf.add_input(desc, record_defaults_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalCSVDataset") do + desc = tf.NodeDescription("ExperimentalCSVDataset") + filenames_ = convert(Tensor{String}, filenames_) + compression_type_ = convert(Tensor{String}, compression_type_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + header_ = convert(Tensor{Bool}, header_) + field_delim_ = convert(Tensor{String}, field_delim_) + use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_) + na_value_ = convert(Tensor{String}, na_value_) + select_cols_ = convert(Tensor{Int64}, select_cols_) + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + tf.add_input(desc, filenames_) + tf.add_input(desc, compression_type_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, header_) + tf.add_input(desc, field_delim_) + tf.add_input(desc, use_quote_delim_) + tf.add_input(desc, na_value_) + tf.add_input(desc, select_cols_) + tf.add_input(desc, record_defaults_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_csv_dataset_eager(filenames_, 
compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalCSVDataset") filenames_ = convert(tf.EagerTensor, filenames_) @@ -33577,13 +33577,13 @@ begin return res[1] end end - function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -33593,27 +33593,27 @@ end Returns x * y element-wise. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklMul") do - desc = tf.NodeDescription("_MklMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMul") do + desc = tf.NodeDescription("_MklMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMul") x_ = convert(tf.EagerTensor, x_) @@ -33633,13 +33633,13 @@ begin return res end end - function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end end @@ -33649,16 +33649,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixDiag") do - desc = tf.NodeDescription("BatchMatrixDiag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) - end - tf.Tensor(tf.Operation(desc)) + function batch_matrix_diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDiag") do + desc = tf.NodeDescription("BatchMatrixDiag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixDiag") diagonal_ = convert(tf.EagerTensor, diagonal_) @@ -33671,13 +33671,13 @@ begin return res[1] end end - function batch_matrix_diag(diagonal_; name=nothing) - if tf.in_eager_mode() - batch_matrix_diag_eager(diagonal_; name=name) - else - batch_matrix_diag_graph(diagonal_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing) + if tf.in_eager_mode() + batch_matrix_diag_eager(diagonal_; name=name) + else + batch_matrix_diag_graph(diagonal_; name=name) + end end - end end @@ -33687,16 +33687,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_inf_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsInf") do - desc = tf.NodeDescription("IsInf") 
- x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function is_inf_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsInf") do + desc = tf.NodeDescription("IsInf") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function is_inf_eager(x_; name=nothing) desc = tf.EagerOp("IsInf") x_ = convert(tf.EagerTensor, x_) @@ -33709,13 +33709,13 @@ begin return res[1] end end - function is_inf(x_; name=nothing) - if tf.in_eager_mode() - is_inf_eager(x_; name=name) - else - is_inf_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_inf(x_; name=nothing) + if tf.in_eager_mode() + is_inf_eager(x_; name=name) + else + is_inf_graph(x_; name=name) + end end - end end @@ -33725,56 +33725,56 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FixedUnigramCandidateSampler") do - desc = tf.NodeDescription("FixedUnigramCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if vocab_file !== nothing - desc["vocab_file"] = Base.String(vocab_file) - end - if distortion !== nothing - desc["distortion"] = Base.identity(distortion) - end - if num_reserved_ids !== nothing - desc["num_reserved_ids"] = Base.Int(num_reserved_ids) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard !== nothing - desc["shard"] = Base.Int(shard) - end - if unigrams !== nothing - desc["unigrams"] = map(Base.identity, unigrams) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FixedUnigramCandidateSampler") do + desc = tf.NodeDescription("FixedUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if vocab_file !== nothing + desc["vocab_file"] = Base.String(vocab_file) + end + if distortion !== nothing + desc["distortion"] = Base.identity(distortion) + end + if num_reserved_ids 
!== nothing + desc["num_reserved_ids"] = Base.Int(num_reserved_ids) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard !== nothing + desc["shard"] = Base.Int(shard) + end + if unigrams !== nothing + desc["unigrams"] = map(Base.identity, unigrams) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FixedUnigramCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -33822,13 +33822,13 @@ begin return res end end - function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) - else - fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) + else + fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) + end end - end end @@ -33838,20 +33838,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unravel_index_graph(indices_, dims_; name=nothing) - local desc - tf.with_op_name(name, "UnravelIndex") do - desc = tf.NodeDescription("UnravelIndex") - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - dims_ = convert(Tensor{Int32}, dims_) - dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) - (indices_, dims_) = tf.tf_promote(indices_, dims_) - tf.add_input(desc, indices_) - 
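# ---------------------------------------------------------------------------
# Editorial note: in the UnravelIndex wrapper above, the 1-based shift is
# applied to both `indices_` and `dims_`. Shifting `indices_` matches the
# convention used throughout this file, but `dims_` is a shape rather than
# an index, so `dims_ - 1` looks like the generator's index heuristic firing
# too broadly; worth double-checking against the kernel's semantics before
# relying on this wrapper.
# ---------------------------------------------------------------------------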
tf.add_input(desc, dims_) - end - tf.Tensor(tf.Operation(desc)) + function unravel_index_graph(indices_, dims_; name=nothing) + local desc + tf.with_op_name(name, "UnravelIndex") do + desc = tf.NodeDescription("UnravelIndex") + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + dims_ = convert(Tensor{Int32}, dims_) + dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) + (indices_, dims_) = tf.tf_promote(indices_, dims_) + tf.add_input(desc, indices_) + tf.add_input(desc, dims_) end + tf.Tensor(tf.Operation(desc)) + end function unravel_index_eager(indices_, dims_; name=nothing) desc = tf.EagerOp("UnravelIndex") indices_ = convert(tf.EagerTensor, indices_) @@ -33867,13 +33867,13 @@ begin return res[1] end end - function unravel_index(indices_, dims_; name=nothing) - if tf.in_eager_mode() - unravel_index_eager(indices_, dims_; name=name) - else - unravel_index_graph(indices_, dims_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unravel_index(indices_, dims_; name=nothing) + if tf.in_eager_mode() + unravel_index_eager(indices_, dims_; name=name) + else + unravel_index_graph(indices_, dims_; name=name) + end end - end end @@ -33883,39 +33883,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyFtrlV2") do - desc = tf.NodeDescription("SparseApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrlV2") do + desc = tf.NodeDescription("SparseApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, 
linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrlV2") var_ = convert(tf.EagerTensor, var_) @@ -33958,13 +33958,13 @@ begin return res[1] end end - function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -33974,23 +33974,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Max") do - desc = tf.NodeDescription("Max") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Max") do + desc = tf.NodeDescription("Max") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Max") input_ = convert(tf.EagerTensor, input_) @@ -34009,13 +34009,13 @@ begin return res[1] end end - function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - 
max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -34025,16 +34025,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "IFFT2D") do - desc = tf.NodeDescription("IFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function ifft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT2D") do + desc = tf.NodeDescription("IFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function ifft2d_eager(input_; name=nothing) desc = tf.EagerOp("IFFT2D") input_ = convert(tf.EagerTensor, input_) @@ -34047,13 +34047,13 @@ begin return res[1] end end - function ifft2d(input_; name=nothing) - if tf.in_eager_mode() - ifft2d_eager(input_; name=name) - else - ifft2d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft2d(input_; name=nothing) + if tf.in_eager_mode() + ifft2d_eager(input_; name=name) + else + ifft2d_graph(input_; name=name) + end end - end end @@ -34063,34 +34063,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - local desc - tf.with_op_name(name, "SparseConcat") do - desc = tf.NodeDescription("SparseConcat") - indices_ = [convert(Tensor{Int64}, x) for x = indices_] - values_ = [convert(Tensor{Any}, x) for x = values_] - shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - if concat_dim !== nothing - concat_dim = Base.Int(concat_dim) - 1 - end - if concat_dim !== nothing - desc["concat_dim"] = Base.Int(concat_dim) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + local desc + tf.with_op_name(name, "SparseConcat") do + desc = tf.NodeDescription("SparseConcat") + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + values_ = [convert(Tensor{Any}, x) for x = values_] + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + if concat_dim !== nothing + concat_dim = Base.Int(concat_dim) - 1 + end + if concat_dim !== nothing + desc["concat_dim"] = Base.Int(concat_dim) + end + if N !== nothing + desc["N"] = Base.Int(N) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_concat_eager(indices_, 
values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) desc = tf.EagerOp("SparseConcat") indices_ = convert(tf.EagerTensor, indices_) @@ -34116,13 +34116,13 @@ begin return res end end - function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - if tf.in_eager_mode() - sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) - else - sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + if tf.in_eager_mode() + sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) + else + sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) + end end - end end @@ -34132,18 +34132,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function histogram_summary_graph(tag_, values_; name=nothing) - local desc - tf.with_op_name(name, "HistogramSummary") do - desc = tf.NodeDescription("HistogramSummary") - tag_ = convert(Tensor{String}, tag_) - values_ = convert(Tensor{Float32}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function histogram_summary_graph(tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "HistogramSummary") do + desc = tf.NodeDescription("HistogramSummary") + tag_ = convert(Tensor{String}, tag_) + values_ = convert(Tensor{Float32}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, tag_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function histogram_summary_eager(tag_, values_; name=nothing) desc = tf.EagerOp("HistogramSummary") tag_ = convert(tf.EagerTensor, tag_) @@ -34158,13 +34158,13 @@ begin return res[1] end end - function histogram_summary(tag_, values_; name=nothing) - if tf.in_eager_mode() - histogram_summary_eager(tag_, values_; name=name) - else - histogram_summary_graph(tag_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_summary(tag_, values_; name=nothing) + if tf.in_eager_mode() + histogram_summary_eager(tag_, values_; name=name) + else + histogram_summary_graph(tag_, values_; name=name) + end end - end end @@ -34174,20 +34174,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_sum_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentSum") do - desc = tf.NodeDescription("SegmentSum") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function segment_sum_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentSum") do + desc = tf.NodeDescription("SegmentSum") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + 
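# ---------------------------------------------------------------------------
# Editorial note, not in the patch: ops with several outputs (SparseConcat
# above, and BatchSvd and _MklMul earlier) return a Vector of tensors rather
# than a single Tensor. The generated pattern is:
#
#     out = tf.Tensor[]
#     op = tf.Operation(desc)
#     for out_idx = 1:3
#         push!(out, tf.Tensor(op, out_idx))
#     end
#     out
#
# so a caller destructures the result; a sketch, with illustrative names:
#
#     out_indices, out_values, out_shape =
#         sparse_concat(inds, vals, shps; concat_dim=1)
# ---------------------------------------------------------------------------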
tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function segment_sum_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentSum") data_ = convert(tf.EagerTensor, data_) @@ -34203,13 +34203,13 @@ begin return res[1] end end - function segment_sum(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_sum_eager(data_, segment_ids_; name=name) - else - segment_sum_graph(data_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_sum_eager(data_, segment_ids_; name=name) + else + segment_sum_graph(data_, segment_ids_; name=name) + end end - end end @@ -34219,16 +34219,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function exp_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Exp") do - desc = tf.NodeDescription("Exp") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function exp_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Exp") do + desc = tf.NodeDescription("Exp") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function exp_eager(x_; name=nothing) desc = tf.EagerOp("Exp") x_ = convert(tf.EagerTensor, x_) @@ -34241,13 +34241,13 @@ begin return res[1] end end - function exp(x_; name=nothing) - if tf.in_eager_mode() - exp_eager(x_; name=name) - else - exp_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exp(x_; name=nothing) + if tf.in_eager_mode() + exp_eager(x_; name=name) + else + exp_graph(x_; name=name) + end end - end end @@ -34257,22 +34257,22 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - local desc - tf.with_op_name(name, "ConfigureDistributedTPU") do - desc = tf.NodeDescription("ConfigureDistributedTPU") - if embedding_config !== nothing - desc["embedding_config"] = Base.String(embedding_config) - end - if tpu_embedding_config !== nothing - desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) - end - if is_global_init !== nothing - desc["is_global_init"] = Base.Bool(is_global_init) - end + function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + local desc + tf.with_op_name(name, "ConfigureDistributedTPU") do + desc = tf.NodeDescription("ConfigureDistributedTPU") + if embedding_config !== nothing + desc["embedding_config"] = Base.String(embedding_config) + end + if tpu_embedding_config !== nothing + desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) + end + if is_global_init !== nothing + desc["is_global_init"] = Base.Bool(is_global_init) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function configure_distributed_tpu_eager(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) desc = tf.EagerOp("ConfigureDistributedTPU") if embedding_config !== nothing @@ -34291,13 +34291,13 @@ begin return res[1] end end - function configure_distributed_tpu(; name=nothing, 
embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - if tf.in_eager_mode() - configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) - else - configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + if tf.in_eager_mode() + configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) + else + configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) + end end - end end @@ -34307,26 +34307,26 @@ end A placeholder op for multiple values that will be sent from TensorFlow to a """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "_XlaSendFromHost") do - desc = tf.NodeDescription("_XlaSendFromHost") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - dynamic_key_ = convert(Tensor{String}, dynamic_key_) - tf.add_input(desc, inputs_) - tf.add_input(desc, dynamic_key_) - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) + function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaSendFromHost") do + desc = tf.NodeDescription("_XlaSendFromHost") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + dynamic_key_ = convert(Tensor{String}, dynamic_key_) + tf.add_input(desc, inputs_) + tf.add_input(desc, dynamic_key_) + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end end + tf.Tensor(tf.Operation(desc)) + end function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaSendFromHost") inputs_ = convert(tf.EagerTensor, inputs_) @@ -34349,13 +34349,13 @@ begin return res[1] end end - function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) - else - _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) + else + 
_xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) + end end - end end @@ -34365,16 +34365,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_handle_v2_graph(value_; name=nothing) - local desc - tf.with_op_name(name, "GetSessionHandleV2") do - desc = tf.NodeDescription("GetSessionHandleV2") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - end - tf.Tensor(tf.Operation(desc)) + function get_session_handle_v2_graph(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandleV2") do + desc = tf.NodeDescription("GetSessionHandleV2") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) end + tf.Tensor(tf.Operation(desc)) + end function get_session_handle_v2_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandleV2") value_ = convert(tf.EagerTensor, value_) @@ -34387,13 +34387,13 @@ begin return res[1] end end - function get_session_handle_v2(value_; name=nothing) - if tf.in_eager_mode() - get_session_handle_v2_eager(value_; name=name) - else - get_session_handle_v2_graph(value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle_v2(value_; name=nothing) + if tf.in_eager_mode() + get_session_handle_v2_eager(value_; name=name) + else + get_session_handle_v2_graph(value_; name=name) + end end - end end @@ -34403,18 +34403,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "ReluGrad") do - desc = tf.NodeDescription("ReluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function relu_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "ReluGrad") do + desc = tf.NodeDescription("ReluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function relu_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("ReluGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -34430,13 +34430,13 @@ begin return res[1] end end - function relu_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - relu_grad_eager(gradients_, features_; name=name) - else - relu_grad_graph(gradients_, features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + relu_grad_eager(gradients_, features_; name=name) + else + relu_grad_graph(gradients_, features_; name=name) + end end - end end @@ -34446,23 +34446,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentMin") do - desc = tf.NodeDescription("UnsortedSegmentMin") - data_ = convert(Tensor{Any}, data_) - 
segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMin") do + desc = tf.NodeDescription("UnsortedSegmentMin") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMin") data_ = convert(tf.EagerTensor, data_) @@ -34481,13 +34481,13 @@ begin return res[1] end end - function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name) + end end - end end @@ -34497,43 +34497,43 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseExample") do - desc = tf.NodeDescription("ParseExample") - serialized_ = convert(Tensor{String}, serialized_) - names_ = convert(Tensor{String}, names_) - sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_] - dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_] - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, names_) - tf.add_input(desc, sparse_keys_) - tf.add_input(desc, dense_keys_) - tf.add_input(desc, dense_defaults_) - if Nsparse !== nothing - desc["Nsparse"] = Base.Int(Nsparse) - end - if Ndense !== nothing - desc["Ndense"] = Base.Int(Ndense) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, 
sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseExample") do + desc = tf.NodeDescription("ParseExample") + serialized_ = convert(Tensor{String}, serialized_) + names_ = convert(Tensor{String}, names_) + sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_] + dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_] + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + tf.add_input(desc, serialized_) + tf.add_input(desc, names_) + tf.add_input(desc, sparse_keys_) + tf.add_input(desc, dense_keys_) + tf.add_input(desc, dense_defaults_) + if Nsparse !== nothing + desc["Nsparse"] = Base.Int(Nsparse) + end + if Ndense !== nothing + desc["Ndense"] = Base.Int(Ndense) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) desc = tf.EagerOp("ParseExample") serialized_ = convert(tf.EagerTensor, serialized_) @@ -34568,13 +34568,13 @@ begin return res end end - function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - if tf.in_eager_mode() - parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - else - parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + if tf.in_eager_mode() + parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + else + parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + end end - end end @@ -34584,23 +34584,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueV2") do - desc = tf.NodeDescription("QueueEnqueueV2") - handle_ = convert(Tensor{Any}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_enqueue_v2_graph(handle_, 
components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueV2") do + desc = tf.NodeDescription("QueueEnqueueV2") + handle_ = convert(Tensor{Any}, handle_) + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, handle_) + tf.add_input(desc, components_) + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueEnqueueV2") handle_ = convert(tf.EagerTensor, handle_) @@ -34620,13 +34620,13 @@ begin return res[1] end end - function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - end end @@ -34636,25 +34636,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterNdAdd") do - desc = tf.NodeDescription("ScatterNdAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdAdd") do + desc = tf.NodeDescription("ScatterNdAdd") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdAdd") ref_ = convert(tf.EagerTensor, ref_) @@ -34676,13 +34676,13 @@ begin return res[1] end end - function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_nd_add_graph(ref_, indices_, 
updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -34692,15 +34692,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumRecordsProducedV2") do - desc = tf.NodeDescription("ReaderNumRecordsProducedV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumRecordsProducedV2") do + desc = tf.NodeDescription("ReaderNumRecordsProducedV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderNumRecordsProducedV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -34712,13 +34712,13 @@ begin return res[1] end end - function reader_num_records_produced_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_records_produced_v2_eager(reader_handle_; name=name) - else - reader_num_records_produced_v2_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_records_produced_v2_eager(reader_handle_; name=name) + else + reader_num_records_produced_v2_graph(reader_handle_; name=name) + end end - end end @@ -34728,33 +34728,33 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - mg_ = convert(Tensor{Float32}, mg_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, mg_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + mg_ = convert(Tensor{Float32}, mg_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, mg_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -34784,13 +34784,13 @@ begin return res[1] end end - function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, 
num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -34800,21 +34800,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "AssignSub") do - desc = tf.NodeDescription("AssignSub") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignSub") do + desc = tf.NodeDescription("AssignSub") + ref_ = convert(Tensor{Any}, ref_) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + tf.add_input(desc, ref_) + tf.add_input(desc, value_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) desc = tf.EagerOp("AssignSub") ref_ = convert(tf.EagerTensor, ref_) @@ -34833,13 +34833,13 @@ begin return res[1] end end - function assign_sub(ref_, value_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_sub_eager(ref_, value_; name=name, use_locking=use_locking) - else - assign_sub_graph(ref_, value_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_sub_eager(ref_, value_; name=name, use_locking=use_locking) + else + assign_sub_graph(ref_, value_; name=name, use_locking=use_locking) + end end - end end @@ -34849,23 +34849,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentSum") do - desc = tf.NodeDescription("UnsortedSegmentSum") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentSum") do + desc = tf.NodeDescription("UnsortedSegmentSum") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentSum") data_ = convert(tf.EagerTensor, 
data_) @@ -34884,54 +34884,54 @@ begin return res[1] end end - function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name) + end end - end end """ - fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm_grad(y_backprop, x, scale, reserve_space_1, reserve_space_2; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNormGrad") do - desc = tf.NodeDescription("FusedBatchNormGrad") - y_backprop_ = convert(Tensor{Any}, y_backprop_) - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) - reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) - (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) - tf.add_input(desc, y_backprop_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, reserve_space_1_) - tf.add_input(desc, reserve_space_2_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end + function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormGrad") do + desc = tf.NodeDescription("FusedBatchNormGrad") + y_backprop_ = convert(Tensor{Any}, y_backprop_) + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) + reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) + (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) + tf.add_input(desc, y_backprop_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, reserve_space_1_) + tf.add_input(desc, reserve_space_2_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - out + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, 
data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNormGrad") y_backprop_ = convert(tf.EagerTensor, y_backprop_) @@ -34965,46 +34965,46 @@ begin return res end end - function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - end end """ - max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=NHWC) + max_pool_grad_v2(orig_input, orig_output, grad, ksize, strides; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradV2") do - desc = tf.NodeDescription("MaxPoolGradV2") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradV2") do + desc = tf.NodeDescription("MaxPoolGradV2") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + ksize_ = convert(Tensor{Int32}, ksize_) + strides_ = convert(Tensor{Int32}, strides_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + tf.add_input(desc, ksize_) + tf.add_input(desc, strides_) + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, 
padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGradV2") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -35033,13 +35033,13 @@ begin return res[1] end end - function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end end @@ -35049,19 +35049,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCreateEnsemble") do - desc = tf.NodeDescription("BoostedTreesCreateEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - stamp_token_ = convert(Tensor{Int64}, stamp_token_) - tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - end - tf.Tensor(tf.Operation(desc)) + function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCreateEnsemble") do + desc = tf.NodeDescription("BoostedTreesCreateEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, stamp_token_) + tf.add_input(desc, tree_ensemble_serialized_) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) desc = tf.EagerOp("BoostedTreesCreateEnsemble") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -35077,13 +35077,13 @@ begin return res[1] end end - function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.in_eager_mode() - boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - else - boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.in_eager_mode() + boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + 
boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end end - end end @@ -35093,28 +35093,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapIncompleteSize") do - desc = tf.NodeDescription("OrderedMapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapIncompleteSize") do + desc = tf.NodeDescription("OrderedMapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapIncompleteSize") if capacity !== nothing @@ -35139,13 +35139,13 @@ begin return res[1] end end - function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -35155,33 +35155,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) - local desc - tf.with_op_name(name, "Skipgram") do - desc = tf.NodeDescription("Skipgram") - if filename !== nothing - desc["filename"] = Base.String(filename) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) - end - if window_size 
!== nothing - desc["window_size"] = Base.Int(window_size) - end - if min_count !== nothing - desc["min_count"] = Base.Int(min_count) - end - if subsample !== nothing - desc["subsample"] = Base.identity(subsample) - end + function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + local desc + tf.with_op_name(name, "Skipgram") do + desc = tf.NodeDescription("Skipgram") + if filename !== nothing + desc["filename"] = Base.String(filename) + end + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:7 - push!(out, tf.Tensor(op, out_idx)) + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) end - out + if min_count !== nothing + desc["min_count"] = Base.Int(min_count) + end + if subsample !== nothing + desc["subsample"] = Base.identity(subsample) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:7 + push!(out, tf.Tensor(op, out_idx)) end + out + end function skipgram_eager(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) desc = tf.EagerOp("Skipgram") if filename !== nothing @@ -35206,13 +35206,13 @@ begin return res end end - function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) - if tf.in_eager_mode() - skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) - else - skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + if tf.in_eager_mode() + skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + else + skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + end end - end end @@ -35222,23 +35222,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ArgMin") do - desc = tf.NodeDescription("ArgMin") - input_ = convert(Tensor{Any}, input_) - dimension_ = convert(Tensor{Int32}, dimension_) - dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end + function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMin") do + desc = tf.NodeDescription("ArgMin") + input_ = convert(Tensor{Any}, input_) + dimension_ = convert(Tensor{Int32}, dimension_) + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + (input_,) = tf.tf_promote(input_) + (dimension_,) = tf.tf_promote(dimension_) + tf.add_input(desc, input_) + tf.add_input(desc, dimension_) + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) end - tf.Tensor(tf.Operation(desc)) 
end + tf.Tensor(tf.Operation(desc)) + end function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) desc = tf.EagerOp("ArgMin") input_ = convert(tf.EagerTensor, input_) @@ -35257,13 +35257,13 @@ begin return res[1] end end - function arg_min(input_, dimension_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - arg_min_eager(input_, dimension_; name=name, output_type=output_type) - else - arg_min_graph(input_, dimension_; name=name, output_type=output_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + arg_min_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_min_graph(input_, dimension_; name=name, output_type=output_type) + end end - end end @@ -35273,23 +35273,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueMany") do - desc = tf.NodeDescription("QueueDequeueMany") - handle_ = convert(Tensor{String}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueMany") do + desc = tf.NodeDescription("QueueDequeueMany") + handle_ = convert(Tensor{String}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueMany") handle_ = convert(tf.EagerTensor, handle_) @@ -35309,13 +35309,13 @@ begin return res[1] end end - function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -35325,20 +35325,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do - desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, 
tree_ensemble_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesSerializeEnsemble") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -35350,13 +35350,13 @@ begin return res end end - function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name) - else - boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name) + else + boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name) + end end - end end @@ -35366,18 +35366,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function minimum_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Minimum") do - desc = tf.NodeDescription("Minimum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function minimum_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Minimum") do + desc = tf.NodeDescription("Minimum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function minimum_eager(x_, y_; name=nothing) desc = tf.EagerOp("Minimum") x_ = convert(tf.EagerTensor, x_) @@ -35393,39 +35393,39 @@ begin return res[1] end end - function minimum(x_, y_; name=nothing) - if tf.in_eager_mode() - minimum_eager(x_, y_; name=name) - else - minimum_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function minimum(x_, y_; name=nothing) + if tf.in_eager_mode() + minimum_eager(x_, y_; name=name) + else + minimum_graph(x_, y_; name=name) + end end - end end """ - substr(input, pos, len; unit=BYTE) + substr(input, pos, len; unit=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) - local desc - tf.with_op_name(name, "Substr") do - desc = tf.NodeDescription("Substr") - input_ = convert(Tensor{String}, input_) - pos_ = convert(Tensor{Any}, pos_) - len_ = convert(Tensor{Any}, len_) - (pos_, len_) = tf.tf_promote(pos_, len_) - tf.add_input(desc, input_) - tf.add_input(desc, pos_) - tf.add_input(desc, len_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end + function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "Substr") do + desc = 
tf.NodeDescription("Substr") + input_ = convert(Tensor{String}, input_) + pos_ = convert(Tensor{Any}, pos_) + len_ = convert(Tensor{Any}, len_) + (pos_, len_) = tf.tf_promote(pos_, len_) + tf.add_input(desc, input_) + tf.add_input(desc, pos_) + tf.add_input(desc, len_) + if unit !== nothing + desc["unit"] = Base.String(unit) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function substr_eager(input_, pos_, len_; name=nothing, unit=nothing) desc = tf.EagerOp("Substr") input_ = convert(tf.EagerTensor, input_) @@ -35446,13 +35446,13 @@ begin return res[1] end end - function substr(input_, pos_, len_; name=nothing, unit=nothing) - if tf.in_eager_mode() - substr_eager(input_, pos_, len_; name=name, unit=unit) - else - substr_graph(input_, pos_, len_; name=name, unit=unit) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing) + if tf.in_eager_mode() + substr_eager(input_, pos_, len_; name=name, unit=unit) + else + substr_graph(input_, pos_, len_; name=name, unit=unit) + end end - end end @@ -35462,15 +35462,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueSize") do - desc = tf.NodeDescription("QueueSize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function queue_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueSize") do + desc = tf.NodeDescription("QueueSize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function queue_size_eager(handle_; name=nothing) desc = tf.EagerOp("QueueSize") handle_ = convert(tf.EagerTensor, handle_) @@ -35482,13 +35482,13 @@ begin return res[1] end end - function queue_size(handle_; name=nothing) - if tf.in_eager_mode() - queue_size_eager(handle_; name=name) - else - queue_size_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size(handle_; name=nothing) + if tf.in_eager_mode() + queue_size_eager(handle_; name=name) + else + queue_size_graph(handle_; name=name) + end end - end end @@ -35498,35 +35498,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyFtrlV2") do - desc = tf.NodeDescription("ApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - 
end - tf.Tensor(tf.Operation(desc)) - end + function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrlV2") do + desc = tf.NodeDescription("ApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyFtrlV2") var_ = convert(tf.EagerTensor, var_) @@ -35566,13 +35566,13 @@ begin return res[1] end end - function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -35582,22 +35582,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMean") do - desc = tf.NodeDescription("SparseSegmentMean") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMean") do + desc = tf.NodeDescription("SparseSegmentMean") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + (data_,) = tf.tf_promote(data_) + (indices_,) = 
tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) desc = tf.EagerOp("SparseSegmentMean") data_ = convert(tf.EagerTensor, data_) @@ -35615,13 +35615,13 @@ begin return res[1] end end - function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) + end end - end end @@ -35631,29 +35631,29 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -35679,13 +35679,13 @@ begin return res[1] end end - function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, 
table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -35695,29 +35695,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyProximalAdagrad") do - desc = tf.NodeDescription("ResourceApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -35746,13 +35746,13 @@ begin return res[1] end end - function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) - else - resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -35762,25 
+35762,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGatherV2") do - desc = tf.NodeDescription("TensorArrayGatherV2") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end + function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV2") do + desc = tf.NodeDescription("TensorArrayGatherV2") + handle_ = convert(Tensor{String}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayGatherV2") handle_ = convert(tf.EagerTensor, handle_) @@ -35802,13 +35802,13 @@ begin return res[1] end end - function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end end @@ -35818,18 +35818,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function less_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Less") do - desc = tf.NodeDescription("Less") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function less_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Less") do + desc = tf.NodeDescription("Less") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function less_eager(x_, y_; name=nothing) desc = tf.EagerOp("Less") x_ = convert(tf.EagerTensor, x_) @@ -35845,13 +35845,13 @@ begin return res[1] end end - function less(x_, y_; name=nothing) - if tf.in_eager_mode() - less_eager(x_, y_; name=name) - else - less_graph(x_, 
y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less(x_, y_; name=nothing) + if tf.in_eager_mode() + less_eager(x_, y_; name=name) + else + less_graph(x_, y_; name=name) + end end - end end @@ -35861,19 +35861,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function host_const_graph(; name=nothing, value=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "HostConst") do - desc = tf.NodeDescription("HostConst") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HostConst") do + desc = tf.NodeDescription("HostConst") + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function host_const_eager(; name=nothing, value=nothing, dtype=nothing) desc = tf.EagerOp("HostConst") if value !== nothing @@ -35889,13 +35889,13 @@ begin return res[1] end end - function host_const(; name=nothing, value=nothing, dtype=nothing) - if tf.in_eager_mode() - host_const_eager(; name=name, value=value, dtype=dtype) - else - host_const_graph(; name=name, value=value, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing) + if tf.in_eager_mode() + host_const_eager(; name=name, value=value, dtype=dtype) + else + host_const_graph(; name=name, value=value, dtype=dtype) + end end - end end @@ -35905,21 +35905,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "UpperBound") do - desc = tf.NodeDescription("UpperBound") - sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) - values_ = convert(Tensor{Any}, values_) - (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) - tf.add_input(desc, sorted_inputs_) - tf.add_input(desc, values_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "UpperBound") do + desc = tf.NodeDescription("UpperBound") + sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) + values_ = convert(Tensor{Any}, values_) + (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) + tf.add_input(desc, sorted_inputs_) + tf.add_input(desc, values_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) desc = tf.EagerOp("UpperBound") sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) @@ -35938,13 +35938,13 @@ begin return res[1] end end - function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) - else - upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function upper_bound(sorted_inputs_, values_; 
name=nothing, out_type=nothing) + if tf.in_eager_mode() + upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) + else + upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + end end - end end @@ -35954,20 +35954,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListGetItem") do - desc = tf.NodeDescription("TensorListGetItem") - input_handle_ = convert(Tensor{Any}, input_handle_) - index_ = convert(Tensor{Int32}, index_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGetItem") do + desc = tf.NodeDescription("TensorListGetItem") + input_handle_ = convert(Tensor{Any}, input_handle_) + index_ = convert(Tensor{Int32}, index_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, index_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_get_item_eager(input_handle_, index_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGetItem") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -35984,13 +35984,13 @@ begin return res[1] end end - function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype) - else - tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype) + else + tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype) + end end - end end @@ -36000,25 +36000,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVars") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVars") - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVars") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVars") + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end 
+ if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVars") inputs_ = convert(tf.EagerTensor, inputs_) @@ -36040,13 +36040,13 @@ begin return res[1] end end - function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - end end @@ -36056,15 +36056,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do - desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) - end - tf.Tensor(tf.Operation(desc)) + function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do + desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + tf.add_input(desc, quantile_stream_resource_handle_) end + tf.Tensor(tf.Operation(desc)) + end function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -36076,13 +36076,13 @@ begin return res[1] end end - function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) - if tf.in_eager_mode() - is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name) - else - is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) + if tf.in_eager_mode() + is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name) + else + is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name) + end end - end end @@ -36092,24 +36092,24 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadUpToV2") do - desc = tf.NodeDescription("ReaderReadUpToV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - queue_handle_ = convert(Tensor{Any}, queue_handle_) - num_records_ = convert(Tensor{Int64}, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadUpToV2") do + desc = tf.NodeDescription("ReaderReadUpToV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + queue_handle_ = convert(Tensor{Any}, queue_handle_) + num_records_ = convert(Tensor{Int64}, num_records_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + tf.add_input(desc, num_records_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) desc = tf.EagerOp("ReaderReadUpToV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -36125,13 +36125,13 @@ begin return res end end - function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) - if tf.in_eager_mode() - reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name) - else - reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.in_eager_mode() + reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name) + end end - end end @@ -36141,18 +36141,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function complex_graph(real_, imag_; name=nothing) - local desc - tf.with_op_name(name, "Complex") do - desc = tf.NodeDescription("Complex") - real_ = convert(Tensor{Float32}, real_) - imag_ = convert(Tensor{Float32}, imag_) - (real_, imag_) = tf.tf_promote(real_, imag_) - tf.add_input(desc, real_) - tf.add_input(desc, imag_) - end - tf.Tensor(tf.Operation(desc)) + function complex_graph(real_, imag_; name=nothing) + local desc + tf.with_op_name(name, "Complex") do + desc = tf.NodeDescription("Complex") + real_ = convert(Tensor{Float32}, real_) + imag_ = convert(Tensor{Float32}, imag_) + (real_, imag_) = tf.tf_promote(real_, imag_) + tf.add_input(desc, real_) + tf.add_input(desc, imag_) end + tf.Tensor(tf.Operation(desc)) + end function complex_eager(real_, imag_; name=nothing) desc = tf.EagerOp("Complex") real_ = convert(tf.EagerTensor, real_) @@ -36168,13 +36168,13 @@ begin return res[1] end end - function complex(real_, imag_; name=nothing) - if tf.in_eager_mode() - complex_eager(real_, imag_; name=name) - else - complex_graph(real_, imag_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex(real_, imag_; name=nothing) + if tf.in_eager_mode() + 
complex_eager(real_, imag_; name=name) + else + complex_graph(real_, imag_; name=name) + end end - end end @@ -36184,24 +36184,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListReserve") do - desc = tf.NodeDescription("TensorListReserve") - element_shape_ = convert(Tensor{Any}, element_shape_) - num_elements_ = convert(Tensor{Int32}, num_elements_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListReserve") do + desc = tf.NodeDescription("TensorListReserve") + element_shape_ = convert(Tensor{Any}, element_shape_) + num_elements_ = convert(Tensor{Int32}, num_elements_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, element_shape_) + tf.add_input(desc, num_elements_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListReserve") element_shape_ = convert(tf.EagerTensor, element_shape_) @@ -36222,13 +36222,13 @@ begin return res[1] end end - function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end end @@ -36238,19 +36238,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitcast_graph(input_; name=nothing, type_=nothing) - local desc - tf.with_op_name(name, "Bitcast") do - desc = tf.NodeDescription("Bitcast") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if type_ !== nothing - desc["type"] = Base.identity(type_) - end + function bitcast_graph(input_; name=nothing, type_=nothing) + local desc + tf.with_op_name(name, "Bitcast") do + desc = tf.NodeDescription("Bitcast") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if type_ !== nothing + desc["type"] = Base.identity(type_) end - tf.Tensor(tf.Operation(desc)) end + 
tf.Tensor(tf.Operation(desc)) + end function bitcast_eager(input_; name=nothing, type_=nothing) desc = tf.EagerOp("Bitcast") input_ = convert(tf.EagerTensor, input_) @@ -36266,13 +36266,13 @@ begin return res[1] end end - function bitcast(input_; name=nothing, type_=nothing) - if tf.in_eager_mode() - bitcast_eager(input_; name=name, type_=type_) - else - bitcast_graph(input_; name=name, type_=type_) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitcast(input_; name=nothing, type_=nothing) + if tf.in_eager_mode() + bitcast_eager(input_; name=name, type_=type_) + else + bitcast_graph(input_; name=name, type_=type_) + end end - end end @@ -36282,28 +36282,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PriorityQueue") do - desc = tf.NodeDescription("PriorityQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueue") do + desc = tf.NodeDescription("PriorityQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function priority_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PriorityQueue") if component_types !== nothing @@ -36328,13 +36328,13 @@ begin return res[1] end end - function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -36344,58 +36344,58 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do - desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") - t_ = convert(Tensor{Any}, t_) - t_min_ = convert(Tensor{Float32}, t_min_) - t_max_ = convert(Tensor{Float32}, t_max_) - m_ = convert(Tensor{Any}, m_) - m_min_ = convert(Tensor{Float32}, m_min_) - m_max_ = convert(Tensor{Float32}, m_max_) - v_ = convert(Tensor{Any}, v_) - v_min_ = convert(Tensor{Float32}, v_min_) - v_max_ = convert(Tensor{Float32}, v_max_) - beta_ = convert(Tensor{Any}, beta_) - beta_min_ = convert(Tensor{Float32}, beta_min_) - beta_max_ = convert(Tensor{Float32}, beta_max_) - gamma_ = convert(Tensor{Any}, gamma_) - gamma_min_ = convert(Tensor{Float32}, gamma_min_) - gamma_max_ = convert(Tensor{Float32}, gamma_max_) - (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) - tf.add_input(desc, t_) - tf.add_input(desc, t_min_) - tf.add_input(desc, t_max_) - tf.add_input(desc, m_) - tf.add_input(desc, m_min_) - tf.add_input(desc, m_max_) - tf.add_input(desc, v_) - tf.add_input(desc, v_min_) - tf.add_input(desc, v_max_) - tf.add_input(desc, beta_) - tf.add_input(desc, beta_min_) - tf.add_input(desc, beta_max_) - tf.add_input(desc, gamma_) - tf.add_input(desc, gamma_min_) - tf.add_input(desc, gamma_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end + function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") + t_ = convert(Tensor{Any}, t_) + t_min_ = convert(Tensor{Float32}, t_min_) + t_max_ = convert(Tensor{Float32}, t_max_) + m_ = convert(Tensor{Any}, m_) + m_min_ = convert(Tensor{Float32}, m_min_) + m_max_ = convert(Tensor{Float32}, m_max_) + v_ = convert(Tensor{Any}, v_) + v_min_ = convert(Tensor{Float32}, v_min_) + v_max_ = convert(Tensor{Float32}, v_max_) + beta_ = convert(Tensor{Any}, beta_) + beta_min_ = convert(Tensor{Float32}, beta_min_) + beta_max_ = convert(Tensor{Float32}, beta_max_) + gamma_ = convert(Tensor{Any}, gamma_) + gamma_min_ = convert(Tensor{Float32}, gamma_min_) + gamma_max_ = convert(Tensor{Float32}, gamma_max_) + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + tf.add_input(desc, t_) + tf.add_input(desc, t_min_) + tf.add_input(desc, t_max_) + tf.add_input(desc, m_) + tf.add_input(desc, m_min_) + tf.add_input(desc, m_max_) + tf.add_input(desc, v_) + tf.add_input(desc, v_min_) + tf.add_input(desc, v_max_) + tf.add_input(desc, beta_) + tf.add_input(desc, beta_min_) + 
tf.add_input(desc, beta_max_) + tf.add_input(desc, gamma_) + tf.add_input(desc, gamma_min_) + tf.add_input(desc, gamma_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") t_ = convert(tf.EagerTensor, t_) @@ -36449,13 +36449,13 @@ begin return res end end - function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - end end @@ -36465,16 +36465,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cos_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Cos") do - desc = tf.NodeDescription("Cos") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function cos_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Cos") do + desc = tf.NodeDescription("Cos") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function cos_eager(x_; name=nothing) desc = tf.EagerOp("Cos") x_ = 
convert(tf.EagerTensor, x_) @@ -36487,13 +36487,13 @@ begin return res[1] end end - function cos(x_; name=nothing) - if tf.in_eager_mode() - cos_eager(x_; name=name) - else - cos_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cos(x_; name=nothing) + if tf.in_eager_mode() + cos_eager(x_; name=name) + else + cos_graph(x_; name=name) + end end - end end @@ -36503,28 +36503,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizeDownAndShrinkRange") do - desc = tf.NodeDescription("QuantizeDownAndShrinkRange") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizeDownAndShrinkRange") do + desc = tf.NodeDescription("QuantizeDownAndShrinkRange") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizeDownAndShrinkRange") input_ = convert(tf.EagerTensor, input_) @@ -36544,13 +36544,13 @@ begin return res end end - function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type) - else - quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type) + else + quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type) + end end - end end @@ -36560,23 +36560,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalRandomDataset") do - desc = tf.NodeDescription("ExperimentalRandomDataset") - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if output_types !== nothing - desc["output_types"] = 
map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalRandomDataset") do + desc = tf.NodeDescription("ExperimentalRandomDataset") + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalRandomDataset") seed_ = convert(tf.EagerTensor, seed_) @@ -36596,13 +36596,13 @@ begin return res[1] end end - function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -36612,28 +36612,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - local desc - tf.with_op_name(name, "Rpc") do - desc = tf.NodeDescription("Rpc") - address_ = convert(Tensor{String}, address_) - method_ = convert(Tensor{String}, method_) - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) - end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) - end + function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "Rpc") do + desc = tf.NodeDescription("Rpc") + address_ = convert(Tensor{String}, address_) + method_ = convert(Tensor{String}, method_) + request_ = convert(Tensor{String}, request_) + tf.add_input(desc, address_) + tf.add_input(desc, method_) + tf.add_input(desc, request_) + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, 
timeout_in_ms=nothing) desc = tf.EagerOp("Rpc") address_ = convert(tf.EagerTensor, address_) @@ -36658,13 +36658,13 @@ begin return res[1] end end - function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - if tf.in_eager_mode() - rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - else - rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.in_eager_mode() + rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end end - end end @@ -36674,15 +36674,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_length_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorListLength") do - desc = tf.NodeDescription("TensorListLength") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_list_length_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorListLength") do + desc = tf.NodeDescription("TensorListLength") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_length_eager(input_handle_; name=nothing) desc = tf.EagerOp("TensorListLength") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -36694,13 +36694,13 @@ begin return res[1] end end - function tensor_list_length(input_handle_; name=nothing) - if tf.in_eager_mode() - tensor_list_length_eager(input_handle_; name=name) - else - tensor_list_length_graph(input_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_length(input_handle_; name=nothing) + if tf.in_eager_mode() + tensor_list_length_eager(input_handle_; name=name) + else + tensor_list_length_graph(input_handle_; name=name) + end end - end end @@ -36710,28 +36710,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapIncompleteSize") do - desc = tf.NodeDescription("MapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapIncompleteSize") do + desc = tf.NodeDescription("MapIncompleteSize") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + 
if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapIncompleteSize") if capacity !== nothing @@ -36756,13 +36756,13 @@ begin return res[1] end end - function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -36772,24 +36772,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - local desc - tf.with_op_name(name, "StatelessWhile") do - desc = tf.NodeDescription("StatelessWhile") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end + function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "StatelessWhile") do + desc = tf.NodeDescription("StatelessWhile") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + if body !== nothing + desc["body"] = Base.identity(body) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) desc = tf.EagerOp("StatelessWhile") input_ = convert(tf.EagerTensor, input_) @@ -36810,44 +36810,44 @@ begin return res[1] end end - function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - if tf.in_eager_mode() - stateless_while_eager(input_; name=name, T=T, cond=cond, body=body) - else - stateless_while_graph(input_; name=name, T=T, cond=cond, body=body) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.in_eager_mode() + stateless_while_eager(input_; name=name, T=T, cond=cond, 
body=body) + else + stateless_while_graph(input_; name=name, T=T, cond=cond, body=body) + end end - end end """ - sparse_conditional_accumulator(; container=, shared_name=, reduction_type=MEAN) + sparse_conditional_accumulator(; container=, shared_name=, reduction_type=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - local desc - tf.with_op_name(name, "SparseConditionalAccumulator") do - desc = tf.NodeDescription("SparseConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end + function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "SparseConditionalAccumulator") do + desc = tf.NodeDescription("SparseConditionalAccumulator") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sparse_conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) desc = tf.EagerOp("SparseConditionalAccumulator") if dtype !== nothing @@ -36872,13 +36872,13 @@ begin return res[1] end end - function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.in_eager_mode() - sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - else - sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.in_eager_mode() + sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end end - end end @@ -36888,20 +36888,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_min_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMin") do - desc = tf.NodeDescription("SegmentMin") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = 
segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function segment_min_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMin") do + desc = tf.NodeDescription("SegmentMin") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function segment_min_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMin") data_ = convert(tf.EagerTensor, data_) @@ -36917,13 +36917,13 @@ begin return res[1] end end - function segment_min(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_min_eager(data_, segment_ids_; name=name) - else - segment_min_graph(data_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_min(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_min_eager(data_, segment_ids_; name=name) + else + segment_min_graph(data_, segment_ids_; name=name) + end end - end end @@ -36933,19 +36933,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) - local desc - tf.with_op_name(name, "WriteGraphSummary") do - desc = tf.NodeDescription("WriteGraphSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tensor_ = convert(Tensor{String}, tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tensor_) - end - tf.Tensor(tf.Operation(desc)) + function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) + local desc + tf.with_op_name(name, "WriteGraphSummary") do + desc = tf.NodeDescription("WriteGraphSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tensor_ = convert(Tensor{String}, tensor_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tensor_) end + tf.Tensor(tf.Operation(desc)) + end function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) desc = tf.EagerOp("WriteGraphSummary") writer_ = convert(tf.EagerTensor, writer_) @@ -36961,13 +36961,13 @@ begin return res[1] end end - function write_graph_summary(writer_, step_, tensor_; name=nothing) - if tf.in_eager_mode() - write_graph_summary_eager(writer_, step_, tensor_; name=name) - else - write_graph_summary_graph(writer_, step_, tensor_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing) + if tf.in_eager_mode() + write_graph_summary_eager(writer_, step_, tensor_; name=name) + else + write_graph_summary_graph(writer_, step_, tensor_; name=name) + end end - end end @@ -36977,18 +36977,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cholesky_grad_graph(l_, grad_; name=nothing) - local desc - tf.with_op_name(name, "CholeskyGrad") do - desc = tf.NodeDescription("CholeskyGrad") - l_ = convert(Tensor{Any}, l_) - grad_ = convert(Tensor{Any}, grad_) - (l_, grad_) = 
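One substantive detail amid the reindentation: wrappers whose inputs carry indices, such as `segment_min_graph` here and the `scatter_nd_non_aliasing_add` and `sparse_apply_*` constructors below, shift those inputs by one at graph-construction time, so callers can use Julia's 1-based indices while the TensorFlow kernel receives the 0-based values it expects. In isolation the shift is just:

    # segment_ids_ is already a tf.Tensor here; subtract 1 to go from
    # Julia's 1-based indexing to TensorFlow's 0-based indexing.
    segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)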
tf.tf_promote(l_, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) - end - tf.Tensor(tf.Operation(desc)) + function cholesky_grad_graph(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "CholeskyGrad") do + desc = tf.NodeDescription("CholeskyGrad") + l_ = convert(Tensor{Any}, l_) + grad_ = convert(Tensor{Any}, grad_) + (l_, grad_) = tf.tf_promote(l_, grad_) + tf.add_input(desc, l_) + tf.add_input(desc, grad_) end + tf.Tensor(tf.Operation(desc)) + end function cholesky_grad_eager(l_, grad_; name=nothing) desc = tf.EagerOp("CholeskyGrad") l_ = convert(tf.EagerTensor, l_) @@ -37004,13 +37004,13 @@ begin return res[1] end end - function cholesky_grad(l_, grad_; name=nothing) - if tf.in_eager_mode() - cholesky_grad_eager(l_, grad_; name=name) - else - cholesky_grad_graph(l_, grad_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky_grad(l_, grad_; name=nothing) + if tf.in_eager_mode() + cholesky_grad_eager(l_, grad_; name=name) + else + cholesky_grad_graph(l_, grad_; name=name) + end end - end end @@ -37020,38 +37020,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "LogUniformCandidateSampler") do - desc = tf.NodeDescription("LogUniformCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LogUniformCandidateSampler") do + desc = tf.NodeDescription("LogUniformCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("LogUniformCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -37081,13 +37081,13 @@ begin return res end end - function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, 
range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - end end @@ -37097,23 +37097,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "SerializeSparse") do - desc = tf.NodeDescription("SerializeSparse") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "SerializeSparse") do + desc = tf.NodeDescription("SerializeSparse") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) desc = tf.EagerOp("SerializeSparse") sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) @@ -37133,13 +37133,13 @@ begin return res[1] end end - function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - else - serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end end - end 
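`log_uniform_candidate_sampler` just above is one of the multi-output ops: instead of wrapping the operation in a single `tf.Tensor`, its graph constructor collects one output tensor per output index, and the eager variant correspondingly returns `res` whole rather than `res[1]`. The tail of the constructor reads:

    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:3    # LogUniformCandidateSampler has three outputs
        push!(out, tf.Tensor(op, out_idx))
    end
    out                  # returned as a Vector of tf.Tensor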
end @@ -37149,22 +37149,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "ScatterNdNonAliasingAdd") do - desc = tf.NodeDescription("ScatterNdNonAliasingAdd") - input_ = convert(Tensor{Any}, input_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (input_, updates_) = tf.tf_promote(input_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, input_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) + function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNdNonAliasingAdd") do + desc = tf.NodeDescription("ScatterNdNonAliasingAdd") + input_ = convert(Tensor{Any}, input_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (input_, updates_) = tf.tf_promote(input_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, input_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) end + tf.Tensor(tf.Operation(desc)) + end function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) desc = tf.EagerOp("ScatterNdNonAliasingAdd") input_ = convert(tf.EagerTensor, input_) @@ -37183,13 +37183,13 @@ begin return res[1] end end - function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name) - else - scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name) + else + scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name) + end end - end end @@ -37199,24 +37199,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_merge_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "RefMerge") do - desc = tf.NodeDescription("RefMerge") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function ref_merge_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefMerge") do + desc = tf.NodeDescription("RefMerge") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + (inputs_,) = tf.tf_promote(inputs_) + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function ref_merge_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("RefMerge") inputs_ = convert(tf.EagerTensor, inputs_) @@ -37232,13 +37232,13 @@ begin return res end end - function ref_merge(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - ref_merge_eager(inputs_; name=name, N=N) - else - 
ref_merge_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + ref_merge_eager(inputs_; name=name, N=N) + else + ref_merge_graph(inputs_; name=name, N=N) + end end - end end @@ -37248,23 +37248,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListConcat") do - desc = tf.NodeDescription("TensorListConcat") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListConcat") do + desc = tf.NodeDescription("TensorListConcat") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcat") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -37279,61 +37279,61 @@ begin return res end end - function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype) - else - tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype) + else + tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype) + end end - end end """ - cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_canonical_to_params(num_layers, num_units, input_size, weights, biases; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNCanonicalToParams") do - desc = tf.NodeDescription("CudnnRNNCanonicalToParams") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - weights_ = [convert(Tensor{Any}, x) for x = weights_] - biases_ = [convert(Tensor{Any}, x) for x = biases_] - (weights_, biases_) = tf.tf_promote(weights_, biases_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, weights_) - 
tf.add_input(desc, biases_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNCanonicalToParams") do + desc = tf.NodeDescription("CudnnRNNCanonicalToParams") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + weights_ = [convert(Tensor{Any}, x) for x = weights_] + biases_ = [convert(Tensor{Any}, x) for x = biases_] + (weights_, biases_) = tf.tf_promote(weights_, biases_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + tf.add_input(desc, weights_) + tf.add_input(desc, biases_) + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNCanonicalToParams") num_layers_ = convert(tf.EagerTensor, num_layers_) @@ -37376,13 +37376,13 @@ begin return res[1] end end - function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, 
num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end end @@ -37392,35 +37392,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyAdadelta") do - desc = tf.NodeDescription("SparseApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdadelta") do + desc = tf.NodeDescription("SparseApplyAdadelta") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + accum_update_ = convert(Tensor{Any}, accum_update_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, accum_update_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyAdadelta") var_ = convert(tf.EagerTensor, var_) @@ -37457,13 +37457,13 @@ begin return res[1] end end - function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, 
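Optional keyword attributes are handled uniformly across these constructors: an attribute is written into the node description only when the caller supplied it, and each value is first coerced through a `Base` constructor so that a malformed argument fails in Julia rather than inside the C API. Schematically, with attribute names taken from `cudnn_rnn_canonical_to_params_graph` above:

    if num_params !== nothing
        desc["num_params"] = Base.Int(num_params)    # scalar integer attr
    end
    if rnn_mode !== nothing
        desc["rnn_mode"] = Base.String(rnn_mode)     # string attr
    end
    if dropout !== nothing
        desc["dropout"] = Base.identity(dropout)     # passed through unchanged
    end
    # list-valued attrs go through map(Base.identity, xs),
    # e.g. desc["output_shapes"] in the dataset ops below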
use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -37473,15 +37473,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayClose") do - desc = tf.NodeDescription("TensorArrayClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_close_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayClose") do + desc = tf.NodeDescription("TensorArrayClose") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_close_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayClose") handle_ = convert(tf.EagerTensor, handle_) @@ -37493,13 +37493,13 @@ begin return res[1] end end - function tensor_array_close(handle_; name=nothing) - if tf.in_eager_mode() - tensor_array_close_eager(handle_; name=name) - else - tensor_array_close_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_eager(handle_; name=name) + else + tensor_array_close_graph(handle_; name=name) + end end - end end @@ -37509,18 +37509,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function selu_grad_graph(gradients_, outputs_; name=nothing) - local desc - tf.with_op_name(name, "SeluGrad") do - desc = tf.NodeDescription("SeluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - outputs_ = convert(Tensor{Any}, outputs_) - (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) - end - tf.Tensor(tf.Operation(desc)) + function selu_grad_graph(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "SeluGrad") do + desc = tf.NodeDescription("SeluGrad") + gradients_ = convert(Tensor{Any}, gradients_) + outputs_ = convert(Tensor{Any}, outputs_) + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + tf.add_input(desc, gradients_) + tf.add_input(desc, outputs_) end + tf.Tensor(tf.Operation(desc)) + end function selu_grad_eager(gradients_, outputs_; name=nothing) desc = tf.EagerOp("SeluGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -37536,40 +37536,40 @@ begin return res[1] end end - function selu_grad(gradients_, outputs_; name=nothing) - if tf.in_eager_mode() - selu_grad_eager(gradients_, outputs_; name=name) - else - selu_grad_graph(gradients_, outputs_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing) + if tf.in_eager_mode() + selu_grad_eager(gradients_, outputs_; name=name) + else + selu_grad_graph(gradients_, outputs_; name=name) + end end - end end """ - crop_and_resize_grad_image(grads, boxes, 
box_ind, image_size; method=bilinear) + crop_and_resize_grad_image(grads, boxes, box_ind, image_size; method=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) - local desc - tf.with_op_name(name, "CropAndResizeGradImage") do - desc = tf.NodeDescription("CropAndResizeGradImage") - grads_ = convert(Tensor{Float32}, grads_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - image_size_ = convert(Tensor{Int32}, image_size_) - tf.add_input(desc, grads_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, image_size_) - if method !== nothing - desc["method"] = Base.String(method) - end + function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradImage") do + desc = tf.NodeDescription("CropAndResizeGradImage") + grads_ = convert(Tensor{Float32}, grads_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + image_size_ = convert(Tensor{Int32}, image_size_) + tf.add_input(desc, grads_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, image_size_) + if method !== nothing + desc["method"] = Base.String(method) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) desc = tf.EagerOp("CropAndResizeGradImage") grads_ = convert(tf.EagerTensor, grads_) @@ -37590,13 +37590,13 @@ begin return res[1] end end - function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) - if tf.in_eager_mode() - crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method) - else - crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + if tf.in_eager_mode() + crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + else + crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + end end - end end @@ -37606,17 +37606,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rfft_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT") do - desc = tf.NodeDescription("RFFT") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function rfft_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT") do + desc = tf.NodeDescription("RFFT") + input_ = convert(Tensor{Float32}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function rfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("RFFT") input_ = convert(tf.EagerTensor, input_) @@ -37630,13 +37630,13 @@ begin return res[1] end end - function rfft(input_, fft_length_; name=nothing) - if 
tf.in_eager_mode() - rfft_eager(input_, fft_length_; name=name) - else - rfft_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft_eager(input_, fft_length_; name=name) + else + rfft_graph(input_, fft_length_; name=name) + end end - end end @@ -37646,25 +37646,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSqlDataset") do - desc = tf.NodeDescription("ExperimentalSqlDataset") - driver_name_ = convert(Tensor{String}, driver_name_) - data_source_name_ = convert(Tensor{String}, data_source_name_) - query_ = convert(Tensor{String}, query_) - tf.add_input(desc, driver_name_) - tf.add_input(desc, data_source_name_) - tf.add_input(desc, query_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSqlDataset") do + desc = tf.NodeDescription("ExperimentalSqlDataset") + driver_name_ = convert(Tensor{String}, driver_name_) + data_source_name_ = convert(Tensor{String}, data_source_name_) + query_ = convert(Tensor{String}, query_) + tf.add_input(desc, driver_name_) + tf.add_input(desc, data_source_name_) + tf.add_input(desc, query_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSqlDataset") driver_name_ = convert(tf.EagerTensor, driver_name_) @@ -37686,13 +37686,13 @@ begin return res[1] end end - function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -37702,31 +37702,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - 
local desc - tf.with_op_name(name, "ResourceApplyPowerSign") do - desc = tf.NodeDescription("ResourceApplyPowerSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - logbase_ = convert(Tensor{Any}, logbase_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyPowerSign") do + desc = tf.NodeDescription("ResourceApplyPowerSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + logbase_ = convert(Tensor{Any}, logbase_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyPowerSign") var_ = convert(tf.EagerTensor, var_) @@ -37758,13 +37758,13 @@ begin return res[1] end end - function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -37774,16 +37774,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDeterminant") do - desc = tf.NodeDescription("MatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDeterminant") do + desc = tf.NodeDescription("MatrixDeterminant") + input_ = convert(Tensor{Any}, 
input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("MatrixDeterminant") input_ = convert(tf.EagerTensor, input_) @@ -37796,13 +37796,13 @@ begin return res[1] end end - function matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - matrix_determinant_eager(input_; name=name) - else - matrix_determinant_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + matrix_determinant_eager(input_; name=name) + else + matrix_determinant_graph(input_; name=name) + end end - end end @@ -37812,24 +37812,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) - local desc - tf.with_op_name(name, "StaticRegexReplace") do - desc = tf.NodeDescription("StaticRegexReplace") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) - end - if rewrite !== nothing - desc["rewrite"] = Base.String(rewrite) - end - if replace_global !== nothing - desc["replace_global"] = Base.Bool(replace_global) - end + function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + local desc + tf.with_op_name(name, "StaticRegexReplace") do + desc = tf.NodeDescription("StaticRegexReplace") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + if rewrite !== nothing + desc["rewrite"] = Base.String(rewrite) + end + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) desc = tf.EagerOp("StaticRegexReplace") input_ = convert(tf.EagerTensor, input_) @@ -37850,44 +37850,44 @@ begin return res[1] end end - function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) - if tf.in_eager_mode() - static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) - else - static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + if tf.in_eager_mode() + static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + else + static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + end end - end end """ - avg_pool(value; data_format=NHWC) + avg_pool(value; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool") do - desc = tf.NodeDescription("AvgPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - 
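The `tf.tf_promote` calls that appear throughout these constructors (here on `value_` in `avg_pool_graph`) promote the listed inputs to a shared element type before they are wired into the op, so mixed numeric arguments do not reach the kernel with mismatched dtypes. A usage sketch; `constant` is the package's tensor constructor, and the precise promotion rules live elsewhere in the package:

    a = constant(Int32[1, 2, 3])
    b = constant([0.5, 1.5, 2.5])   # Float64
    (a, b) = tf.tf_promote(a, b)    # both now share a common element type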
tf.add_input(desc, value_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool") do + desc = tf.NodeDescription("AvgPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("AvgPool") value_ = convert(tf.EagerTensor, value_) @@ -37912,13 +37912,13 @@ begin return res[1] end end - function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -37928,22 +37928,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseAdd") do - desc = tf.NodeDescription("SparseDenseCwiseAdd") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseAdd") do + desc = tf.NodeDescription("SparseDenseCwiseAdd") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + dense_ = convert(Tensor{Any}, dense_) + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) + tf.add_input(desc, dense_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) 
desc = tf.EagerOp("SparseDenseCwiseAdd") sp_indices_ = convert(tf.EagerTensor, sp_indices_) @@ -37963,13 +37963,13 @@ begin return res[1] end end - function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end end @@ -37979,18 +37979,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_v1_graph(value_, bias_; name=nothing) - local desc - tf.with_op_name(name, "BiasAddV1") do - desc = tf.NodeDescription("BiasAddV1") - value_ = convert(Tensor{Any}, value_) - bias_ = convert(Tensor{Any}, bias_) - (value_, bias_) = tf.tf_promote(value_, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) - end - tf.Tensor(tf.Operation(desc)) + function bias_add_v1_graph(value_, bias_; name=nothing) + local desc + tf.with_op_name(name, "BiasAddV1") do + desc = tf.NodeDescription("BiasAddV1") + value_ = convert(Tensor{Any}, value_) + bias_ = convert(Tensor{Any}, bias_) + (value_, bias_) = tf.tf_promote(value_, bias_) + tf.add_input(desc, value_) + tf.add_input(desc, bias_) end + tf.Tensor(tf.Operation(desc)) + end function bias_add_v1_eager(value_, bias_; name=nothing) desc = tf.EagerOp("BiasAddV1") value_ = convert(tf.EagerTensor, value_) @@ -38006,13 +38006,13 @@ begin return res[1] end end - function bias_add_v1(value_, bias_; name=nothing) - if tf.in_eager_mode() - bias_add_v1_eager(value_, bias_; name=name) - else - bias_add_v1_graph(value_, bias_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_v1(value_, bias_; name=nothing) + if tf.in_eager_mode() + bias_add_v1_eager(value_, bias_; name=name) + else + bias_add_v1_graph(value_, bias_; name=name) + end end - end end @@ -38022,16 +38022,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function invert_permutation_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "InvertPermutation") do - desc = tf.NodeDescription("InvertPermutation") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function invert_permutation_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "InvertPermutation") do + desc = tf.NodeDescription("InvertPermutation") + x_ = convert(Tensor{Int32}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function invert_permutation_eager(x_; name=nothing) desc = tf.EagerOp("InvertPermutation") x_ = convert(tf.EagerTensor, x_) @@ -38044,13 +38044,13 @@ begin return res[1] end end - function invert_permutation(x_; name=nothing) - if tf.in_eager_mode() - invert_permutation_eager(x_; name=name) - else - invert_permutation_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert_permutation(x_; name=nothing) + if tf.in_eager_mode() + 
invert_permutation_eager(x_; name=name) + else + invert_permutation_graph(x_; name=name) + end end - end end @@ -38060,28 +38060,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "HashTableV2") do - desc = tf.NodeDescription("HashTableV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end + function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTableV2") do + desc = tf.NodeDescription("HashTableV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) desc = tf.EagerOp("HashTableV2") if container !== nothing @@ -38106,13 +38106,13 @@ begin return res[1] end end - function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - end end @@ -38122,34 +38122,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "SparseApplyMomentum") do - desc = tf.NodeDescription("SparseApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, 
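The practical payoff of the regenerated dispatch functions is that one call site serves both execution modes. A usage sketch, assuming the context and `in_eager_mode` plumbing introduced earlier in this series is active and the op is exported as usual:

    using TensorFlow
    const tf = TensorFlow

    # Graph mode (the default): returns a symbolic tf.Tensor for a Session to run.
    p = constant(Int32[3, 1, 2])
    inv = invert_permutation(p)   # dispatches to invert_permutation_graph

    # With eager mode enabled, the identical call dispatches to
    # invert_permutation_eager and yields a concrete tensor immediately.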
accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "SparseApplyMomentum") do + desc = tf.NodeDescription("SparseApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("SparseApplyMomentum") var_ = convert(tf.EagerTensor, var_) @@ -38183,13 +38183,13 @@ begin return res[1] end end - function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -38199,25 +38199,25 @@ end An op which feeds a single Tensor value into the computation. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "InfeedEnqueue") do - desc = tf.NodeDescription("InfeedEnqueue") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) + function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "InfeedEnqueue") do + desc = tf.NodeDescription("InfeedEnqueue") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end end + tf.Tensor(tf.Operation(desc)) + end function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) desc = tf.EagerOp("InfeedEnqueue") input_ = convert(tf.EagerTensor, input_) @@ -38239,13 +38239,13 @@ begin return res[1] end end - function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - else - infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + else + infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + end end - end end @@ -38255,27 +38255,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomUniformInt") do - desc = tf.NodeDescription("StatelessRandomUniformInt") - shape_ = convert(Tensor{Any}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - minval_ = convert(Tensor{Any}, minval_) - maxval_ = convert(Tensor{Any}, maxval_) - (minval_, maxval_) = tf.tf_promote(minval_, maxval_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniformInt") do + desc = tf.NodeDescription("StatelessRandomUniformInt") + shape_ = convert(Tensor{Any}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + minval_ = convert(Tensor{Any}, minval_) + maxval_ = convert(Tensor{Any}, maxval_) + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + (shape_,) = 
tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + tf.add_input(desc, minval_) + tf.add_input(desc, maxval_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomUniformInt") shape_ = convert(tf.EagerTensor, shape_) @@ -38300,13 +38300,13 @@ begin return res[1] end end - function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) - else - stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + else + stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + end end - end end @@ -38316,31 +38316,31 @@ end Sends the named tensor from send_device to recv_device. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_Send") do - desc = tf.NodeDescription("_Send") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) + function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Send") do + desc = tf.NodeDescription("_Send") + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, tensor_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end end + tf.Tensor(tf.Operation(desc)) + end function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_Send") tensor_ = convert(tf.EagerTensor, tensor_) @@ -38368,13 +38368,13 @@ begin return res[1] end end - function _send(tensor_; name=nothing, 
tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - end end @@ -38384,33 +38384,33 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - updates_ = convert(Tensor{Float32}, updates_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + updates_ = convert(Tensor{Float32}, updates_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, updates_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + 
desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -38440,13 +38440,13 @@ begin return res[1] end end - function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -38456,32 +38456,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapPeek") do - desc = tf.NodeDescription("MapPeek") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapPeek") do + desc = tf.NodeDescription("MapPeek") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + 
desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapPeek") key_ = convert(tf.EagerTensor, key_) @@ -38510,13 +38510,13 @@ begin return res[1] end end - function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -38526,22 +38526,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) - local desc - tf.with_op_name(name, "WriteScalarSummary") do - desc = tf.NodeDescription("WriteScalarSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, value_) - end - tf.Tensor(tf.Operation(desc)) + function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) + local desc + tf.with_op_name(name, "WriteScalarSummary") do + desc = tf.NodeDescription("WriteScalarSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, value_) end + tf.Tensor(tf.Operation(desc)) + end function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) desc = tf.EagerOp("WriteScalarSummary") writer_ = convert(tf.EagerTensor, writer_) @@ -38560,13 +38560,13 @@ begin return res[1] end end - function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) - if tf.in_eager_mode() - write_scalar_summary_eager(writer_, step_, tag_, value_; name=name) - else - write_scalar_summary_graph(writer_, step_, tag_, value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) + if tf.in_eager_mode() + write_scalar_summary_eager(writer_, step_, tag_, value_; name=name) + else + write_scalar_summary_graph(writer_, step_, tag_, value_; name=name) + end end - end end @@ -38576,35 +38576,35 @@ end """ 
begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapUnstageNoKey") do - desc = tf.NodeDescription("OrderedMapUnstageNoKey") - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstageNoKey") do + desc = tf.NodeDescription("OrderedMapUnstageNoKey") + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapUnstageNoKey") indices_ = convert(tf.EagerTensor, indices_) @@ -38631,13 +38631,13 @@ begin return res end end - function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -38647,39 +38647,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc 
- tf.with_op_name(name, "SparseApplyCenteredRMSProp") do - desc = tf.NodeDescription("SparseApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("SparseApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyCenteredRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -38722,49 +38722,49 @@ begin return res[1] end end - function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, 
momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end """ - conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=NDHWC, dilations=[1, 1, 1, 1, 1]) + conv3d_backprop_input_v2(input_sizes, filter, out_backprop; data_format=, dilations=[1, 1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropInputV2") do - desc = tf.NodeDescription("Conv3DBackpropInputV2") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - (input_sizes_,) = tf.tf_promote(input_sizes_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInputV2") do + desc = tf.NodeDescription("Conv3DBackpropInputV2") + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + (input_sizes_,) = tf.tf_promote(input_sizes_) + tf.add_input(desc, input_sizes_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropInputV2") input_sizes_ = convert(tf.EagerTensor, input_sizes_) @@ -38795,13 +38795,13 @@ begin return res[1] end end - function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; 
name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -38811,30 +38811,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters") if table_id !== nothing @@ -38856,13 +38856,13 @@ begin return res end end - function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -38872,22 +38872,22 @@ 
@@
end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "RandomShuffle") do - desc = tf.NodeDescription("RandomShuffle") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomShuffle") do + desc = tf.NodeDescription("RandomShuffle") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomShuffle") value_ = convert(tf.EagerTensor, value_) @@ -38906,13 +38906,13 @@ begin return res[1] end end - function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2) - else - random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2) + else + random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2) + end end - end end @@ -38922,38 +38922,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "UniformCandidateSampler") do - desc = tf.NodeDescription("UniformCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "UniformCandidateSampler") do + desc = tf.NodeDescription("UniformCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = 
Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("UniformCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -38983,13 +38983,13 @@ begin return res end end - function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - end end @@ -38999,22 +38999,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplitV2") do - desc = tf.NodeDescription("TensorArraySplitV2") - handle_ = convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV2") do + desc = tf.NodeDescription("TensorArraySplitV2") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + lengths_ = convert(Tensor{Int64}, lengths_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, lengths_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySplitV2") handle_ = convert(tf.EagerTensor, handle_) @@ -39033,13 +39033,13 @@ begin return res[1] end end - function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end end @@ -39049,42 +39049,42 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - local desc - tf.with_op_name(name, "MutableDenseHashTableV2") do - desc = tf.NodeDescription("MutableDenseHashTableV2") - empty_key_ = convert(Tensor{Any}, empty_key_) - deleted_key_ = convert(Tensor{Any}, deleted_key_) - (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_) - tf.add_input(desc, empty_key_) - tf.add_input(desc, deleted_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) - end - if max_load_factor !== nothing - desc["max_load_factor"] = Base.identity(max_load_factor) - end - end - tf.Tensor(tf.Operation(desc)) + function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTableV2") do + desc = tf.NodeDescription("MutableDenseHashTableV2") + empty_key_ = convert(Tensor{Any}, empty_key_) + deleted_key_ = convert(Tensor{Any}, deleted_key_) + (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_) + tf.add_input(desc, empty_key_) + tf.add_input(desc, deleted_key_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end end + tf.Tensor(tf.Operation(desc)) + end function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) desc = tf.EagerOp("MutableDenseHashTableV2") empty_key_ = convert(tf.EagerTensor, 
empty_key_) @@ -39124,13 +39124,13 @@ begin return res[1] end end - function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - if tf.in_eager_mode() - mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) - else - mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + if tf.in_eager_mode() + mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end end - end end @@ -39140,18 +39140,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function draw_bounding_boxes_graph(images_, boxes_; name=nothing) - local desc - tf.with_op_name(name, "DrawBoundingBoxes") do - desc = tf.NodeDescription("DrawBoundingBoxes") - images_ = convert(Tensor{Float32}, images_) - boxes_ = convert(Tensor{Float32}, boxes_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, boxes_) - end - tf.Tensor(tf.Operation(desc)) + function draw_bounding_boxes_graph(images_, boxes_; name=nothing) + local desc + tf.with_op_name(name, "DrawBoundingBoxes") do + desc = tf.NodeDescription("DrawBoundingBoxes") + images_ = convert(Tensor{Float32}, images_) + boxes_ = convert(Tensor{Float32}, boxes_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, boxes_) end + tf.Tensor(tf.Operation(desc)) + end function draw_bounding_boxes_eager(images_, boxes_; name=nothing) desc = tf.EagerOp("DrawBoundingBoxes") images_ = convert(tf.EagerTensor, images_) @@ -39166,13 +39166,13 @@ begin return res[1] end end - function draw_bounding_boxes(images_, boxes_; name=nothing) - if tf.in_eager_mode() - draw_bounding_boxes_eager(images_, boxes_; name=name) - else - draw_bounding_boxes_graph(images_, boxes_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) + if tf.in_eager_mode() + draw_bounding_boxes_eager(images_, boxes_; name=name) + else 
+ draw_bounding_boxes_graph(images_, boxes_; name=name) + end end - end end @@ -39182,33 +39182,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyProximalAdagrad") do - desc = tf.NodeDescription("SparseApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalAdagrad") do + desc = tf.NodeDescription("SparseApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyProximalAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -39242,13 +39242,13 @@ begin return res[1] end end - function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -39258,25 +39258,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function range_dataset_graph(start_, 
stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "RangeDataset") do - desc = tf.NodeDescription("RangeDataset") - start_ = convert(Tensor{Int64}, start_) - stop_ = convert(Tensor{Int64}, stop_) - step_ = convert(Tensor{Int64}, step_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, step_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RangeDataset") do + desc = tf.NodeDescription("RangeDataset") + start_ = convert(Tensor{Int64}, start_) + stop_ = convert(Tensor{Int64}, stop_) + step_ = convert(Tensor{Int64}, step_) + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, step_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("RangeDataset") start_ = convert(tf.EagerTensor, start_) @@ -39298,13 +39298,13 @@ begin return res[1] end end - function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) - else - range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) + else + range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -39314,17 +39314,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) - local desc - tf.with_op_name(name, "ReaderRestoreStateV2") do - desc = tf.NodeDescription("ReaderRestoreStateV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - state_ = convert(Tensor{String}, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) - end - tf.Tensor(tf.Operation(desc)) + function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreStateV2") do + desc = tf.NodeDescription("ReaderRestoreStateV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + state_ = convert(Tensor{String}, state_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) end + tf.Tensor(tf.Operation(desc)) + end function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreStateV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -39338,13 +39338,13 @@ begin return res[1] end end - function reader_restore_state_v2(reader_handle_, state_; name=nothing) - if tf.in_eager_mode() - 
reader_restore_state_v2_eager(reader_handle_, state_; name=name) - else - reader_restore_state_v2_graph(reader_handle_, state_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing) + if tf.in_eager_mode() + reader_restore_state_v2_eager(reader_handle_, state_; name=name) + else + reader_restore_state_v2_graph(reader_handle_, state_; name=name) + end end - end end @@ -39354,26 +39354,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) - local desc - tf.with_op_name(name, "TopKV2") do - desc = tf.NodeDescription("TopKV2") - input_ = convert(Tensor{Any}, input_) - k_ = convert(Tensor{Int32}, k_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, k_) - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) + local desc + tf.with_op_name(name, "TopKV2") do + desc = tf.NodeDescription("TopKV2") + input_ = convert(Tensor{Any}, input_) + k_ = convert(Tensor{Int32}, k_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, k_) + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function top_kv2_eager(input_, k_; name=nothing, sorted=nothing) desc = tf.EagerOp("TopKV2") input_ = convert(tf.EagerTensor, input_) @@ -39391,13 +39391,13 @@ begin return res end end - function top_kv2(input_, k_; name=nothing, sorted=nothing) - if tf.in_eager_mode() - top_kv2_eager(input_, k_; name=name, sorted=sorted) - else - top_kv2_graph(input_, k_; name=name, sorted=sorted) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing) + if tf.in_eager_mode() + top_kv2_eager(input_, k_; name=name, sorted=sorted) + else + top_kv2_graph(input_, k_; name=name, sorted=sorted) + end end - end end @@ -39407,16 +39407,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function atanh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Atanh") do - desc = tf.NodeDescription("Atanh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function atanh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Atanh") do + desc = tf.NodeDescription("Atanh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function atanh_eager(x_; name=nothing) desc = tf.EagerOp("Atanh") x_ = convert(tf.EagerTensor, x_) @@ -39429,13 +39429,13 @@ begin return res[1] end end - function atanh(x_; name=nothing) - if tf.in_eager_mode() - atanh_eager(x_; name=name) - else - atanh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atanh(x_; name=nothing) + if tf.in_eager_mode() + atanh_eager(x_; name=name) + else + atanh_graph(x_; name=name) + end end - end end @@ -39445,16 +39445,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
debug_gradient_identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DebugGradientIdentity") do - desc = tf.NodeDescription("DebugGradientIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function debug_gradient_identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DebugGradientIdentity") do + desc = tf.NodeDescription("DebugGradientIdentity") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function debug_gradient_identity_eager(input_; name=nothing) desc = tf.EagerOp("DebugGradientIdentity") input_ = convert(tf.EagerTensor, input_) @@ -39467,13 +39467,13 @@ begin return res[1] end end - function debug_gradient_identity(input_; name=nothing) - if tf.in_eager_mode() - debug_gradient_identity_eager(input_; name=name) - else - debug_gradient_identity_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_identity(input_; name=nothing) + if tf.in_eager_mode() + debug_gradient_identity_eager(input_; name=name) + else + debug_gradient_identity_graph(input_; name=name) + end end - end end @@ -39483,27 +39483,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) - local desc - tf.with_op_name(name, "SparseAddGrad") do - desc = tf.NodeDescription("SparseAddGrad") - backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) - a_indices_ = convert(Tensor{Int64}, a_indices_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - sum_indices_ = convert(Tensor{Int64}, sum_indices_) - (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, sum_indices_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseAddGrad") do + desc = tf.NodeDescription("SparseAddGrad") + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + a_indices_ = convert(Tensor{Int64}, a_indices_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + sum_indices_ = convert(Tensor{Int64}, sum_indices_) + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + tf.add_input(desc, backprop_val_grad_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, sum_indices_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) desc = tf.EagerOp("SparseAddGrad") backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) @@ -39522,13 +39522,13 @@ begin return res end end - function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) - if tf.in_eager_mode() - sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) - else - sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + if tf.in_eager_mode() + sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + else + sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + end end - end end @@ -39538,25 +39538,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterAdd") do - desc = tf.NodeDescription("ResourceScatterAdd") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterAdd") do + desc = tf.NodeDescription("ResourceScatterAdd") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterAdd") resource_ = convert(tf.EagerTensor, resource_) @@ -39577,13 +39577,13 @@ begin return res[1] end end - function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -39593,16 +39593,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ceil_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Ceil") do - desc = tf.NodeDescription("Ceil") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function ceil_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Ceil") do + desc = tf.NodeDescription("Ceil") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function ceil_eager(x_; name=nothing) desc = tf.EagerOp("Ceil") x_ = convert(tf.EagerTensor, x_) @@ -39615,13 +39615,13 @@ 
@@
begin return res[1] end end - function ceil(x_; name=nothing) - if tf.in_eager_mode() - ceil_eager(x_; name=name) - else - ceil_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ceil(x_; name=nothing) + if tf.in_eager_mode() + ceil_eager(x_; name=name) + else + ceil_graph(x_; name=name) + end end - end end @@ -39631,22 +39631,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "Save") do - desc = tf.NodeDescription("Save") - filename_ = convert(Tensor{String}, filename_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end + function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "Save") do + desc = tf.NodeDescription("Save") + filename_ = convert(Tensor{String}, filename_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing) desc = tf.EagerOp("Save") filename_ = convert(tf.EagerTensor, filename_) @@ -39665,13 +39665,13 @@ begin return res[1] end end - function save(filename_, tensor_names_, data_; name=nothing, T=nothing) - if tf.in_eager_mode() - save_eager(filename_, tensor_names_, data_; name=name, T=T) - else - save_graph(filename_, tensor_names_, data_; name=name, T=T) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing) + if tf.in_eager_mode() + save_eager(filename_, tensor_names_, data_; name=name, T=T) + else + save_graph(filename_, tensor_names_, data_; name=name, T=T) + end end - end end @@ -39681,30 +39681,30 @@ end Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters") if table_id !== nothing @@ -39726,13 +39726,13 @@ begin return res end end - function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -39742,30 +39742,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "QuantizedConcat") do - desc = tf.NodeDescription("QuantizedConcat") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - values_ = [convert(Tensor{Any}, x) for x = values_] - input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_] - input_maxes_ = [convert(Tensor{Float32}, x) for x = 
input_maxes_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - tf.add_input(desc, input_mins_) - tf.add_input(desc, input_maxes_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "QuantizedConcat") do + desc = tf.NodeDescription("QuantizedConcat") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + values_ = [convert(Tensor{Any}, x) for x = values_] + input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_] + input_maxes_ = [convert(Tensor{Float32}, x) for x = input_maxes_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, concat_dim_) + tf.add_input(desc, values_) + tf.add_input(desc, input_mins_) + tf.add_input(desc, input_maxes_) + if N !== nothing + desc["N"] = Base.Int(N) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) desc = tf.EagerOp("QuantizedConcat") concat_dim_ = convert(tf.EagerTensor, concat_dim_) @@ -39787,13 +39787,13 @@ begin return res end end - function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) - if tf.in_eager_mode() - quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) - else - quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + if tf.in_eager_mode() + quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) + else + quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) + end end - end end @@ -39803,16 +39803,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zeros_like_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "ZerosLike") do - desc = tf.NodeDescription("ZerosLike") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function zeros_like_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "ZerosLike") do + desc = tf.NodeDescription("ZerosLike") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function zeros_like_eager(x_; name=nothing) desc = tf.EagerOp("ZerosLike") x_ = convert(tf.EagerTensor, x_) @@ -39825,13 +39825,13 @@ begin return res[1] end end - function zeros_like(x_; name=nothing) - if tf.in_eager_mode() - zeros_like_eager(x_; name=name) - else - zeros_like_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeros_like(x_; name=nothing) + if tf.in_eager_mode() + zeros_like_eager(x_; name=name) + else + zeros_like_graph(x_; name=name) + end end - end end @@ -39841,39 +39841,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, 
deterministic=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FractionalAvgPool") do - desc = tf.NodeDescription("FractionalAvgPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPool") do + desc = tf.NodeDescription("FractionalAvgPool") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("FractionalAvgPool") value_ = convert(tf.EagerTensor, value_) @@ -39904,13 +39904,13 @@ begin return res end end - function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) - else - fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + else + fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + end end - end end @@ -39920,29 +39920,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) - local desc - tf.with_op_name(name, "EditDistance") do - desc = tf.NodeDescription("EditDistance") - hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_) - hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_) - hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_) - truth_indices_ = convert(Tensor{Int64}, truth_indices_) - truth_values_ = convert(Tensor{Any}, truth_values_) - truth_shape_ = convert(Tensor{Int64}, truth_shape_) - (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_) - tf.add_input(desc, hypothesis_indices_) - tf.add_input(desc, hypothesis_values_) - tf.add_input(desc, hypothesis_shape_) - tf.add_input(desc, truth_indices_) - tf.add_input(desc, truth_values_) - tf.add_input(desc, truth_shape_) - if normalize !== nothing - desc["normalize"] = Base.Bool(normalize) - end + function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + local desc + tf.with_op_name(name, "EditDistance") do + desc = tf.NodeDescription("EditDistance") + hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_) + hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_) + hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_) + truth_indices_ = convert(Tensor{Int64}, truth_indices_) + truth_values_ = convert(Tensor{Any}, truth_values_) + truth_shape_ = convert(Tensor{Int64}, truth_shape_) + (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_) + tf.add_input(desc, hypothesis_indices_) + tf.add_input(desc, hypothesis_values_) + tf.add_input(desc, hypothesis_shape_) + tf.add_input(desc, truth_indices_) + tf.add_input(desc, truth_values_) + tf.add_input(desc, truth_shape_) + if normalize !== nothing + desc["normalize"] = Base.Bool(normalize) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) desc = tf.EagerOp("EditDistance") hypothesis_indices_ = convert(tf.EagerTensor, hypothesis_indices_) @@ -39969,13 +39969,13 @@ begin return res[1] end end - function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) - if tf.in_eager_mode() - edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) - else - edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + if tf.in_eager_mode() + edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) + else + edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) + end end - end 
end @@ -39985,27 +39985,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "UniqueV2") do - desc = tf.NodeDescription("UniqueV2") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int64}, axis_) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end + function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueV2") do + desc = tf.NodeDescription("UniqueV2") + x_ = convert(Tensor{Any}, x_) + axis_ = convert(Tensor{Int64}, axis_) + (x_,) = tf.tf_promote(x_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, x_) + tf.add_input(desc, axis_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing) desc = tf.EagerOp("UniqueV2") x_ = convert(tf.EagerTensor, x_) @@ -40024,48 +40024,48 @@ begin return res end end - function unique_v2(x_, axis_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_v2_eager(x_, axis_; name=name, out_idx=out_idx) - else - unique_v2_graph(x_, axis_; name=name, out_idx=out_idx) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_v2_eager(x_, axis_; name=name, out_idx=out_idx) + else + unique_v2_graph(x_, axis_; name=name, out_idx=out_idx) + end end - end end """ - quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=HALF_TO_EVEN) + quantize_and_dequantize_v2(input, input_min, input_max; signed_input=true, num_bits=8, range_given=false, round_mode=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantizeV2") do - desc = tf.NodeDescription("QuantizeAndDequantizeV2") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Any}, input_min_) - input_max_ = convert(Tensor{Any}, input_max_) - (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) - end + function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV2") do + desc = tf.NodeDescription("QuantizeAndDequantizeV2") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Any}, input_min_) + 
input_max_ = convert(Tensor{Any}, input_max_) + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeAndDequantizeV2") input_ = convert(tf.EagerTensor, input_) @@ -40096,13 +40096,13 @@ begin return res[1] end end - function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) - else - quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) + else + quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) + end end - end end @@ -40112,31 +40112,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantize") do - desc = tf.NodeDescription("QuantizeAndDequantize") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if input_min !== nothing - desc["input_min"] = Base.identity(input_min) - end - if input_max !== nothing - desc["input_max"] = Base.identity(input_max) - end - end - tf.Tensor(tf.Operation(desc)) + function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantize") do + desc = tf.NodeDescription("QuantizeAndDequantize") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + if num_bits !== nothing + 
desc["num_bits"] = Base.Int(num_bits) + end + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + if input_min !== nothing + desc["input_min"] = Base.identity(input_min) + end + if input_max !== nothing + desc["input_max"] = Base.identity(input_max) + end end + tf.Tensor(tf.Operation(desc)) + end function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) desc = tf.EagerOp("QuantizeAndDequantize") input_ = convert(tf.EagerTensor, input_) @@ -40164,13 +40164,13 @@ begin return res[1] end end - function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) - else - quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) + else + quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) + end end - end end @@ -40180,23 +40180,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPopBack") do - desc = tf.NodeDescription("TensorListPopBack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPopBack") do + desc = tf.NodeDescription("TensorListPopBack") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_list_pop_back_eager(input_handle_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPopBack") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -40211,13 +40211,13 @@ begin return res end end - function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype) - else - tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing) + 
if tf.in_eager_mode() + tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype) + else + tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype) + end end - end end @@ -40227,28 +40227,28 @@ end Debug NaN Value Counter Op """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - local desc - tf.with_op_name(name, "DebugNanCount") do - desc = tf.NodeDescription("DebugNanCount") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - end - tf.Tensor(tf.Operation(desc)) + function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugNanCount") do + desc = tf.NodeDescription("DebugNanCount") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end end + tf.Tensor(tf.Operation(desc)) + end function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) desc = tf.EagerOp("DebugNanCount") input_ = convert(tf.EagerTensor, input_) @@ -40273,13 +40273,13 @@ begin return res[1] end end - function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.in_eager_mode() - debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - else - debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + if tf.in_eager_mode() + debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + else + debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + end end - end end @@ -40289,33 +40289,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAdagradDA") do - desc = tf.NodeDescription("ApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - 
gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdagradDA") do + desc = tf.NodeDescription("ApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAdagradDA") var_ = convert(tf.EagerTensor, var_) @@ -40351,46 +40351,46 @@ begin return res[1] end end - function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end end """ - depthwise_conv2d_native(input, filter; data_format=NHWC, dilations=[1, 1, 1, 1]) 
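# A hedged sketch of how the generated *_graph methods forward optional
# keyword attributes, modeled on depthwise_conv2d_native_graph below; the op
# name "Foo" and its attrs are illustrative placeholders, not entries from
# the real op registry:
function foo_with_attrs_graph(x_; name=nothing, padding=nothing, strides=nothing)
    local desc
    tf.with_op_name(name, "Foo") do
        desc = tf.NodeDescription("Foo")
        x_ = convert(Tensor{Any}, x_)
        (x_,) = tf.tf_promote(x_)
        tf.add_input(desc, x_)
        # Attrs are written only when the caller supplied them, so the
        # runtime falls back to the op's registered defaults otherwise.
        if padding !== nothing
            desc["padding"] = Base.String(padding)        # scalar string attr
        end
        if strides !== nothing
            desc["strides"] = map(Base.identity, strides) # list attr
        end
    end
    tf.Tensor(tf.Operation(desc))
end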
+ depthwise_conv2d_native(input, filter; data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNative") do - desc = tf.NodeDescription("DepthwiseConv2dNative") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNative") do + desc = tf.NodeDescription("DepthwiseConv2dNative") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNative") input_ = convert(tf.EagerTensor, input_) @@ -40418,13 +40418,13 @@ begin return res[1] end end - function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -40434,15 +40434,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function serialize_iterator_graph(resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "SerializeIterator") do - desc = tf.NodeDescription("SerializeIterator") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - tf.add_input(desc, resource_handle_) - end - tf.Tensor(tf.Operation(desc)) + function 
serialize_iterator_graph(resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "SerializeIterator") do + desc = tf.NodeDescription("SerializeIterator") + resource_handle_ = convert(Tensor{Any}, resource_handle_) + tf.add_input(desc, resource_handle_) end + tf.Tensor(tf.Operation(desc)) + end function serialize_iterator_eager(resource_handle_; name=nothing) desc = tf.EagerOp("SerializeIterator") resource_handle_ = convert(tf.EagerTensor, resource_handle_) @@ -40454,13 +40454,13 @@ begin return res[1] end end - function serialize_iterator(resource_handle_; name=nothing) - if tf.in_eager_mode() - serialize_iterator_eager(resource_handle_; name=name) - else - serialize_iterator_graph(resource_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_iterator(resource_handle_; name=nothing) + if tf.in_eager_mode() + serialize_iterator_eager(resource_handle_; name=name) + else + serialize_iterator_graph(resource_handle_; name=name) + end end - end end @@ -40470,15 +40470,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dataset_to_graph_graph(input_dataset_; name=nothing) - local desc - tf.with_op_name(name, "DatasetToGraph") do - desc = tf.NodeDescription("DatasetToGraph") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - end - tf.Tensor(tf.Operation(desc)) + function dataset_to_graph_graph(input_dataset_; name=nothing) + local desc + tf.with_op_name(name, "DatasetToGraph") do + desc = tf.NodeDescription("DatasetToGraph") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) end + tf.Tensor(tf.Operation(desc)) + end function dataset_to_graph_eager(input_dataset_; name=nothing) desc = tf.EagerOp("DatasetToGraph") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -40490,13 +40490,13 @@ begin return res[1] end end - function dataset_to_graph(input_dataset_; name=nothing) - if tf.in_eager_mode() - dataset_to_graph_eager(input_dataset_; name=name) - else - dataset_to_graph_graph(input_dataset_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing) + if tf.in_eager_mode() + dataset_to_graph_eager(input_dataset_; name=name) + else + dataset_to_graph_graph(input_dataset_; name=name) + end end - end end @@ -40506,27 +40506,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) - local desc - tf.with_op_name(name, "TopK") do - desc = tf.NodeDescription("TopK") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if k !== nothing - desc["k"] = Base.Int(k) - end - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end + function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) + local desc + tf.with_op_name(name, "TopK") do + desc = tf.NodeDescription("TopK") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if k !== nothing + desc["k"] = Base.Int(k) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end 
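# Multi-output ops such as TopK, UniqueV2, and QuantizedConcat collect one
# tf.Tensor per output in graph mode and return the whole result vector in
# eager mode, where single-output ops return res[1] instead; a minimal
# sketch, assuming a hypothetical two-output op "Pair":
function pair_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "Pair") do
        desc = tf.NodeDescription("Pair")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:2                  # one Tensor per declared output
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end
function pair_eager(x_; name=nothing)
    desc = tf.EagerOp("Pair")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    res = tf.execute(desc)             # assumed runtime helper, mirroring
    return res                         # the eager bodies above; all outputs
end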
function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing) desc = tf.EagerOp("TopK") input_ = convert(tf.EagerTensor, input_) @@ -40545,13 +40545,13 @@ begin return res end end - function top_k(input_; name=nothing, k=nothing, sorted=nothing) - if tf.in_eager_mode() - top_k_eager(input_; name=name, k=k, sorted=sorted) - else - top_k_graph(input_; name=name, k=k, sorted=sorted) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing) + if tf.in_eager_mode() + top_k_eager(input_; name=name, k=k, sorted=sorted) + else + top_k_graph(input_; name=name, k=k, sorted=sorted) + end end - end end @@ -40561,35 +40561,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyFtrlV2") do - desc = tf.NodeDescription("ResourceApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyFtrlV2") do + desc = tf.NodeDescription("ResourceApplyFtrlV2") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, l2_shrinkage_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyFtrlV2") var_ = convert(tf.EagerTensor, var_) @@ -40626,13 +40626,13 @@ begin return res[1] end end - function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, 
l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -40642,21 +40642,21 @@ end Replacement node for NcclBroadcast. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclBroadcastRecv") do - desc = tf.NodeDescription("_NcclBroadcastRecv") - shape_ = convert(Tensor{Int32}, shape_) - tf.add_input(desc, shape_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastRecv") do + desc = tf.NodeDescription("_NcclBroadcastRecv") + shape_ = convert(Tensor{Int32}, shape_) + tf.add_input(desc, shape_) + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing) desc = tf.EagerOp("_NcclBroadcastRecv") shape_ = convert(tf.EagerTensor, shape_) @@ -40674,13 +40674,13 @@ begin return res[1] end end - function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name) - else - _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name) + else + _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name) + end end - end end @@ -40690,15 +40690,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_is_closed_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueIsClosed") do - desc = tf.NodeDescription("QueueIsClosed") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function queue_is_closed_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueIsClosed") do + desc = tf.NodeDescription("QueueIsClosed") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function 
queue_is_closed_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosed") handle_ = convert(tf.EagerTensor, handle_) @@ -40710,13 +40710,13 @@ begin return res[1] end end - function queue_is_closed(handle_; name=nothing) - if tf.in_eager_mode() - queue_is_closed_eager(handle_; name=name) - else - queue_is_closed_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed(handle_; name=nothing) + if tf.in_eager_mode() + queue_is_closed_eager(handle_; name=name) + else + queue_is_closed_graph(handle_; name=name) + end end - end end @@ -40726,30 +40726,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ShuffleDataset") do - desc = tf.NodeDescription("ShuffleDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if reshuffle_each_iteration !== nothing - desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleDataset") do + desc = tf.NodeDescription("ShuffleDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + seed_ = convert(Tensor{Int64}, seed_) + seed2_ = convert(Tensor{Int64}, seed2_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, buffer_size_) + tf.add_input(desc, seed_) + tf.add_input(desc, seed2_) + if reshuffle_each_iteration !== nothing + desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ShuffleDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -40776,13 +40776,13 @@ begin return res[1] end end - function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) - else - shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) + else + shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -40792,24 +40792,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DeserializeSparse") do - desc = tf.NodeDescription("DeserializeSparse") - serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) - (serialized_sparse_,) = tf.tf_promote(serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DeserializeSparse") do + desc = tf.NodeDescription("DeserializeSparse") + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + (serialized_sparse_,) = tf.tf_promote(serialized_sparse_) + tf.add_input(desc, serialized_sparse_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) desc = tf.EagerOp("DeserializeSparse") serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) @@ -40825,13 +40825,13 @@ begin return res end end - function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype) - else - deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype) + else + deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + end end - end end @@ -40841,28 +40841,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PriorityQueueV2") do - desc = tf.NodeDescription("PriorityQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function priority_queue_v2_graph(; 
name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueueV2") do + desc = tf.NodeDescription("PriorityQueueV2") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function priority_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PriorityQueueV2") if component_types !== nothing @@ -40887,13 +40887,13 @@ begin return res[1] end end - function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -40903,16 +40903,16 @@ end A graph node which represents an argument to a function. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _device_arg_graph(; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_DeviceArg") do - desc = tf.NodeDescription("_DeviceArg") - if index !== nothing - desc["index"] = Base.Int(index) - end + function _device_arg_graph(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceArg") do + desc = tf.NodeDescription("_DeviceArg") + if index !== nothing + desc["index"] = Base.Int(index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _device_arg_eager(; name=nothing, index=nothing) desc = tf.EagerOp("_DeviceArg") if index !== nothing @@ -40925,13 +40925,13 @@ begin return res[1] end end - function _device_arg(; name=nothing, index=nothing) - if tf.in_eager_mode() - _device_arg_eager(; name=name, index=index) - else - _device_arg_graph(; name=name, index=index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_arg(; name=nothing, index=nothing) + if tf.in_eager_mode() + _device_arg_eager(; name=name, index=index) + else + _device_arg_graph(; name=name, index=index) + end end - end end @@ -40941,25 +40941,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TruncatedNormal") do - desc = tf.NodeDescription("TruncatedNormal") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TruncatedNormal") do + desc = tf.NodeDescription("TruncatedNormal") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("TruncatedNormal") shape_ = convert(tf.EagerTensor, shape_) @@ -40981,13 +40981,13 @@ begin return res[1] end end - function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - end end @@ -40997,20 +40997,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - local 
desc - tf.with_op_name(name, "TensorForestTreePredict") do - desc = tf.NodeDescription("TensorForestTreePredict") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - dense_features_ = convert(Tensor{Float32}, dense_features_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, dense_features_) - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end + function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "TensorForestTreePredict") do + desc = tf.NodeDescription("TensorForestTreePredict") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + dense_features_ = convert(Tensor{Float32}, dense_features_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, dense_features_) + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) desc = tf.EagerOp("TensorForestTreePredict") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -41027,13 +41027,13 @@ begin return res[1] end end - function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) - else - tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) + else + tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) + end end - end end @@ -41043,21 +41043,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - local desc - tf.with_op_name(name, "StackV2") do - desc = tf.NodeDescription("StackV2") - max_size_ = convert(Tensor{Int32}, max_size_) - tf.add_input(desc, max_size_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) - end + function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + local desc + tf.with_op_name(name, "StackV2") do + desc = tf.NodeDescription("StackV2") + max_size_ = convert(Tensor{Int32}, max_size_) + tf.add_input(desc, max_size_) + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) desc = tf.EagerOp("StackV2") max_size_ = convert(tf.EagerTensor, max_size_) @@ -41075,13 +41075,13 @@ begin return res[1] end end - function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.in_eager_mode() - stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) - else - 
stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + if tf.in_eager_mode() + stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) + else + stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) + end end - end end @@ -41091,15 +41091,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_num_accumulated_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "AccumulatorNumAccumulated") do - desc = tf.NodeDescription("AccumulatorNumAccumulated") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function accumulator_num_accumulated_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "AccumulatorNumAccumulated") do + desc = tf.NodeDescription("AccumulatorNumAccumulated") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function accumulator_num_accumulated_eager(handle_; name=nothing) desc = tf.EagerOp("AccumulatorNumAccumulated") handle_ = convert(tf.EagerTensor, handle_) @@ -41111,13 +41111,13 @@ begin return res[1] end end - function accumulator_num_accumulated(handle_; name=nothing) - if tf.in_eager_mode() - accumulator_num_accumulated_eager(handle_; name=name) - else - accumulator_num_accumulated_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing) + if tf.in_eager_mode() + accumulator_num_accumulated_eager(handle_; name=name) + else + accumulator_num_accumulated_graph(handle_; name=name) + end end - end end @@ -41127,15 +41127,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_reset_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderResetV2") do - desc = tf.NodeDescription("ReaderResetV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) - end - tf.Tensor(tf.Operation(desc)) + function reader_reset_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderResetV2") do + desc = tf.NodeDescription("ReaderResetV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + tf.add_input(desc, reader_handle_) end + tf.Tensor(tf.Operation(desc)) + end function reader_reset_v2_eager(reader_handle_; name=nothing) desc = tf.EagerOp("ReaderResetV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -41147,13 +41147,13 @@ begin return res[1] end end - function reader_reset_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_reset_v2_eager(reader_handle_; name=name) - else - reader_reset_v2_graph(reader_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_reset_v2_eager(reader_handle_; name=name) + else + reader_reset_v2_graph(reader_handle_; name=name) + end end - end end @@ -41163,31 +41163,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - 
local desc - tf.with_op_name(name, "ApplyAddSign") do - desc = tf.NodeDescription("ApplyAddSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - alpha_ = convert(Tensor{Any}, alpha_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAddSign") do + desc = tf.NodeDescription("ApplyAddSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + alpha_ = convert(Tensor{Any}, alpha_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyAddSign") var_ = convert(tf.EagerTensor, var_) @@ -41221,13 +41221,13 @@ begin return res[1] end end - function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -41237,30 +41237,30 @@ end Retrieve embedding parameters for a single table. 
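The retrieval op below returns four tensors, so its `_graph` builder replaces the usual `tf.Tensor(tf.Operation(desc))` tail with a loop that wraps each output index in its own `tf.Tensor`. A sketch of that multi-output tail, using a hypothetical three-output op name:

# Sketch only; "MyMultiOp" is a hypothetical op name.
function my_multi_op_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "MyMultiOp") do
        desc = tf.NodeDescription("MyMultiOp")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:3                       # one tf.Tensor per op output
        push!(out, tf.Tensor(op, out_idx))
    end
    out                                     # wrapper returns a Vector{tf.Tensor}
end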
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") if table_id !== nothing @@ -41282,13 +41282,13 @@ begin return res end end - function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -41298,16 +41298,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rint_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Rint") do - desc = tf.NodeDescription("Rint") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function rint_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Rint") do + desc 
= tf.NodeDescription("Rint") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function rint_eager(x_; name=nothing) desc = tf.EagerOp("Rint") x_ = convert(tf.EagerTensor, x_) @@ -41320,13 +41320,13 @@ begin return res[1] end end - function rint(x_; name=nothing) - if tf.in_eager_mode() - rint_eager(x_; name=name) - else - rint_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rint(x_; name=nothing) + if tf.in_eager_mode() + rint_eager(x_; name=name) + else + rint_graph(x_; name=name) + end end - end end @@ -41336,30 +41336,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") if table_id !== nothing @@ -41381,13 +41381,13 @@ begin return res end end - function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, 
table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -41397,28 +41397,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - local desc - tf.with_op_name(name, "ExtractGlimpse") do - desc = tf.NodeDescription("ExtractGlimpse") - input_ = convert(Tensor{Float32}, input_) - size_ = convert(Tensor{Int32}, size_) - offsets_ = convert(Tensor{Float32}, offsets_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, offsets_) - if centered !== nothing - desc["centered"] = Base.Bool(centered) - end - if normalized !== nothing - desc["normalized"] = Base.Bool(normalized) - end - if uniform_noise !== nothing - desc["uniform_noise"] = Base.Bool(uniform_noise) - end + function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) + local desc + tf.with_op_name(name, "ExtractGlimpse") do + desc = tf.NodeDescription("ExtractGlimpse") + input_ = convert(Tensor{Float32}, input_) + size_ = convert(Tensor{Int32}, size_) + offsets_ = convert(Tensor{Float32}, offsets_) + tf.add_input(desc, input_) + tf.add_input(desc, size_) + tf.add_input(desc, offsets_) + if centered !== nothing + desc["centered"] = Base.Bool(centered) + end + if normalized !== nothing + desc["normalized"] = Base.Bool(normalized) + end + if uniform_noise !== nothing + desc["uniform_noise"] = Base.Bool(uniform_noise) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) desc = tf.EagerOp("ExtractGlimpse") input_ = convert(tf.EagerTensor, input_) @@ -41443,13 +41443,13 @@ begin return res[1] end end - function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - if tf.in_eager_mode() - extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) - else - extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) + if tf.in_eager_mode() + extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) + else + extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) + end end - end end @@ -41459,21 +41459,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) - local desc - tf.with_op_name(name, "StringToHashBucketStrong") do - desc = tf.NodeDescription("StringToHashBucketStrong") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - 
end - if key !== nothing - desc["key"] = map(Base.identity, key) - end + function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketStrong") do + desc = tf.NodeDescription("StringToHashBucketStrong") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if key !== nothing + desc["key"] = map(Base.identity, key) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing) desc = tf.EagerOp("StringToHashBucketStrong") input_ = convert(tf.EagerTensor, input_) @@ -41491,13 +41491,13 @@ begin return res[1] end end - function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key) - else - string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key) + else + string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key) + end end - end end @@ -41507,28 +41507,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OneShotIterator") do - desc = tf.NodeDescription("OneShotIterator") - if dataset_factory !== nothing - desc["dataset_factory"] = Base.identity(dataset_factory) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OneShotIterator") do + desc = tf.NodeDescription("OneShotIterator") + if dataset_factory !== nothing + desc["dataset_factory"] = Base.identity(dataset_factory) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function one_shot_iterator_eager(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OneShotIterator") if dataset_factory !== nothing @@ -41553,13 +41553,13 @@ begin return res[1] end end - function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, 
output_shapes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) - else - one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) + else + one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) + end end - end end @@ -41569,34 +41569,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyMomentum") do - desc = tf.NodeDescription("ResourceSparseApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyMomentum") var_ = 
convert(tf.EagerTensor, var_) @@ -41628,13 +41628,13 @@ begin return res[1] end end - function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -41644,24 +41644,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "SaveSlices") do - desc = tf.NodeDescription("SaveSlices") - filename_ = convert(Tensor{String}, filename_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shapes_and_slices_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end + function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "SaveSlices") do + desc = tf.NodeDescription("SaveSlices") + filename_ = convert(Tensor{String}, filename_) + tensor_names_ = convert(Tensor{String}, tensor_names_) + shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_) + data_ = [convert(Tensor{Any}, x) for x = data_] + tf.add_input(desc, filename_) + tf.add_input(desc, tensor_names_) + tf.add_input(desc, shapes_and_slices_) + tf.add_input(desc, data_) + if T !== nothing + desc["T"] = map(Base.identity, T) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) desc = tf.EagerOp("SaveSlices") filename_ = convert(tf.EagerTensor, filename_) @@ -41682,13 +41682,13 @@ begin return res[1] end end - function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - if tf.in_eager_mode() - save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) - else - save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + if tf.in_eager_mode() + save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) + else + save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) + end end - end end @@ -41698,15 +41698,15 @@ 
end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalDatasetCardinality") do - desc = tf.NodeDescription("ExperimentalDatasetCardinality") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetCardinality") do + desc = tf.NodeDescription("ExperimentalDatasetCardinality") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tf.add_input(desc, input_dataset_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) desc = tf.EagerOp("ExperimentalDatasetCardinality") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -41718,13 +41718,13 @@ begin return res[1] end end - function experimental_dataset_cardinality(input_dataset_; name=nothing) - if tf.in_eager_mode() - experimental_dataset_cardinality_eager(input_dataset_; name=name) - else - experimental_dataset_cardinality_graph(input_dataset_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing) + if tf.in_eager_mode() + experimental_dataset_cardinality_eager(input_dataset_; name=name) + else + experimental_dataset_cardinality_graph(input_dataset_; name=name) + end end - end end @@ -41734,38 +41734,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do - desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - batch_size_ = convert(Tensor{Int64}, batch_size_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") + input_dataset_ = 
convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + batch_size_ = convert(Tensor{Int64}, batch_size_) + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, num_parallel_calls_) + tf.add_input(desc, drop_remainder_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -41800,13 +41800,13 @@ begin return res[1] end end - function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - end end @@ -41816,16 +41816,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_finite_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsFinite") do - desc = tf.NodeDescription("IsFinite") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function 
is_finite_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsFinite") do + desc = tf.NodeDescription("IsFinite") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function is_finite_eager(x_; name=nothing) desc = tf.EagerOp("IsFinite") x_ = convert(tf.EagerTensor, x_) @@ -41838,13 +41838,13 @@ begin return res[1] end end - function is_finite(x_; name=nothing) - if tf.in_eager_mode() - is_finite_eager(x_; name=name) - else - is_finite_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_finite(x_; name=nothing) + if tf.in_eager_mode() + is_finite_eager(x_; name=name) + else + is_finite_graph(x_; name=name) + end end - end end @@ -41854,27 +41854,27 @@ end An Op to exchange data across TPU replicas. On each replica, the input is """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - local desc - tf.with_op_name(name, "AllToAll") do - desc = tf.NodeDescription("AllToAll") - input_ = convert(Tensor{Any}, input_) - group_assignment_ = convert(Tensor{Int32}, group_assignment_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - if concat_dimension !== nothing - desc["concat_dimension"] = Base.Int(concat_dimension) - end - if split_dimension !== nothing - desc["split_dimension"] = Base.Int(split_dimension) - end - if split_count !== nothing - desc["split_count"] = Base.Int(split_count) - end - end - tf.Tensor(tf.Operation(desc)) + function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + local desc + tf.with_op_name(name, "AllToAll") do + desc = tf.NodeDescription("AllToAll") + input_ = convert(Tensor{Any}, input_) + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) + if concat_dimension !== nothing + desc["concat_dimension"] = Base.Int(concat_dimension) + end + if split_dimension !== nothing + desc["split_dimension"] = Base.Int(split_dimension) + end + if split_count !== nothing + desc["split_count"] = Base.Int(split_count) + end end + tf.Tensor(tf.Operation(desc)) + end function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) desc = tf.EagerOp("AllToAll") input_ = convert(tf.EagerTensor, input_) @@ -41898,13 +41898,13 @@ begin return res[1] end end - function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - if tf.in_eager_mode() - all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) - else - all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + if tf.in_eager_mode() + all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, 
split_dimension=split_dimension, split_count=split_count) + else + all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + end end - end end @@ -41914,29 +41914,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TakeManySparseFromTensorsMap") do - desc = tf.NodeDescription("TakeManySparseFromTensorsMap") - sparse_handles_ = convert(Tensor{Int64}, sparse_handles_) - tf.add_input(desc, sparse_handles_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TakeManySparseFromTensorsMap") do + desc = tf.NodeDescription("TakeManySparseFromTensorsMap") + sparse_handles_ = convert(Tensor{Int64}, sparse_handles_) + tf.add_input(desc, sparse_handles_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if container !== nothing + desc["container"] = Base.String(container) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TakeManySparseFromTensorsMap") sparse_handles_ = convert(tf.EagerTensor, sparse_handles_) @@ -41957,13 +41957,13 @@ begin return res end end - function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) - else - take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + else + take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + end end - end end @@ -41973,16 +41973,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_diag_part_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixDiagPart") do - desc = tf.NodeDescription("BatchMatrixDiagPart") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_matrix_diag_part_graph(input_; name=nothing) + local desc + tf.with_op_name(name, 
"BatchMatrixDiagPart") do + desc = tf.NodeDescription("BatchMatrixDiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_diag_part_eager(input_; name=nothing) desc = tf.EagerOp("BatchMatrixDiagPart") input_ = convert(tf.EagerTensor, input_) @@ -41995,13 +41995,13 @@ begin return res[1] end end - function batch_matrix_diag_part(input_; name=nothing) - if tf.in_eager_mode() - batch_matrix_diag_part_eager(input_; name=name) - else - batch_matrix_diag_part_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag_part(input_; name=nothing) + if tf.in_eager_mode() + batch_matrix_diag_part_eager(input_; name=name) + else + batch_matrix_diag_part_graph(input_; name=name) + end end - end end @@ -42011,23 +42011,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordDataset") do - desc = tf.NodeDescription("FixedLengthRecordDataset") - filenames_ = convert(Tensor{String}, filenames_) - header_bytes_ = convert(Tensor{Int64}, header_bytes_) - record_bytes_ = convert(Tensor{Int64}, record_bytes_) - footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) + function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDataset") do + desc = tf.NodeDescription("FixedLengthRecordDataset") + filenames_ = convert(Tensor{String}, filenames_) + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + tf.add_input(desc, filenames_) + tf.add_input(desc, header_bytes_) + tf.add_input(desc, record_bytes_) + tf.add_input(desc, footer_bytes_) + tf.add_input(desc, buffer_size_) end + tf.Tensor(tf.Operation(desc)) + end function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) desc = tf.EagerOp("FixedLengthRecordDataset") filenames_ = convert(tf.EagerTensor, filenames_) @@ -42047,13 +42047,13 @@ begin return res[1] end end - function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - if tf.in_eager_mode() - fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) - else - fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + if tf.in_eager_mode() + fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + else + 
fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + end end - end end @@ -42063,21 +42063,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) - local desc - tf.with_op_name(name, "StackPush") do - desc = tf.NodeDescription("StackPush") - handle_ = convert(Tensor{String}, handle_) - elem_ = convert(Tensor{Any}, elem_) - (elem_,) = tf.tf_promote(elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) - end + function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPush") do + desc = tf.NodeDescription("StackPush") + handle_ = convert(Tensor{String}, handle_) + elem_ = convert(Tensor{Any}, elem_) + (elem_,) = tf.tf_promote(elem_) + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPush") handle_ = convert(tf.EagerTensor, handle_) @@ -42095,13 +42095,13 @@ begin return res[1] end end - function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.in_eager_mode() - stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) - else - stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.in_eager_mode() + stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end end - end end @@ -42111,19 +42111,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "PlaceholderV2") do - desc = tf.NodeDescription("PlaceholderV2") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderV2") do + desc = tf.NodeDescription("PlaceholderV2") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function placeholder_v2_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("PlaceholderV2") if dtype !== nothing @@ -42139,13 +42139,13 @@ begin return res[1] end end - function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) - else - placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) + end end - end end @@ -42155,19 
+42155,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorInit") do - desc = tf.NodeDescription("MultiDeviceIteratorInit") - dataset_ = convert(Tensor{Any}, dataset_) - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_) - tf.add_input(desc, dataset_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, max_buffer_size_) - end - tf.Tensor(tf.Operation(desc)) + function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorInit") do + desc = tf.NodeDescription("MultiDeviceIteratorInit") + dataset_ = convert(Tensor{Any}, dataset_) + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_) + tf.add_input(desc, dataset_) + tf.add_input(desc, multi_device_iterator_) + tf.add_input(desc, max_buffer_size_) end + tf.Tensor(tf.Operation(desc)) + end function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorInit") dataset_ = convert(tf.EagerTensor, dataset_) @@ -42183,13 +42183,13 @@ begin return res[1] end end - function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - if tf.in_eager_mode() - multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) - else - multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + if tf.in_eager_mode() + multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + else + multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + end end - end end @@ -42199,19 +42199,19 @@ end Re-configures the GCS block cache with the new configuration values. 
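Optional attributes are written into the node description only when the caller supplies the keyword: scalars are coerced through `Base.Int`/`Base.String`/`Base.Bool`, and list-valued attributes pass through `map(Base.identity, ...)`, as `queue_dequeue_v2` below shows. A sketch of that keyword-to-attribute pattern for a hypothetical op:

# Sketch only; "MyQueueOp" is a hypothetical op name.
function my_queue_op_graph(; name=nothing, capacity=nothing, shared_name=nothing, component_types=nothing)
    local desc
    tf.with_op_name(name, "MyQueueOp") do
        desc = tf.NodeDescription("MyQueueOp")
        if capacity !== nothing
            desc["capacity"] = Base.Int(capacity)           # scalar int attr
        end
        if shared_name !== nothing
            desc["shared_name"] = Base.String(shared_name)  # scalar string attr
        end
        if component_types !== nothing
            desc["component_types"] = map(Base.identity, component_types)  # list attr
        end
    end
    tf.Tensor(tf.Operation(desc))
end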
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) - local desc - tf.with_op_name(name, "GcsConfigureBlockCache") do - desc = tf.NodeDescription("GcsConfigureBlockCache") - max_cache_size_ = convert(Tensor{Any}, max_cache_size_) - block_size_ = convert(Tensor{Any}, block_size_) - max_staleness_ = convert(Tensor{Any}, max_staleness_) - tf.add_input(desc, max_cache_size_) - tf.add_input(desc, block_size_) - tf.add_input(desc, max_staleness_) - end - tf.Tensor(tf.Operation(desc)) + function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureBlockCache") do + desc = tf.NodeDescription("GcsConfigureBlockCache") + max_cache_size_ = convert(Tensor{Any}, max_cache_size_) + block_size_ = convert(Tensor{Any}, block_size_) + max_staleness_ = convert(Tensor{Any}, max_staleness_) + tf.add_input(desc, max_cache_size_) + tf.add_input(desc, block_size_) + tf.add_input(desc, max_staleness_) end + tf.Tensor(tf.Operation(desc)) + end function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) desc = tf.EagerOp("GcsConfigureBlockCache") max_cache_size_ = convert(tf.EagerTensor, max_cache_size_) @@ -42227,13 +42227,13 @@ begin return res[1] end end - function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) - if tf.in_eager_mode() - gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) - else - gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) + if tf.in_eager_mode() + gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) + else + gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) + end end - end end @@ -42243,21 +42243,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueV2") do - desc = tf.NodeDescription("QueueDequeueV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueV2") do + desc = tf.NodeDescription("QueueDequeueV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueV2") handle_ = convert(tf.EagerTensor, handle_) @@ -42275,13 +42275,13 @@ begin return res[1] end end - function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, 
timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -42291,19 +42291,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function transpose_graph(x_, perm_; name=nothing) - local desc - tf.with_op_name(name, "Transpose") do - desc = tf.NodeDescription("Transpose") - x_ = convert(Tensor{Any}, x_) - perm_ = convert(Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - end - tf.Tensor(tf.Operation(desc)) + function transpose_graph(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "Transpose") do + desc = tf.NodeDescription("Transpose") + x_ = convert(Tensor{Any}, x_) + perm_ = convert(Tensor{Int32}, perm_) + (perm_,) = tf.tf_promote(perm_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) end + tf.Tensor(tf.Operation(desc)) + end function transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("Transpose") x_ = convert(tf.EagerTensor, x_) @@ -42319,13 +42319,13 @@ begin return res[1] end end - function transpose(x_, perm_; name=nothing) - if tf.in_eager_mode() - transpose_eager(x_, perm_; name=name) - else - transpose_graph(x_, perm_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function transpose(x_, perm_; name=nothing) + if tf.in_eager_mode() + transpose_eager(x_, perm_; name=name) + else + transpose_graph(x_, perm_; name=name) + end end - end end @@ -42335,30 +42335,30 @@ end Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters") if table_id !== nothing @@ -42380,13 +42380,13 @@ begin return res end end - function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -42396,16 +42396,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ifft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "IFFT") do - desc = tf.NodeDescription("IFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function ifft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT") do + desc = tf.NodeDescription("IFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + 
tf.Tensor(tf.Operation(desc)) + end function ifft_eager(input_; name=nothing) desc = tf.EagerOp("IFFT") input_ = convert(tf.EagerTensor, input_) @@ -42418,13 +42418,13 @@ begin return res[1] end end - function ifft(input_; name=nothing) - if tf.in_eager_mode() - ifft_eager(input_; name=name) - else - ifft_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft(input_; name=nothing) + if tf.in_eager_mode() + ifft_eager(input_; name=name) + else + ifft_graph(input_; name=name) + end end - end end @@ -42434,25 +42434,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSumWithNumSegments") data_ = convert(tf.EagerTensor, data_) @@ -42473,13 +42473,13 @@ begin return res[1] end end - function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) - else - sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end end - end end @@ -42489,15 +42489,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_is_closed_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, 
"QueueIsClosedV2") do - desc = tf.NodeDescription("QueueIsClosedV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function queue_is_closed_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueIsClosedV2") do + desc = tf.NodeDescription("QueueIsClosedV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function queue_is_closed_v2_eager(handle_; name=nothing) desc = tf.EagerOp("QueueIsClosedV2") handle_ = convert(tf.EagerTensor, handle_) @@ -42509,13 +42509,13 @@ begin return res[1] end end - function queue_is_closed_v2(handle_; name=nothing) - if tf.in_eager_mode() - queue_is_closed_v2_eager(handle_; name=name) - else - queue_is_closed_v2_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed_v2(handle_; name=nothing) + if tf.in_eager_mode() + queue_is_closed_v2_eager(handle_; name=name) + else + queue_is_closed_v2_graph(handle_; name=name) + end end - end end @@ -42525,34 +42525,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ParameterizedTruncatedNormal") do - desc = tf.NodeDescription("ParameterizedTruncatedNormal") - shape_ = convert(Tensor{Any}, shape_) - means_ = convert(Tensor{Any}, means_) - stdevs_ = convert(Tensor{Any}, stdevs_) - minvals_ = convert(Tensor{Any}, minvals_) - maxvals_ = convert(Tensor{Any}, maxvals_) - (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - tf.add_input(desc, means_) - tf.add_input(desc, stdevs_) - tf.add_input(desc, minvals_) - tf.add_input(desc, maxvals_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) + function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ParameterizedTruncatedNormal") do + desc = tf.NodeDescription("ParameterizedTruncatedNormal") + shape_ = convert(Tensor{Any}, shape_) + means_ = convert(Tensor{Any}, means_) + stdevs_ = convert(Tensor{Any}, stdevs_) + minvals_ = convert(Tensor{Any}, minvals_) + maxvals_ = convert(Tensor{Any}, maxvals_) + (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + tf.add_input(desc, means_) + tf.add_input(desc, stdevs_) + tf.add_input(desc, minvals_) + tf.add_input(desc, maxvals_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end end + tf.Tensor(tf.Operation(desc)) + end function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("ParameterizedTruncatedNormal") shape_ = convert(tf.EagerTensor, shape_) @@ -42586,13 +42586,13 @@ begin return res[1] end end - function 
parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - end end @@ -42602,16 +42602,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function diag_part_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DiagPart") do - desc = tf.NodeDescription("DiagPart") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function diag_part_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DiagPart") do + desc = tf.NodeDescription("DiagPart") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function diag_part_eager(input_; name=nothing) desc = tf.EagerOp("DiagPart") input_ = convert(tf.EagerTensor, input_) @@ -42624,13 +42624,13 @@ begin return res[1] end end - function diag_part(input_; name=nothing) - if tf.in_eager_mode() - diag_part_eager(input_; name=name) - else - diag_part_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag_part(input_; name=nothing) + if tf.in_eager_mode() + diag_part_eager(input_; name=name) + else + diag_part_graph(input_; name=name) + end end - end end @@ -42640,22 +42640,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) - local desc - tf.with_op_name(name, "RegexReplace") do - desc = tf.NodeDescription("RegexReplace") - input_ = convert(Tensor{String}, input_) - pattern_ = convert(Tensor{String}, pattern_) - rewrite_ = convert(Tensor{String}, rewrite_) - tf.add_input(desc, input_) - tf.add_input(desc, pattern_) - tf.add_input(desc, rewrite_) - if replace_global !== nothing - desc["replace_global"] = Base.Bool(replace_global) - end + function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + local desc + tf.with_op_name(name, "RegexReplace") do + desc = tf.NodeDescription("RegexReplace") + input_ = convert(Tensor{String}, input_) + pattern_ = convert(Tensor{String}, pattern_) + rewrite_ = convert(Tensor{String}, rewrite_) + tf.add_input(desc, input_) + tf.add_input(desc, pattern_) + tf.add_input(desc, rewrite_) + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) desc = 
tf.EagerOp("RegexReplace") input_ = convert(tf.EagerTensor, input_) @@ -42674,13 +42674,13 @@ begin return res[1] end end - function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) - if tf.in_eager_mode() - regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global) - else - regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing) + if tf.in_eager_mode() + regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global) + else + regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global) + end end - end end @@ -42690,30 +42690,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) - local desc - tf.with_op_name(name, "SparseTensorDenseMatMul") do - desc = tf.NodeDescription("SparseTensorDenseMatMul") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_ = convert(Tensor{Any}, b_) - (a_values_, b_) = tf.tf_promote(a_values_, b_) - (a_indices_,) = tf.tf_promote(a_indices_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_) - if adjoint_a !== nothing - desc["adjoint_a"] = Base.Bool(adjoint_a) - end - if adjoint_b !== nothing - desc["adjoint_b"] = Base.Bool(adjoint_b) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + local desc + tf.with_op_name(name, "SparseTensorDenseMatMul") do + desc = tf.NodeDescription("SparseTensorDenseMatMul") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_ = convert(Tensor{Any}, b_) + (a_values_, b_) = tf.tf_promote(a_values_, b_) + (a_indices_,) = tf.tf_promote(a_indices_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) + if adjoint_a !== nothing + desc["adjoint_a"] = Base.Bool(adjoint_a) + end + if adjoint_b !== nothing + desc["adjoint_b"] = Base.Bool(adjoint_b) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) desc = tf.EagerOp("SparseTensorDenseMatMul") a_indices_ = convert(tf.EagerTensor, a_indices_) @@ -42740,13 +42740,13 @@ begin return res[1] end end - function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) - if tf.in_eager_mode() - sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) - else - sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing) + if tf.in_eager_mode() + sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) + else + sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b) + end end - end end @@ -42756,32 +42756,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) - local desc - tf.with_op_name(name, "MapDefun") do - desc = tf.NodeDescription("MapDefun") - arguments_ = [convert(Tensor{Any}, x) for x = arguments_] - captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_] - tf.add_input(desc, arguments_) - tf.add_input(desc, captured_inputs_) - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if Tcaptured !== nothing - desc["Tcaptured"] = map(Base.identity, Tcaptured) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - end - tf.Tensor(tf.Operation(desc)) + function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + local desc + tf.with_op_name(name, "MapDefun") do + desc = tf.NodeDescription("MapDefun") + arguments_ = [convert(Tensor{Any}, x) for x = arguments_] + captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_] + tf.add_input(desc, arguments_) + tf.add_input(desc, captured_inputs_) + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + if f !== nothing + desc["f"] = Base.identity(f) + end end + tf.Tensor(tf.Operation(desc)) + end function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) desc = tf.EagerOp("MapDefun") arguments_ = convert(tf.EagerTensor, arguments_) @@ -42810,13 +42810,13 @@ begin return res[1] end end - function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) - if tf.in_eager_mode() - map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) - else - map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing) + if tf.in_eager_mode() + map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, 
Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) + else + map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f) + end end - end end @@ -42826,38 +42826,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do - desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do + desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler") + true_classes_ = convert(Tensor{Int64}, true_classes_) + tf.add_input(desc, true_classes_) + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler") true_classes_ = convert(tf.EagerTensor, true_classes_) @@ -42887,13 +42887,13 @@ begin return res end end - function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + 
thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - end end @@ -42903,30 +42903,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") if table_id !== nothing @@ -42948,13 +42948,13 @@ begin return res end end - function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -42964,22 +42964,22 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) - local desc - tf.with_op_name(name, "ParallelConcat") do - desc = tf.NodeDescription("ParallelConcat") - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "ParallelConcat") do + desc = tf.NodeDescription("ParallelConcat") + values_ = [convert(Tensor{Any}, x) for x = values_] + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, values_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) desc = tf.EagerOp("ParallelConcat") values_ = convert(tf.EagerTensor, values_) @@ -42998,13 +42998,13 @@ begin return res[1] end end - function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) - if tf.in_eager_mode() - parallel_concat_eager(values_; name=name, N=N, shape=shape) - else - parallel_concat_graph(values_; name=name, N=N, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) + if tf.in_eager_mode() + parallel_concat_eager(values_; name=name, N=N, shape=shape) + else + parallel_concat_graph(values_; name=name, N=N, shape=shape) + end end - end end @@ -43014,21 +43014,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableFindV2") do - desc = tf.NodeDescription("LookupTableFindV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - default_value_ = convert(Tensor{Any}, default_value_) - (keys_,) = tf.tf_promote(keys_) - (default_value_,) = tf.tf_promote(default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFindV2") do + desc = tf.NodeDescription("LookupTableFindV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + default_value_ = convert(Tensor{Any}, default_value_) + (keys_,) = tf.tf_promote(keys_) + (default_value_,) = tf.tf_promote(default_value_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, default_value_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) desc = tf.EagerOp("LookupTableFindV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -43046,13 +43046,13 @@ begin return res[1] end end - function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) - if tf.in_eager_mode() - lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name) - else - lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) + if tf.in_eager_mode() + lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name) + else + lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name) + end end - end end @@ -43062,17 +43062,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeDeserialize") do - desc = tf.NodeDescription("TensorForestTreeDeserialize") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tree_config_ = convert(Tensor{String}, tree_config_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, tree_config_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeDeserialize") do + desc = tf.NodeDescription("TensorForestTreeDeserialize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tree_config_ = convert(Tensor{String}, tree_config_) + tf.add_input(desc, tree_handle_) + tf.add_input(desc, tree_config_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing) desc = tf.EagerOp("TensorForestTreeDeserialize") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -43086,13 +43086,13 @@ begin return res[1] end end - function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name) - else - tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name) + else + tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name) + end end - end end @@ -43102,30 +43102,30 @@ end Retrieve embedding parameters for a single table. 
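# [Editor's note] Call sites are unchanged by this patch: the same wrapper works
# in both modes. A hypothetical usage sketch for an op rewritten above (the
# handle names are illustrative, and a populated lookup table is assumed):
res = lookup_table_find_v2(table_handle, keys, default_value)
# graph mode -> `res` is a symbolic tf.Tensor, evaluated later, e.g. run(sess, res)
# eager mode -> `res` is an eager tensor already holding the looked-up values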
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_momentum_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters") if table_id !== nothing @@ -43147,13 +43147,13 @@ begin return res end end - function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -43163,27 +43163,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do - desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") - inputs_ = convert(Tensor{Float32}, inputs_) - tf.add_input(desc, inputs_) - if min !== nothing - desc["min"] = Base.identity(min) - end - if max !== nothing - desc["max"] = Base.identity(max) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing 
- desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do + desc = tf.NodeDescription("FakeQuantWithMinMaxArgs") + inputs_ = convert(Tensor{Float32}, inputs_) + tf.add_input(desc, inputs_) + if min !== nothing + desc["min"] = Base.identity(min) + end + if max !== nothing + desc["max"] = Base.identity(max) + end + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxArgs") inputs_ = convert(tf.EagerTensor, inputs_) @@ -43207,13 +43207,13 @@ begin return res[1] end end - function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + end end - end end @@ -43223,23 +43223,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyGradientDescent") do - desc = tf.NodeDescription("ResourceApplyGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - delta_ = convert(Tensor{Any}, delta_) - (alpha_, delta_) = tf.tf_promote(alpha_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyGradientDescent") do + desc = tf.NodeDescription("ResourceApplyGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + delta_ = convert(Tensor{Any}, delta_) + (alpha_, delta_) = tf.tf_promote(alpha_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -43260,13 +43260,13 @@ begin return res[1] end end - function resource_apply_gradient_descent(var_, alpha_, 
delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) - else - resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) + else + resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + end end - end end @@ -43276,27 +43276,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do - desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - window_size_ = convert(Tensor{Int64}, window_size_) - window_shift_ = convert(Tensor{Int64}, window_shift_) - window_stride_ = convert(Tensor{Int64}, window_stride_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, window_size_) - tf.add_input(desc, window_shift_) - tf.add_input(desc, window_stride_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do + desc = tf.NodeDescription("ExperimentalSlidingWindowDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + window_size_ = convert(Tensor{Int64}, window_size_) + window_shift_ = convert(Tensor{Int64}, window_shift_) + window_stride_ = convert(Tensor{Int64}, window_stride_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, window_size_) + tf.add_input(desc, window_shift_) + tf.add_input(desc, window_stride_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalSlidingWindowDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -43320,13 +43320,13 @@ begin return res[1] end end - function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -43336,21 +43336,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) - local desc - tf.with_op_name(name, "DecodeRaw") do - desc = tf.NodeDescription("DecodeRaw") - bytes_ = convert(Tensor{String}, bytes_) - tf.add_input(desc, bytes_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if little_endian !== nothing - desc["little_endian"] = Base.Bool(little_endian) - end + function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + local desc + tf.with_op_name(name, "DecodeRaw") do + desc = tf.NodeDescription("DecodeRaw") + bytes_ = convert(Tensor{String}, bytes_) + tf.add_input(desc, bytes_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if little_endian !== nothing + desc["little_endian"] = Base.Bool(little_endian) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing) desc = tf.EagerOp("DecodeRaw") bytes_ = convert(tf.EagerTensor, bytes_) @@ -43368,13 +43368,13 @@ begin return res[1] end end - function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) - if tf.in_eager_mode() - decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian) - else - decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing) + if tf.in_eager_mode() + decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian) + else + decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian) + end end - end end @@ -43384,32 +43384,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, 
+            num_bits=nothing, narrow_range=nothing)
+        local desc
+        tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do
+            desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient")
+            gradients_ = convert(Tensor{Float32}, gradients_)
+            inputs_ = convert(Tensor{Float32}, inputs_)
+            min_ = convert(Tensor{Float32}, min_)
+            max_ = convert(Tensor{Float32}, max_)
+            tf.add_input(desc, gradients_)
+            tf.add_input(desc, inputs_)
+            tf.add_input(desc, min_)
+            tf.add_input(desc, max_)
+            if num_bits !== nothing
+                desc["num_bits"] = Base.Int(num_bits)
             end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:3
-                push!(out, tf.Tensor(op, out_idx))
+            if narrow_range !== nothing
+                desc["narrow_range"] = Base.Bool(narrow_range)
             end
-            out
         end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
     function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
         desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient")
         gradients_ = convert(tf.EagerTensor, gradients_)
@@ -43433,13 +43433,13 @@ begin
             return res
         end
     end
-    function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
-        if tf.in_eager_mode()
-            fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
-        else
-            fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing)
+        if tf.in_eager_mode()
+            fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+        else
+            fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range)
+        end
     end
-    end
 end
@@ -43449,27 +43449,27 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing)
-            local desc
-            tf.with_op_name(name, "UniqueWithCountsV2") do
-                desc = tf.NodeDescription("UniqueWithCountsV2")
-                x_ = convert(Tensor{Any}, x_)
-                axis_ = convert(Tensor{Int64}, axis_)
-                (x_,) = tf.tf_promote(x_)
-                (axis_,) = tf.tf_promote(axis_)
-                tf.add_input(desc, x_)
-                tf.add_input(desc, axis_)
-                if out_idx !== nothing
-                    desc["out_idx"] = Base.identity(out_idx)
-                end
+    function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing)
+        local desc
+        tf.with_op_name(name, "UniqueWithCountsV2") do
+            desc = tf.NodeDescription("UniqueWithCountsV2")
+            x_ = convert(Tensor{Any}, x_)
+            axis_ = convert(Tensor{Int64}, axis_)
+            (x_,) = tf.tf_promote(x_)
+            (axis_,) = tf.tf_promote(axis_)
+            tf.add_input(desc, x_)
+            tf.add_input(desc, axis_)
+            if out_idx !== nothing
+                desc["out_idx"] = Base.identity(out_idx)
             end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:3
-                push!(out, tf.Tensor(op, out_idx))
-            end
-            out
         end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
     function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing)
         desc = tf.EagerOp("UniqueWithCountsV2")
         x_ = convert(tf.EagerTensor, x_)
@@ -43488,13 +43488,13 @@ begin
             return res
         end
     end
-    function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing)
-        if tf.in_eager_mode()
-            unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx)
-        else
-            unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing)
+        if tf.in_eager_mode()
+            unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx)
+        else
+            unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx)
+        end
     end
-    end
 end
@@ -43504,23 +43504,23 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
-            local desc
-            tf.with_op_name(name, "ExperimentalSleepDataset") do
-                desc = tf.NodeDescription("ExperimentalSleepDataset")
-                input_dataset_ = convert(Tensor{Any}, input_dataset_)
-                sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_)
-                tf.add_input(desc, input_dataset_)
-                tf.add_input(desc, sleep_microseconds_)
-                if output_types !== nothing
-                    desc["output_types"] = map(Base.identity, output_types)
-                end
-                if output_shapes !== nothing
-                    desc["output_shapes"] = map(Base.identity, output_shapes)
-                end
+    function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalSleepDataset") do
+            desc = tf.NodeDescription("ExperimentalSleepDataset")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_)
+            tf.add_input(desc, input_dataset_)
+            tf.add_input(desc, sleep_microseconds_)
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
             end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
         desc = tf.EagerOp("ExperimentalSleepDataset")
         input_dataset_ = convert(tf.EagerTensor, input_dataset_)
@@ -43540,13 +43540,13 @@ begin
             return res[1]
         end
     end
-    function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.in_eager_mode()
-            experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
-        else
-            experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.in_eager_mode()
+            experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
+        else
+            experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes)
+        end
     end
-    end
 end
@@ -43556,24 +43556,24 @@ end
 Operator that connects the output of an N-way replicated TPU computation to N separate outputs.
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing)
-            local desc
-            tf.with_op_name(name, "TPUReplicatedOutput") do
-                desc = tf.NodeDescription("TPUReplicatedOutput")
-                input_ = convert(Tensor{Any}, input_)
-                (input_,) = tf.tf_promote(input_)
-                tf.add_input(desc, input_)
-                if num_replicas !== nothing
-                    desc["num_replicas"] = Base.Int(num_replicas)
-                end
-            end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:num_replicas
-                push!(out, tf.Tensor(op, out_idx))
+    function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing)
+        local desc
+        tf.with_op_name(name, "TPUReplicatedOutput") do
+            desc = tf.NodeDescription("TPUReplicatedOutput")
+            input_ = convert(Tensor{Any}, input_)
+            (input_,) = tf.tf_promote(input_)
+            tf.add_input(desc, input_)
+            if num_replicas !== nothing
+                desc["num_replicas"] = Base.Int(num_replicas)
             end
-            out
         end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:num_replicas
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
     function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing)
         desc = tf.EagerOp("TPUReplicatedOutput")
         input_ = convert(tf.EagerTensor, input_)
@@ -43589,13 +43589,13 @@ begin
             return res
         end
     end
-    function tpu_replicated_output(input_; name=nothing, num_replicas=nothing)
-        if tf.in_eager_mode()
-            tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas)
-        else
-            tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing)
+        if tf.in_eager_mode()
+            tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas)
+        else
+            tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas)
+        end
     end
-    end
 end
@@ -43605,21 +43605,21 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing)
-            local desc
-            tf.with_op_name(name, "LowerBound") do
-                desc = tf.NodeDescription("LowerBound")
-                sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_)
-                values_ = convert(Tensor{Any}, values_)
-                (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_)
-                tf.add_input(desc, sorted_inputs_)
-                tf.add_input(desc, values_)
-                if out_type !== nothing
-                    desc["out_type"] = Base.identity(out_type)
-                end
+    function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing)
+        local desc
+        tf.with_op_name(name, "LowerBound") do
+            desc = tf.NodeDescription("LowerBound")
+            sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_)
+            values_ = convert(Tensor{Any}, values_)
+            (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_)
+            tf.add_input(desc, sorted_inputs_)
+            tf.add_input(desc, values_)
+            if out_type !== nothing
+                desc["out_type"] = Base.identity(out_type)
             end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing)
         desc = tf.EagerOp("LowerBound")
         sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_)
@@ -43638,13 +43638,13 @@ begin
             return res[1]
         end
     end
-    function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing)
-        if tf.in_eager_mode()
-            lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type)
-        else
-            lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing)
+        if tf.in_eager_mode()
+            lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type)
+        else
+            lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type)
+        end
     end
-    end
 end
@@ -43654,16 +43654,16 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tan_graph(x_; name=nothing)
-            local desc
-            tf.with_op_name(name, "Tan") do
-                desc = tf.NodeDescription("Tan")
-                x_ = convert(Tensor{Any}, x_)
-                (x_,) = tf.tf_promote(x_)
-                tf.add_input(desc, x_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function tan_graph(x_; name=nothing)
+        local desc
+        tf.with_op_name(name, "Tan") do
+            desc = tf.NodeDescription("Tan")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function tan_eager(x_; name=nothing)
         desc = tf.EagerOp("Tan")
         x_ = convert(tf.EagerTensor, x_)
@@ -43676,13 +43676,13 @@ begin
            return res[1]
        end
    end
-    function tan(x_; name=nothing)
-        if tf.in_eager_mode()
-            tan_eager(x_; name=name)
-        else
-            tan_graph(x_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tan(x_; name=nothing)
+        if tf.in_eager_mode()
+            tan_eager(x_; name=name)
+        else
+            tan_graph(x_; name=name)
+        end
     end
-    end
 end
@@ -43692,25 +43692,25 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
-            local desc
-            tf.with_op_name(name, "Enter") do
-                desc = tf.NodeDescription("Enter")
-                data_ = convert(Tensor{Any}, data_)
-                (data_,) = tf.tf_promote(data_)
-                tf.add_input(desc, data_)
-                if frame_name !== nothing
-                    desc["frame_name"] = Base.String(frame_name)
-                end
-                if is_constant !== nothing
-                    desc["is_constant"] = Base.Bool(is_constant)
-                end
-                if parallel_iterations !== nothing
-                    desc["parallel_iterations"] = Base.Int(parallel_iterations)
-                end
+    function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+        local desc
+        tf.with_op_name(name, "Enter") do
+            desc = tf.NodeDescription("Enter")
+            data_ = convert(Tensor{Any}, data_)
+            (data_,) = tf.tf_promote(data_)
+            tf.add_input(desc, data_)
+            if frame_name !== nothing
+                desc["frame_name"] = Base.String(frame_name)
+            end
+            if is_constant !== nothing
+                desc["is_constant"] = Base.Bool(is_constant)
+            end
+            if parallel_iterations !== nothing
+                desc["parallel_iterations"] = Base.Int(parallel_iterations)
             end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
         desc = tf.EagerOp("Enter")
         data_ = convert(tf.EagerTensor, data_)
@@ -43732,13 +43732,13 @@ begin
            return res[1]
        end
    end
-    function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
-        if tf.in_eager_mode()
-            enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
-        else
-            enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing)
+        if tf.in_eager_mode()
+            enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+        else
+            enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations)
+        end
     end
-    end
 end
@@ -43748,24 +43748,24 @@ end
 An op which feeds multiple Tensor values into the computation as an XLA tuple.
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
-            local desc
-            tf.with_op_name(name, "InfeedEnqueueTuple") do
-                desc = tf.NodeDescription("InfeedEnqueueTuple")
-                inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
-                tf.add_input(desc, inputs_)
-                if dtypes !== nothing
-                    desc["dtypes"] = map(Base.identity, dtypes)
-                end
-                if shapes !== nothing
-                    desc["shapes"] = map(Base.identity, shapes)
-                end
-                if device_ordinal !== nothing
-                    desc["device_ordinal"] = Base.Int(device_ordinal)
-                end
-            end
-            tf.Tensor(tf.Operation(desc))
+    function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+        local desc
+        tf.with_op_name(name, "InfeedEnqueueTuple") do
+            desc = tf.NodeDescription("InfeedEnqueueTuple")
+            inputs_ = [convert(Tensor{Any}, x) for x = inputs_]
+            tf.add_input(desc, inputs_)
+            if dtypes !== nothing
+                desc["dtypes"] = map(Base.identity, dtypes)
+            end
+            if shapes !== nothing
+                desc["shapes"] = map(Base.identity, shapes)
+            end
+            if device_ordinal !== nothing
+                desc["device_ordinal"] = Base.Int(device_ordinal)
+            end
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
         desc = tf.EagerOp("InfeedEnqueueTuple")
         inputs_ = convert(tf.EagerTensor, inputs_)
@@ -43786,13 +43786,13 @@ begin
            return res[1]
        end
    end
-    function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
-        if tf.in_eager_mode()
-            infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
-        else
-            infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing)
+        if tf.in_eager_mode()
+            infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+        else
+            infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal)
+        end
     end
-    end
 end
@@ -43802,16 +43802,16 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function square_graph(x_; name=nothing)
-            local desc
-            tf.with_op_name(name, "Square") do
-                desc = tf.NodeDescription("Square")
-                x_ = convert(Tensor{Any}, x_)
-                (x_,) = tf.tf_promote(x_)
-                tf.add_input(desc, x_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function square_graph(x_; name=nothing)
+        local desc
+        tf.with_op_name(name, "Square") do
+            desc = tf.NodeDescription("Square")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function square_eager(x_; name=nothing)
         desc = tf.EagerOp("Square")
         x_ = convert(tf.EagerTensor, x_)
@@ -43824,13 +43824,13 @@ begin
            return res[1]
        end
    end
-    function square(x_; name=nothing)
-        if tf.in_eager_mode()
-            square_eager(x_; name=name)
-        else
-            square_graph(x_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function square(x_; name=nothing)
+        if tf.in_eager_mode()
+            square_eager(x_; name=name)
+        else
+            square_graph(x_; name=name)
+        end
     end
-    end
 end
@@ -43840,15 +43840,15 @@ end
 An op that informs a host of the global ids of all the of TPUs in the
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _set_global_tpu_array_graph(topology_; name=nothing)
-            local desc
-            tf.with_op_name(name, "_SetGlobalTPUArray") do
-                desc = tf.NodeDescription("_SetGlobalTPUArray")
-                topology_ = convert(Tensor{String}, topology_)
-                tf.add_input(desc, topology_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function _set_global_tpu_array_graph(topology_; name=nothing)
+        local desc
+        tf.with_op_name(name, "_SetGlobalTPUArray") do
+            desc = tf.NodeDescription("_SetGlobalTPUArray")
+            topology_ = convert(Tensor{String}, topology_)
+            tf.add_input(desc, topology_)
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function _set_global_tpu_array_eager(topology_; name=nothing)
         desc = tf.EagerOp("_SetGlobalTPUArray")
         topology_ = convert(tf.EagerTensor, topology_)
@@ -43860,13 +43860,13 @@ begin
            return res[1]
        end
    end
-    function _set_global_tpu_array(topology_; name=nothing)
-        if tf.in_eager_mode()
-            _set_global_tpu_array_eager(topology_; name=name)
-        else
-            _set_global_tpu_array_graph(topology_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _set_global_tpu_array(topology_; name=nothing)
+        if tf.in_eager_mode()
+            _set_global_tpu_array_eager(topology_; name=name)
+        else
+            _set_global_tpu_array_graph(topology_; name=name)
+        end
     end
-    end
 end
@@ -43876,16 +43876,16 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function debug_gradient_ref_identity_graph(input_; name=nothing)
-            local desc
-            tf.with_op_name(name, "DebugGradientRefIdentity") do
-                desc = tf.NodeDescription("DebugGradientRefIdentity")
-                input_ = convert(Tensor{Any}, input_)
-                (input_,) = tf.tf_promote(input_)
-                tf.add_input(desc, input_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function debug_gradient_ref_identity_graph(input_; name=nothing)
+        local desc
+        tf.with_op_name(name, "DebugGradientRefIdentity") do
+            desc = tf.NodeDescription("DebugGradientRefIdentity")
+            input_ = convert(Tensor{Any}, input_)
+            (input_,) = tf.tf_promote(input_)
+            tf.add_input(desc, input_)
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function debug_gradient_ref_identity_eager(input_; name=nothing)
         desc = tf.EagerOp("DebugGradientRefIdentity")
         input_ = convert(tf.EagerTensor, input_)
@@ -43898,13 +43898,13 @@ begin
            return res[1]
        end
    end
-    function debug_gradient_ref_identity(input_; name=nothing)
-        if tf.in_eager_mode()
-            debug_gradient_ref_identity_eager(input_; name=name)
-        else
-            debug_gradient_ref_identity_graph(input_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing)
+        if tf.in_eager_mode()
+            debug_gradient_ref_identity_eager(input_; name=name)
+        else
+            debug_gradient_ref_identity_graph(input_; name=name)
+        end
     end
-    end
 end
@@ -43914,31 +43914,31 @@ end
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
-            local desc
-            tf.with_op_name(name, "ApplyAdadelta") do
-                desc = tf.NodeDescription("ApplyAdadelta")
-                var_ = convert(Tensor{Any}, var_)
-                accum_ = convert(Tensor{Any}, accum_)
-                accum_update_ = convert(Tensor{Any}, accum_update_)
-                lr_ = convert(Tensor{Any}, lr_)
-                rho_ = convert(Tensor{Any}, rho_)
-                epsilon_ = convert(Tensor{Any}, epsilon_)
-                grad_ = convert(Tensor{Any}, grad_)
-                (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_)
-                tf.add_input(desc, var_)
-                tf.add_input(desc, accum_)
-                tf.add_input(desc, accum_update_)
-                tf.add_input(desc, lr_)
-                tf.add_input(desc, rho_)
-                tf.add_input(desc, epsilon_)
-                tf.add_input(desc, grad_)
-                if use_locking !== nothing
-                    desc["use_locking"] = Base.Bool(use_locking)
-                end
-            end
-            tf.Tensor(tf.Operation(desc))
+    function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+        local desc
+        tf.with_op_name(name, "ApplyAdadelta") do
+            desc = tf.NodeDescription("ApplyAdadelta")
+            var_ = convert(Tensor{Any}, var_)
+            accum_ = convert(Tensor{Any}, accum_)
+            accum_update_ = convert(Tensor{Any}, accum_update_)
+            lr_ = convert(Tensor{Any}, lr_)
+            rho_ = convert(Tensor{Any}, rho_)
+            epsilon_ = convert(Tensor{Any}, epsilon_)
+            grad_ = convert(Tensor{Any}, grad_)
+            (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_)
+            tf.add_input(desc, var_)
+            tf.add_input(desc, accum_)
+            tf.add_input(desc, accum_update_)
+            tf.add_input(desc, lr_)
+            tf.add_input(desc, rho_)
+            tf.add_input(desc, epsilon_)
+            tf.add_input(desc, grad_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ApplyAdadelta")
         var_ = convert(tf.EagerTensor, var_)
@@ -43972,13 +43972,13 @@ begin
            return res[1]
        end
    end
-    function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
-        if tf.in_eager_mode()
-            apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
-        else
-            apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing)
+        if tf.in_eager_mode()
+            apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+        else
+            apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking)
+        end
     end
-    end
 end
@@ -43988,45 +43988,45 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
-            local desc
-            tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do
-                desc = tf.NodeDescription("ExperimentalGroupByWindowDataset")
-                input_dataset_ = convert(Tensor{Any}, input_dataset_)
-                key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_]
-                reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_]
-                window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_]
-                tf.add_input(desc, input_dataset_)
-                tf.add_input(desc, key_func_other_arguments_)
-                tf.add_input(desc, reduce_func_other_arguments_)
-                tf.add_input(desc, window_size_func_other_arguments_)
-                if key_func !== nothing
-                    desc["key_func"] = Base.identity(key_func)
-                end
-                if reduce_func !== nothing
-                    desc["reduce_func"] = Base.identity(reduce_func)
-                end
-                if window_size_func !== nothing
-                    desc["window_size_func"] = Base.identity(window_size_func)
-                end
-                if Tkey_func_other_arguments !== nothing
-                    desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
-                end
-                if Treduce_func_other_arguments !== nothing
-                    desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
-                end
-                if Twindow_size_func_other_arguments !== nothing
-                    desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments)
-                end
-                if output_types !== nothing
-                    desc["output_types"] = map(Base.identity, output_types)
-                end
-                if output_shapes !== nothing
-                    desc["output_shapes"] = map(Base.identity, output_shapes)
-                end
-            end
-            tf.Tensor(tf.Operation(desc))
+    function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+        local desc
+        tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do
+            desc = tf.NodeDescription("ExperimentalGroupByWindowDataset")
+            input_dataset_ = convert(Tensor{Any}, input_dataset_)
+            key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_]
+            reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_]
+            window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_]
+            tf.add_input(desc, input_dataset_)
+            tf.add_input(desc, key_func_other_arguments_)
+            tf.add_input(desc, reduce_func_other_arguments_)
+            tf.add_input(desc, window_size_func_other_arguments_)
+            if key_func !== nothing
+                desc["key_func"] = Base.identity(key_func)
+            end
+            if reduce_func !== nothing
+                desc["reduce_func"] = Base.identity(reduce_func)
+            end
+            if window_size_func !== nothing
+                desc["window_size_func"] = Base.identity(window_size_func)
+            end
+            if Tkey_func_other_arguments !== nothing
+                desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments)
+            end
+            if Treduce_func_other_arguments !== nothing
+                desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments)
+            end
+            if Twindow_size_func_other_arguments !== nothing
+                desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments)
+            end
+            if output_types !== nothing
+                desc["output_types"] = map(Base.identity, output_types)
+            end
+            if output_shapes !== nothing
+                desc["output_shapes"] = map(Base.identity, output_shapes)
+            end
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
         desc = tf.EagerOp("ExperimentalGroupByWindowDataset")
         input_dataset_ = convert(tf.EagerTensor, input_dataset_)
@@ -44068,13 +44068,13 @@ begin
            return res[1]
        end
    end
-    function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.in_eager_mode()
-            experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
-        else
-            experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing)
+        if tf.in_eager_mode()
+            experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+        else
+            experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes)
+        end
     end
-    end
 end
@@ -44084,23 +44084,23 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
-            local desc
-            tf.with_op_name(name, "AudioSummary") do
-                desc = tf.NodeDescription("AudioSummary")
-                tag_ = convert(Tensor{String}, tag_)
-                tensor_ = convert(Tensor{Float32}, tensor_)
-                tf.add_input(desc, tag_)
-                tf.add_input(desc, tensor_)
-                if sample_rate !== nothing
-                    desc["sample_rate"] = Base.identity(sample_rate)
-                end
-                if max_outputs !== nothing
-                    desc["max_outputs"] = Base.Int(max_outputs)
-                end
+    function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
+        local desc
+        tf.with_op_name(name, "AudioSummary") do
+            desc = tf.NodeDescription("AudioSummary")
+            tag_ = convert(Tensor{String}, tag_)
+            tensor_ = convert(Tensor{Float32}, tensor_)
+            tf.add_input(desc, tag_)
+            tf.add_input(desc, tensor_)
+            if sample_rate !== nothing
+                desc["sample_rate"] = Base.identity(sample_rate)
+            end
+            if max_outputs !== nothing
+                desc["max_outputs"] = Base.Int(max_outputs)
             end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
         desc = tf.EagerOp("AudioSummary")
         tag_ = convert(tf.EagerTensor, tag_)
@@ -44120,13 +44120,13 @@ begin
            return res[1]
        end
    end
-    function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
-        if tf.in_eager_mode()
-            audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
-        else
-            audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing)
+        if tf.in_eager_mode()
+            audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
+        else
+            audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs)
+        end
     end
-    end
 end
@@ -44136,18 +44136,18 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function squared_difference_graph(x_, y_; name=nothing)
-            local desc
-            tf.with_op_name(name, "SquaredDifference") do
-                desc = tf.NodeDescription("SquaredDifference")
-                x_ = convert(Tensor{Any}, x_)
-                y_ = convert(Tensor{Any}, y_)
-                (x_, y_) = tf.tf_promote(x_, y_)
-                tf.add_input(desc, x_)
-                tf.add_input(desc, y_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function squared_difference_graph(x_, y_; name=nothing)
+        local desc
+        tf.with_op_name(name, "SquaredDifference") do
+            desc = tf.NodeDescription("SquaredDifference")
+            x_ = convert(Tensor{Any}, x_)
+            y_ = convert(Tensor{Any}, y_)
+            (x_, y_) = tf.tf_promote(x_, y_)
+            tf.add_input(desc, x_)
+            tf.add_input(desc, y_)
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function squared_difference_eager(x_, y_; name=nothing)
         desc = tf.EagerOp("SquaredDifference")
         x_ = convert(tf.EagerTensor, x_)
@@ -44163,13 +44163,13 @@ begin
            return res[1]
        end
    end
-    function squared_difference(x_, y_; name=nothing)
-        if tf.in_eager_mode()
-            squared_difference_eager(x_, y_; name=name)
-        else
-            squared_difference_graph(x_, y_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squared_difference(x_, y_; name=nothing)
+        if tf.in_eager_mode()
+            squared_difference_eager(x_, y_; name=name)
+        else
+            squared_difference_graph(x_, y_; name=name)
+        end
     end
-    end
 end
@@ -44179,25 +44179,25 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
-            local desc
-            tf.with_op_name(name, "ScatterNdUpdate") do
-                desc = tf.NodeDescription("ScatterNdUpdate")
-                ref_ = convert(Tensor{Any}, ref_)
-                indices_ = convert(Tensor{Any}, indices_)
-                indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
-                updates_ = convert(Tensor{Any}, updates_)
-                (ref_, updates_) = tf.tf_promote(ref_, updates_)
-                (indices_,) = tf.tf_promote(indices_)
-                tf.add_input(desc, ref_)
-                tf.add_input(desc, indices_)
-                tf.add_input(desc, updates_)
-                if use_locking !== nothing
-                    desc["use_locking"] = Base.Bool(use_locking)
-                end
+    function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+        local desc
+        tf.with_op_name(name, "ScatterNdUpdate") do
+            desc = tf.NodeDescription("ScatterNdUpdate")
+            ref_ = convert(Tensor{Any}, ref_)
+            indices_ = convert(Tensor{Any}, indices_)
+            indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+            updates_ = convert(Tensor{Any}, updates_)
+            (ref_, updates_) = tf.tf_promote(ref_, updates_)
+            (indices_,) = tf.tf_promote(indices_)
+            tf.add_input(desc, ref_)
+            tf.add_input(desc, indices_)
+            tf.add_input(desc, updates_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing)
         desc = tf.EagerOp("ScatterNdUpdate")
         ref_ = convert(tf.EagerTensor, ref_)
@@ -44219,13 +44219,13 @@ begin
            return res[1]
        end
    end
-    function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
-        if tf.in_eager_mode()
-            scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
-        else
-            scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing)
+        if tf.in_eager_mode()
+            scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        else
+            scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking)
+        end
     end
-    end
 end
@@ -44235,21 +44235,21 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing)
-            local desc
-            tf.with_op_name(name, "DynamicStitch") do
-                desc = tf.NodeDescription("DynamicStitch")
-                indices_ = [convert(Tensor{Int32}, x) for x = indices_]
-                data_ = [convert(Tensor{Any}, x) for x = data_]
-                (data_,) = tf.tf_promote(data_)
-                tf.add_input(desc, indices_)
-                tf.add_input(desc, data_)
-                if N !== nothing
-                    desc["N"] = Base.Int(N)
-                end
+    function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing)
+        local desc
+        tf.with_op_name(name, "DynamicStitch") do
+            desc = tf.NodeDescription("DynamicStitch")
+            indices_ = [convert(Tensor{Int32}, x) for x = indices_]
+            data_ = [convert(Tensor{Any}, x) for x = data_]
+            (data_,) = tf.tf_promote(data_)
+            tf.add_input(desc, indices_)
+            tf.add_input(desc, data_)
+            if N !== nothing
+                desc["N"] = Base.Int(N)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing)
         desc = tf.EagerOp("DynamicStitch")
         indices_ = convert(tf.EagerTensor, indices_)
@@ -44267,13 +44267,13 @@ begin
            return res[1]
        end
    end
-    function dynamic_stitch(indices_, data_; name=nothing, N=nothing)
-        if tf.in_eager_mode()
-            dynamic_stitch_eager(indices_, data_; name=name, N=N)
-        else
-            dynamic_stitch_graph(indices_, data_; name=name, N=N)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing)
+        if tf.in_eager_mode()
+            dynamic_stitch_eager(indices_, data_; name=name, N=N)
+        else
+            dynamic_stitch_graph(indices_, data_; name=name, N=N)
+        end
     end
-    end
 end
@@ -44283,16 +44283,16 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ones_like_graph(x_; name=nothing)
-            local desc
-            tf.with_op_name(name, "OnesLike") do
-                desc = tf.NodeDescription("OnesLike")
-                x_ = convert(Tensor{Any}, x_)
-                (x_,) = tf.tf_promote(x_)
-                tf.add_input(desc, x_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function ones_like_graph(x_; name=nothing)
+        local desc
+        tf.with_op_name(name, "OnesLike") do
+            desc = tf.NodeDescription("OnesLike")
+            x_ = convert(Tensor{Any}, x_)
+            (x_,) = tf.tf_promote(x_)
+            tf.add_input(desc, x_)
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function ones_like_eager(x_; name=nothing)
         desc = tf.EagerOp("OnesLike")
         x_ = convert(tf.EagerTensor, x_)
@@ -44305,13 +44305,13 @@ begin
            return res[1]
        end
    end
-    function ones_like(x_; name=nothing)
-        if tf.in_eager_mode()
-            ones_like_eager(x_; name=name)
-        else
-            ones_like_graph(x_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ones_like(x_; name=nothing)
+        if tf.in_eager_mode()
+            ones_like_eager(x_; name=name)
+        else
+            ones_like_graph(x_; name=name)
+        end
     end
-    end
 end
@@ -44321,27 +44321,27 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
-            local desc
-            tf.with_op_name(name, "FractionalMaxPoolGrad") do
-                desc = tf.NodeDescription("FractionalMaxPoolGrad")
-                orig_input_ = convert(Tensor{Any}, orig_input_)
-                orig_output_ = convert(Tensor{Any}, orig_output_)
-                out_backprop_ = convert(Tensor{Any}, out_backprop_)
-                row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_)
-                col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_)
-                (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_)
-                tf.add_input(desc, orig_input_)
-                tf.add_input(desc, orig_output_)
-                tf.add_input(desc, out_backprop_)
-                tf.add_input(desc, row_pooling_sequence_)
-                tf.add_input(desc, col_pooling_sequence_)
-                if overlapping !== nothing
-                    desc["overlapping"] = Base.Bool(overlapping)
-                end
+    function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+        local desc
+        tf.with_op_name(name, "FractionalMaxPoolGrad") do
+            desc = tf.NodeDescription("FractionalMaxPoolGrad")
+            orig_input_ = convert(Tensor{Any}, orig_input_)
+            orig_output_ = convert(Tensor{Any}, orig_output_)
+            out_backprop_ = convert(Tensor{Any}, out_backprop_)
+            row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_)
+            col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_)
+            (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_)
+            tf.add_input(desc, orig_input_)
+            tf.add_input(desc, orig_output_)
+            tf.add_input(desc, out_backprop_)
+            tf.add_input(desc, row_pooling_sequence_)
+            tf.add_input(desc, col_pooling_sequence_)
+            if overlapping !== nothing
+                desc["overlapping"] = Base.Bool(overlapping)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
         desc = tf.EagerOp("FractionalMaxPoolGrad")
         orig_input_ = convert(tf.EagerTensor, orig_input_)
@@ -44367,13 +44367,13 @@ begin
            return res[1]
        end
    end
-    function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
-        if tf.in_eager_mode()
-            fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
-        else
-            fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing)
+        if tf.in_eager_mode()
+            fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+        else
+            fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping)
+        end
     end
-    end
 end
@@ -44383,26 +44383,26 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
-            local desc
-            tf.with_op_name(name, "RemoteCall") do
-                desc = tf.NodeDescription("RemoteCall")
-                target_ = convert(Tensor{String}, target_)
-                args_ = [convert(Tensor{Any}, x) for x = args_]
-                tf.add_input(desc, target_)
-                tf.add_input(desc, args_)
-                if Tin !== nothing
-                    desc["Tin"] = map(Base.identity, Tin)
-                end
-                if Tout !== nothing
-                    desc["Tout"] = map(Base.identity, Tout)
-                end
-                if f !== nothing
-                    desc["f"] = Base.identity(f)
-                end
+    function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+        local desc
+        tf.with_op_name(name, "RemoteCall") do
+            desc = tf.NodeDescription("RemoteCall")
+            target_ = convert(Tensor{String}, target_)
+            args_ = [convert(Tensor{Any}, x) for x = args_]
+            tf.add_input(desc, target_)
+            tf.add_input(desc, args_)
+            if Tin !== nothing
+                desc["Tin"] = map(Base.identity, Tin)
+            end
+            if Tout !== nothing
+                desc["Tout"] = map(Base.identity, Tout)
+            end
+            if f !== nothing
+                desc["f"] = Base.identity(f)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
         desc = tf.EagerOp("RemoteCall")
         target_ = convert(tf.EagerTensor, target_)
@@ -44425,13 +44425,13 @@ begin
            return res[1]
        end
    end
-    function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
-        if tf.in_eager_mode()
-            remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
-        else
-            remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+        if tf.in_eager_mode()
+            remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
+        else
+            remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f)
+        end
     end
-    end
 end
@@ -44441,23 +44441,23 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_graph(params_, indices_; name=nothing, validate_indices=nothing)
-            local desc
-            tf.with_op_name(name, "Gather") do
-                desc = tf.NodeDescription("Gather")
-                params_ = convert(Tensor{Any}, params_)
-                indices_ = convert(Tensor{Any}, indices_)
-                indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
-                (params_,) = tf.tf_promote(params_)
-                (indices_,) = tf.tf_promote(indices_)
-                tf.add_input(desc, params_)
-                tf.add_input(desc, indices_)
-                if validate_indices !== nothing
-                    desc["validate_indices"] = Base.Bool(validate_indices)
-                end
+    function gather_graph(params_, indices_; name=nothing, validate_indices=nothing)
+        local desc
+        tf.with_op_name(name, "Gather") do
+            desc = tf.NodeDescription("Gather")
+            params_ = convert(Tensor{Any}, params_)
+            indices_ = convert(Tensor{Any}, indices_)
+            indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+            (params_,) = tf.tf_promote(params_)
+            (indices_,) = tf.tf_promote(indices_)
+            tf.add_input(desc, params_)
+            tf.add_input(desc, indices_)
+            if validate_indices !== nothing
+                desc["validate_indices"] = Base.Bool(validate_indices)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function gather_eager(params_, indices_; name=nothing, validate_indices=nothing)
         desc = tf.EagerOp("Gather")
         params_ = convert(tf.EagerTensor, params_)
@@ -44476,13 +44476,13 @@ begin
            return res[1]
        end
    end
-    function gather(params_, indices_; name=nothing, validate_indices=nothing)
-        if tf.in_eager_mode()
-            gather_eager(params_, indices_; name=name, validate_indices=validate_indices)
-        else
-            gather_graph(params_, indices_; name=name, validate_indices=validate_indices)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing)
+        if tf.in_eager_mode()
+            gather_eager(params_, indices_; name=name, validate_indices=validate_indices)
+        else
+            gather_graph(params_, indices_; name=name, validate_indices=validate_indices)
+        end
     end
-    end
 end
@@ -44492,38 +44492,38 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
-            local desc
-            tf.with_op_name(name, "QuantizedMatMul") do
-                desc = tf.NodeDescription("QuantizedMatMul")
-                a_ = convert(Tensor{Any}, a_)
-                b_ = convert(Tensor{Any}, b_)
-                min_a_ = convert(Tensor{Float32}, min_a_)
-                max_a_ = convert(Tensor{Float32}, max_a_)
-                min_b_ = convert(Tensor{Float32}, min_b_)
-                max_b_ = convert(Tensor{Float32}, max_b_)
-                (a_,) = tf.tf_promote(a_)
-                (b_,) = tf.tf_promote(b_)
-                tf.add_input(desc, a_)
-                tf.add_input(desc, b_)
-                tf.add_input(desc, min_a_)
-                tf.add_input(desc, max_a_)
-                tf.add_input(desc, min_b_)
-                tf.add_input(desc, max_b_)
-                if transpose_a !== nothing
-                    desc["transpose_a"] = Base.Bool(transpose_a)
-                end
-                if transpose_b !== nothing
-                    desc["transpose_b"] = Base.Bool(transpose_b)
-                end
-            end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:3
-                push!(out, tf.Tensor(op, out_idx))
-            end
-            out
+    function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+        local desc
+        tf.with_op_name(name, "QuantizedMatMul") do
+            desc = tf.NodeDescription("QuantizedMatMul")
+            a_ = convert(Tensor{Any}, a_)
+            b_ = convert(Tensor{Any}, b_)
+            min_a_ = convert(Tensor{Float32}, min_a_)
+            max_a_ = convert(Tensor{Float32}, max_a_)
+            min_b_ = convert(Tensor{Float32}, min_b_)
+            max_b_ = convert(Tensor{Float32}, max_b_)
+            (a_,) = tf.tf_promote(a_)
+            (b_,) = tf.tf_promote(b_)
+            tf.add_input(desc, a_)
+            tf.add_input(desc, b_)
+            tf.add_input(desc, min_a_)
+            tf.add_input(desc, max_a_)
+            tf.add_input(desc, min_b_)
+            tf.add_input(desc, max_b_)
+            if transpose_a !== nothing
+                desc["transpose_a"] = Base.Bool(transpose_a)
+            end
+            if transpose_b !== nothing
+                desc["transpose_b"] = Base.Bool(transpose_b)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
        end
+        out
+    end
     function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
         desc = tf.EagerOp("QuantizedMatMul")
         a_ = convert(tf.EagerTensor, a_)
@@ -44553,48 +44553,48 @@ begin
            return res
        end
    end
-    function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
-        if tf.in_eager_mode()
-            quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
-        else
-            quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing)
+        if tf.in_eager_mode()
+            quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+        else
+            quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b)
+        end
     end
-    end
 end
 
 """
-    unicode_decode_with_offsets(input; errors=replace, replacement_char=65533, replace_control_characters=false)
+    unicode_decode_with_offsets(input; errors=, replacement_char=65533, replace_control_characters=false)
 
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
-            local desc
-            tf.with_op_name(name, "UnicodeDecodeWithOffsets") do
-                desc = tf.NodeDescription("UnicodeDecodeWithOffsets")
-                input_ = convert(Tensor{String}, input_)
-                tf.add_input(desc, input_)
-                if input_encoding !== nothing
-                    desc["input_encoding"] = Base.String(input_encoding)
-                end
-                if errors !== nothing
-                    desc["errors"] = Base.String(errors)
-                end
-                if replacement_char !== nothing
-                    desc["replacement_char"] = Base.Int(replacement_char)
-                end
-                if replace_control_characters !== nothing
-                    desc["replace_control_characters"] = Base.Bool(replace_control_characters)
-                end
+    function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+        local desc
+        tf.with_op_name(name, "UnicodeDecodeWithOffsets") do
+            desc = tf.NodeDescription("UnicodeDecodeWithOffsets")
+            input_ = convert(Tensor{String}, input_)
+            tf.add_input(desc, input_)
+            if input_encoding !== nothing
+                desc["input_encoding"] = Base.String(input_encoding)
+            end
+            if errors !== nothing
+                desc["errors"] = Base.String(errors)
            end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:3
-                push!(out, tf.Tensor(op, out_idx))
+            if replacement_char !== nothing
+                desc["replacement_char"] = Base.Int(replacement_char)
            end
-            out
+            if replace_control_characters !== nothing
+                desc["replace_control_characters"] = Base.Bool(replace_control_characters)
+            end
+        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
        end
+        out
+    end
     function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
         desc = tf.EagerOp("UnicodeDecodeWithOffsets")
         input_ = convert(tf.EagerTensor, input_)
@@ -44618,13 +44618,13 @@ begin
            return res
        end
    end
-    function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
-        if tf.in_eager_mode()
-            unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
-        else
-            unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing)
+        if tf.in_eager_mode()
+            unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+        else
+            unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters)
+        end
     end
-    end
 end
@@ -44634,23 +44634,23 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
-            local desc
-            tf.with_op_name(name, "AccumulatorApplyGradient") do
-                desc = tf.NodeDescription("AccumulatorApplyGradient")
-                handle_ = convert(Tensor{String}, handle_)
-                local_step_ = convert(Tensor{Int64}, local_step_)
-                gradient_ = convert(Tensor{Any}, gradient_)
-                (gradient_,) = tf.tf_promote(gradient_)
-                tf.add_input(desc, handle_)
-                tf.add_input(desc, local_step_)
-                tf.add_input(desc, gradient_)
-                if dtype !== nothing
-                    desc["dtype"] = Base.identity(dtype)
-                end
+    function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
+        local desc
+        tf.with_op_name(name, "AccumulatorApplyGradient") do
+            desc = tf.NodeDescription("AccumulatorApplyGradient")
+            handle_ = convert(Tensor{String}, handle_)
+            local_step_ = convert(Tensor{Int64}, local_step_)
+            gradient_ = convert(Tensor{Any}, gradient_)
+            (gradient_,) = tf.tf_promote(gradient_)
+            tf.add_input(desc, handle_)
+            tf.add_input(desc, local_step_)
+            tf.add_input(desc, gradient_)
+            if dtype !== nothing
+                desc["dtype"] = Base.identity(dtype)
            end
-            tf.Tensor(tf.Operation(desc))
         end
+        tf.Tensor(tf.Operation(desc))
+    end
     function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
         desc = tf.EagerOp("AccumulatorApplyGradient")
         handle_ = convert(tf.EagerTensor, handle_)
@@ -44670,13 +44670,13 @@ begin
            return res[1]
        end
    end
-    function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
-        if tf.in_eager_mode()
-            accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype)
-        else
-            accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing)
+        if tf.in_eager_mode()
+            accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype)
+        else
+            accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype)
+        end
     end
-    end
 end
@@ -44686,33 +44686,33 @@ end
 This Op eases the porting of code that uses tf.nn.embedding_lookup_sparse().
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
-            local desc
-            tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do
-                desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch")
-                sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_]
-                embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_]
-                aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_]
-                mode_override_ = convert(Tensor{String}, mode_override_)
-                tf.add_input(desc, sample_indices_)
-                tf.add_input(desc, embedding_indices_)
-                tf.add_input(desc, aggregation_weights_)
-                tf.add_input(desc, mode_override_)
-                if N !== nothing
-                    desc["N"] = Base.Int(N)
-                end
-                if device_ordinal !== nothing
-                    desc["device_ordinal"] = Base.Int(device_ordinal)
-                end
-                if combiners !== nothing
-                    desc["combiners"] = map(Base.identity, combiners)
-                end
-                if table_ids !== nothing
-                    desc["table_ids"] = map(Base.identity, table_ids)
-                end
-            end
-            tf.Tensor(tf.Operation(desc))
+    function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
+        local desc
+        tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do
+            desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch")
+            sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_]
+            embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_]
+            aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_]
+            mode_override_ = convert(Tensor{String}, mode_override_)
+            tf.add_input(desc, sample_indices_)
+            tf.add_input(desc, embedding_indices_)
+            tf.add_input(desc, aggregation_weights_)
+            tf.add_input(desc, mode_override_)
+            if N !== nothing
+                desc["N"] = Base.Int(N)
+            end
+            if device_ordinal !== nothing
+                desc["device_ordinal"] = Base.Int(device_ordinal)
+            end
+            if combiners !== nothing
+                desc["combiners"] = map(Base.identity, combiners)
+            end
+            if table_ids !== nothing
+                desc["table_ids"] = map(Base.identity, table_ids)
+            end
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
         desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch")
         sample_indices_ = convert(tf.EagerTensor, sample_indices_)
@@ -44742,13 +44742,13 @@ begin
            return res[1]
        end
    end
-    function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
-        if tf.in_eager_mode()
-            enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
-        else
-            enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing)
+        if tf.in_eager_mode()
+            enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
+        else
+            enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids)
+        end
     end
-    end
 end
@@ -44758,24 +44758,24 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
-            local desc
-            tf.with_op_name(name, "WriteSummary") do
-                desc = tf.NodeDescription("WriteSummary")
-                writer_ = convert(Tensor{Any}, writer_)
-                step_ = convert(Tensor{Int64}, step_)
-                tensor_ = convert(Tensor{Any}, tensor_)
-                tag_ = convert(Tensor{String}, tag_)
-                summary_metadata_ = convert(Tensor{String}, summary_metadata_)
-                (tensor_,) = tf.tf_promote(tensor_)
-                tf.add_input(desc, writer_)
-                tf.add_input(desc, step_)
-                tf.add_input(desc, tensor_)
-                tf.add_input(desc, tag_)
-                tf.add_input(desc, summary_metadata_)
-            end
-            tf.Tensor(tf.Operation(desc))
+    function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
+        local desc
+        tf.with_op_name(name, "WriteSummary") do
+            desc = tf.NodeDescription("WriteSummary")
+            writer_ = convert(Tensor{Any}, writer_)
+            step_ = convert(Tensor{Int64}, step_)
+            tensor_ = convert(Tensor{Any}, tensor_)
+            tag_ = convert(Tensor{String}, tag_)
+            summary_metadata_ = convert(Tensor{String}, summary_metadata_)
+            (tensor_,) = tf.tf_promote(tensor_)
+            tf.add_input(desc, writer_)
+            tf.add_input(desc, step_)
+            tf.add_input(desc, tensor_)
+            tf.add_input(desc, tag_)
+            tf.add_input(desc, summary_metadata_)
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
         desc = tf.EagerOp("WriteSummary")
         writer_ = convert(tf.EagerTensor, writer_)
@@ -44796,13 +44796,13 @@ begin
            return res[1]
        end
    end
-    function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
-        if tf.in_eager_mode()
-            write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
-        else
-            write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing)
+        if tf.in_eager_mode()
+            write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
+        else
+            write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name)
+        end
     end
-    end
 end
@@ -44812,44 +44812,44 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-            local desc
-            tf.with_op_name(name, "QuantizedConv2D") do
-                desc = tf.NodeDescription("QuantizedConv2D")
-                input_ = convert(Tensor{Any}, input_)
-                filter_ = convert(Tensor{Any}, filter_)
-                min_input_ = convert(Tensor{Float32}, min_input_)
-                max_input_ = convert(Tensor{Float32}, max_input_)
-                min_filter_ = convert(Tensor{Float32}, min_filter_)
-                max_filter_ = convert(Tensor{Float32}, max_filter_)
-                (filter_,) = tf.tf_promote(filter_)
-                (input_,) = tf.tf_promote(input_)
-                tf.add_input(desc, input_)
-                tf.add_input(desc, filter_)
-                tf.add_input(desc, min_input_)
-                tf.add_input(desc, max_input_)
-                tf.add_input(desc, min_filter_)
-                tf.add_input(desc, max_filter_)
-                if out_type !== nothing
-                    desc["out_type"] = Base.identity(out_type)
-                end
-                if strides !== nothing
-                    desc["strides"] = map(Base.identity, strides)
-                end
-                if padding !== nothing
-                    desc["padding"] = Base.String(padding)
-                end
-                if dilations !== nothing
-                    desc["dilations"] = map(Base.identity, dilations)
-                end
-            end
-            out = tf.Tensor[]
-            op = tf.Operation(desc)
-            for out_idx = 1:3
-                push!(out, tf.Tensor(op, out_idx))
-            end
-            out
+    function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+        local desc
+        tf.with_op_name(name, "QuantizedConv2D") do
+            desc = tf.NodeDescription("QuantizedConv2D")
+            input_ = convert(Tensor{Any}, input_)
+            filter_ = convert(Tensor{Any}, filter_)
+            min_input_ = convert(Tensor{Float32}, min_input_)
+            max_input_ = convert(Tensor{Float32}, max_input_)
+            min_filter_ = convert(Tensor{Float32}, min_filter_)
+            max_filter_ = convert(Tensor{Float32}, max_filter_)
+            (filter_,) = tf.tf_promote(filter_)
+            (input_,) = tf.tf_promote(input_)
+            tf.add_input(desc, input_)
+            tf.add_input(desc, filter_)
+            tf.add_input(desc, min_input_)
+            tf.add_input(desc, max_input_)
+            tf.add_input(desc, min_filter_)
+            tf.add_input(desc, max_filter_)
+            if out_type !== nothing
+                desc["out_type"] = Base.identity(out_type)
+            end
+            if strides !== nothing
+                desc["strides"] = map(Base.identity, strides)
+            end
+            if padding !== nothing
+                desc["padding"] = Base.String(padding)
+            end
+            if dilations !== nothing
+                desc["dilations"] = map(Base.identity, dilations)
+            end
        end
+        out = tf.Tensor[]
+        op = tf.Operation(desc)
+        for out_idx = 1:3
+            push!(out, tf.Tensor(op, out_idx))
+        end
+        out
+    end
     function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
         desc = tf.EagerOp("QuantizedConv2D")
         input_ = convert(tf.EagerTensor, input_)
@@ -44885,13 +44885,13 @@ begin
            return res
        end
    end
-    function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
-        if tf.in_eager_mode()
-            quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
-        else
-            quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing)
+        if tf.in_eager_mode()
+            quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+        else
+            quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations)
+        end
     end
-    end
 end
@@ -44901,30 +44901,30 @@
 
 """
 begin
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
-            local desc
-            tf.with_op_name(name, "ResourceApplyMomentum") do
-                desc = tf.NodeDescription("ResourceApplyMomentum")
-                var_ = convert(Tensor{Any}, var_)
-                accum_ = convert(Tensor{Any}, accum_)
-                lr_ = convert(Tensor{Any}, lr_)
-                grad_ = convert(Tensor{Any}, grad_)
-                momentum_ = convert(Tensor{Any}, momentum_)
-                (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
-                tf.add_input(desc, var_)
-                tf.add_input(desc, accum_)
-                tf.add_input(desc, lr_)
-                tf.add_input(desc, grad_)
-                tf.add_input(desc, momentum_)
-                if use_locking !== nothing
-                    desc["use_locking"] = Base.Bool(use_locking)
-                end
-                if use_nesterov !== nothing
-                    desc["use_nesterov"] = Base.Bool(use_nesterov)
-                end
-            end
-            tf.Tensor(tf.Operation(desc))
+    function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+        local desc
+        tf.with_op_name(name, "ResourceApplyMomentum") do
+            desc = tf.NodeDescription("ResourceApplyMomentum")
+            var_ = convert(Tensor{Any}, var_)
+            accum_ = convert(Tensor{Any}, accum_)
+            lr_ = convert(Tensor{Any}, lr_)
+            grad_ = convert(Tensor{Any}, grad_)
+            momentum_ = convert(Tensor{Any}, momentum_)
+            (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_)
+            tf.add_input(desc, var_)
+            tf.add_input(desc, accum_)
+            tf.add_input(desc, lr_)
+            tf.add_input(desc, grad_)
+            tf.add_input(desc, momentum_)
+            if use_locking !== nothing
+                desc["use_locking"] = Base.Bool(use_locking)
+            end
+            if use_nesterov !== nothing
+                desc["use_nesterov"] = Base.Bool(use_nesterov)
+            end
        end
+        tf.Tensor(tf.Operation(desc))
+    end
     function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing,
use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyMomentum") var_ = convert(tf.EagerTensor, var_) @@ -44953,13 +44953,13 @@ begin return res[1] end end - function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -44969,16 +44969,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log1p_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Log1p") do - desc = tf.NodeDescription("Log1p") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function log1p_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Log1p") do + desc = tf.NodeDescription("Log1p") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function log1p_eager(x_; name=nothing) desc = tf.EagerOp("Log1p") x_ = convert(tf.EagerTensor, x_) @@ -44991,13 +44991,13 @@ begin return res[1] end end - function log1p(x_; name=nothing) - if tf.in_eager_mode() - log1p_eager(x_; name=name) - else - log1p_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log1p(x_; name=nothing) + if tf.in_eager_mode() + log1p_eager(x_; name=name) + else + log1p_graph(x_; name=name) + end end - end end @@ -45007,28 +45007,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapClear") do - desc = tf.NodeDescription("OrderedMapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapClear") do + desc = tf.NodeDescription("OrderedMapClear") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + 
if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapClear") if capacity !== nothing @@ -45053,13 +45053,13 @@ begin return res[1] end end - function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -45069,25 +45069,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterUpdate") do - desc = tf.NodeDescription("ResourceScatterUpdate") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterUpdate") do + desc = tf.NodeDescription("ResourceScatterUpdate") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterUpdate") resource_ = convert(tf.EagerTensor, resource_) @@ -45108,13 +45108,13 @@ begin return res[1] end end - function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end end @@ -45124,34 +45124,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "BarrierTakeMany") do - desc = tf.NodeDescription("BarrierTakeMany") - handle_ = convert(Tensor{String}, handle_) - num_elements_ = convert(Tensor{Int32}, num_elements_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_elements_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if allow_small_batch !== nothing - desc["allow_small_batch"] = Base.Bool(allow_small_batch) - end - if wait_for_incomplete !== nothing - desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "BarrierTakeMany") do + desc = tf.NodeDescription("BarrierTakeMany") + handle_ = convert(Tensor{String}, handle_) + num_elements_ = convert(Tensor{Int32}, num_elements_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_elements_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if allow_small_batch !== nothing + desc["allow_small_batch"] = Base.Bool(allow_small_batch) + end + if wait_for_incomplete !== nothing + desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) desc = tf.EagerOp("BarrierTakeMany") handle_ = convert(tf.EagerTensor, handle_) @@ -45177,13 +45177,13 @@ begin return res end end - function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) - else - barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, 
timeout_ms=nothing) + if tf.in_eager_mode() + barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + else + barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + end end - end end @@ -45193,30 +45193,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceApplyKerasMomentum") do - desc = tf.NodeDescription("ResourceApplyKerasMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceApplyKerasMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceApplyKerasMomentum") var_ = convert(tf.EagerTensor, var_) @@ -45245,13 +45245,13 @@ begin return res[1] end end - function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, 
use_nesterov=use_nesterov) + end end - end end @@ -45261,34 +45261,34 @@ end Generates serialized partition messages suitable for batch reads. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - local desc - tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do - desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") - if project_id !== nothing - desc["project_id"] = Base.String(project_id) - end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) - end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - end - tf.Tensor(tf.Operation(desc)) + function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do + desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end end + tf.Tensor(tf.Operation(desc)) + end function generate_big_query_reader_partitions_eager(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) desc = tf.EagerOp("GenerateBigQueryReaderPartitions") if project_id !== nothing @@ -45319,13 +45319,13 @@ begin return res[1] end end - function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - if tf.in_eager_mode() - generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) - else - generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, 
test_end_point=nothing) + if tf.in_eager_mode() + generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + else + generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + end end - end end @@ -45335,24 +45335,24 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "_XlaRecvAtHost") do - desc = tf.NodeDescription("_XlaRecvAtHost") - dynamic_key_ = convert(Tensor{String}, dynamic_key_) - tf.add_input(desc, dynamic_key_) - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) - end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) + function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaRecvAtHost") do + desc = tf.NodeDescription("_XlaRecvAtHost") + dynamic_key_ = convert(Tensor{String}, dynamic_key_) + tf.add_input(desc, dynamic_key_) + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + if key !== nothing + desc["key"] = Base.String(key) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end end + tf.Tensor(tf.Operation(desc)) + end function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) desc = tf.EagerOp("_XlaRecvAtHost") dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) @@ -45373,13 +45373,13 @@ begin return res[1] end end - function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) - else - _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + else + _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + end end - end end @@ -45389,34 +45389,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "QuantizedAvgPool") do - desc = tf.NodeDescription("QuantizedAvgPool") - input_ = convert(Tensor{Any}, input_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, 
input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "QuantizedAvgPool") do + desc = tf.NodeDescription("QuantizedAvgPool") + input_ = convert(Tensor{Any}, input_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("QuantizedAvgPool") input_ = convert(tf.EagerTensor, input_) @@ -45442,13 +45442,13 @@ begin return res end end - function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end end @@ -45458,39 +45458,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do - desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - vhat_ = convert(Tensor{Any}, vhat_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, vhat_) - 
tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do + desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + vhat_ = convert(Tensor{Any}, vhat_) + beta1_power_ = convert(Tensor{Any}, beta1_power_) + beta2_power_ = convert(Tensor{Any}, beta2_power_) + lr_ = convert(Tensor{Any}, lr_) + beta1_ = convert(Tensor{Any}, beta1_) + beta2_ = convert(Tensor{Any}, beta2_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, vhat_) + tf.add_input(desc, beta1_power_) + tf.add_input(desc, beta2_power_) + tf.add_input(desc, lr_) + tf.add_input(desc, beta1_) + tf.add_input(desc, beta2_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") var_ = convert(tf.EagerTensor, var_) @@ -45532,13 +45532,13 @@ begin return res[1] end end - function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -45548,31 +45548,31 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_HostRecv") do - desc = tf.NodeDescription("_HostRecv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) + function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostRecv") do + desc = tf.NodeDescription("_HostRecv") + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end end + tf.Tensor(tf.Operation(desc)) + end function _host_recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) desc = tf.EagerOp("_HostRecv") if tensor_type !== nothing @@ -45600,13 +45600,13 @@ begin return res[1] end end - function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - end end @@ -45616,23 
+45616,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCenterBias") do - desc = tf.NodeDescription("BoostedTreesCenterBias") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - mean_gradients_ = convert(Tensor{Float32}, mean_gradients_) - mean_hessians_ = convert(Tensor{Float32}, mean_hessians_) - l1_ = convert(Tensor{Float32}, l1_) - l2_ = convert(Tensor{Float32}, l2_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, mean_gradients_) - tf.add_input(desc, mean_hessians_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - end - tf.Tensor(tf.Operation(desc)) + function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCenterBias") do + desc = tf.NodeDescription("BoostedTreesCenterBias") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + mean_gradients_ = convert(Tensor{Float32}, mean_gradients_) + mean_hessians_ = convert(Tensor{Float32}, mean_hessians_) + l1_ = convert(Tensor{Float32}, l1_) + l2_ = convert(Tensor{Float32}, l2_) + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, mean_gradients_) + tf.add_input(desc, mean_hessians_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) desc = tf.EagerOp("BoostedTreesCenterBias") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -45652,13 +45652,13 @@ begin return res[1] end end - function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - if tf.in_eager_mode() - boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) - else - boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + if tf.in_eager_mode() + boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + else + boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + end end - end end @@ -45668,15 +45668,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_size_v2_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableSizeV2") do - desc = tf.NodeDescription("LookupTableSizeV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - tf.add_input(desc, table_handle_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_size_v2_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableSizeV2") do + desc = tf.NodeDescription("LookupTableSizeV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + tf.add_input(desc, table_handle_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_size_v2_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableSizeV2") table_handle_ = convert(tf.EagerTensor, 
table_handle_) @@ -45688,13 +45688,13 @@ begin return res[1] end end - function lookup_table_size_v2(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_size_v2_eager(table_handle_; name=name) - else - lookup_table_size_v2_graph(table_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_size_v2_eager(table_handle_; name=name) + else + lookup_table_size_v2_graph(table_handle_; name=name) + end end - end end @@ -45704,17 +45704,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT") do - desc = tf.NodeDescription("IRFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function irfft_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT") do + desc = tf.NodeDescription("IRFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function irfft_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT") input_ = convert(tf.EagerTensor, input_) @@ -45728,13 +45728,13 @@ begin return res[1] end end - function irfft(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft_eager(input_, fft_length_; name=name) - else - irfft_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft_eager(input_, fft_length_; name=name) + else + irfft_graph(input_, fft_length_; name=name) + end end - end end @@ -45744,20 +45744,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_add_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceAdd") do - desc = tf.NodeDescription("InplaceAdd") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) + function inplace_add_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceAdd") do + desc = tf.NodeDescription("InplaceAdd") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) end + tf.Tensor(tf.Operation(desc)) + end function inplace_add_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceAdd") x_ = convert(tf.EagerTensor, x_) @@ -45775,37 +45775,37 @@ begin return res[1] end end - function inplace_add(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_add_eager(x_, i_, v_; name=name) - else - inplace_add_graph(x_, i_, v_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_add(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_add_eager(x_, i_, v_; name=name) + else + inplace_add_graph(x_, i_, v_; name=name) + end end - end end """ - 
bias_add(value, bias; data_format=NHWC) + bias_add(value, bias; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "BiasAdd") do - desc = tf.NodeDescription("BiasAdd") - value_ = convert(Tensor{Any}, value_) - bias_ = convert(Tensor{Any}, bias_) - (value_, bias_) = tf.tf_promote(value_, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAdd") do + desc = tf.NodeDescription("BiasAdd") + value_ = convert(Tensor{Any}, value_) + bias_ = convert(Tensor{Any}, bias_) + (value_, bias_) = tf.tf_promote(value_, bias_) + tf.add_input(desc, value_) + tf.add_input(desc, bias_) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) desc = tf.EagerOp("BiasAdd") value_ = convert(tf.EagerTensor, value_) @@ -45824,13 +45824,13 @@ begin return res[1] end end - function bias_add(value_, bias_; name=nothing, data_format=nothing) - if tf.in_eager_mode() - bias_add_eager(value_, bias_; name=name, data_format=data_format) - else - bias_add_graph(value_, bias_; name=name, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing) + if tf.in_eager_mode() + bias_add_eager(value_, bias_; name=name, data_format=data_format) + else + bias_add_graph(value_, bias_; name=name, data_format=data_format) + end end - end end @@ -45840,14 +45840,14 @@ end An op that disconnects the TPUs on a host from a running distributed """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) - local desc - tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do - desc = tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") - end - tf.Tensor(tf.Operation(desc)) + function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + local desc + tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do + desc = tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") + end + tf.Tensor(tf.Operation(desc)) + end function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") res = tf.execute(desc) @@ -45857,13 +45857,13 @@ begin return res[1] end end - function _disconnect_host_from_distributed_tpu_system(; name=nothing) - if tf.in_eager_mode() - _disconnect_host_from_distributed_tpu_system_eager(; name=name) - else - _disconnect_host_from_distributed_tpu_system_graph(; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing) + if tf.in_eager_mode() + _disconnect_host_from_distributed_tpu_system_eager(; name=name) + else + _disconnect_host_from_distributed_tpu_system_graph(; name=name) + end end - end end @@ -45873,33 +45873,33 @@ end Load embedding parameters for a single table.
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - velocities_ = convert(Tensor{Float32}, velocities_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + momenta_ = convert(Tensor{Float32}, momenta_) + velocities_ = convert(Tensor{Float32}, velocities_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, momenta_) + tf.add_input(desc, velocities_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -45929,13 +45929,13 @@ begin return res[1] end end - function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; 
name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -45945,25 +45945,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_range_graph(starts_, limits_, deltas_; name=nothing) - local desc - tf.with_op_name(name, "RaggedRange") do - desc = tf.NodeDescription("RaggedRange") - starts_ = convert(Tensor{Int32}, starts_) - limits_ = convert(Tensor{Int32}, limits_) - deltas_ = convert(Tensor{Int32}, deltas_) - (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_) - tf.add_input(desc, starts_) - tf.add_input(desc, limits_) - tf.add_input(desc, deltas_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + local desc + tf.with_op_name(name, "RaggedRange") do + desc = tf.NodeDescription("RaggedRange") + starts_ = convert(Tensor{Int32}, starts_) + limits_ = convert(Tensor{Int32}, limits_) + deltas_ = convert(Tensor{Int32}, deltas_) + (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_) + tf.add_input(desc, starts_) + tf.add_input(desc, limits_) + tf.add_input(desc, deltas_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function ragged_range_eager(starts_, limits_, deltas_; name=nothing) desc = tf.EagerOp("RaggedRange") starts_ = convert(tf.EagerTensor, starts_) @@ -45982,13 +45982,13 @@ begin return res end end - function ragged_range(starts_, limits_, deltas_; name=nothing) - if tf.in_eager_mode() - ragged_range_eager(starts_, limits_, deltas_; name=name) - else - ragged_range_graph(starts_, limits_, deltas_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing) + if tf.in_eager_mode() + ragged_range_eager(starts_, limits_, deltas_; name=name) + else + ragged_range_graph(starts_, limits_, deltas_; name=name) + end end - end end @@ -45998,29 +45998,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "WindowDataset") do - desc = tf.NodeDescription("WindowDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - size_ = convert(Tensor{Int64}, size_) - shift_ = convert(Tensor{Int64}, shift_) - stride_ = convert(Tensor{Int64}, stride_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, size_) - tf.add_input(desc, shift_) - tf.add_input(desc, stride_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + 
function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "WindowDataset") do + desc = tf.NodeDescription("WindowDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + size_ = convert(Tensor{Int64}, size_) + shift_ = convert(Tensor{Int64}, shift_) + stride_ = convert(Tensor{Int64}, stride_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, size_) + tf.add_input(desc, shift_) + tf.add_input(desc, stride_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("WindowDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -46046,13 +46046,13 @@ begin return res[1] end end - function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - else - window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -46062,16 +46062,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "Diag") do - desc = tf.NodeDescription("Diag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) - end - tf.Tensor(tf.Operation(desc)) + function diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "Diag") do + desc = tf.NodeDescription("Diag") + diagonal_ = convert(Tensor{Any}, diagonal_) + (diagonal_,) = tf.tf_promote(diagonal_) + tf.add_input(desc, diagonal_) end + tf.Tensor(tf.Operation(desc)) + end function diag_eager(diagonal_; name=nothing) desc = tf.EagerOp("Diag") diagonal_ = convert(tf.EagerTensor, diagonal_) @@ -46084,13 +46084,13 @@ begin return res[1] end end - function diag(diagonal_; name=nothing) - if tf.in_eager_mode() - diag_eager(diagonal_; name=name) - else - diag_graph(diagonal_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag(diagonal_; name=nothing) + if tf.in_eager_mode() + diag_eager(diagonal_; name=name) + else + diag_graph(diagonal_; name=name) + end end - end end @@ -46100,19 +46100,19 @@ end A placeholder op for a value 
that will be fed into the computation. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "InfeedDequeue") do - desc = tf.NodeDescription("InfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end + function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "InfeedDequeue") do + desc = tf.NodeDescription("InfeedDequeue") + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if shape !== nothing + desc["shape"] = Base.identity(shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function infeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing) desc = tf.EagerOp("InfeedDequeue") if dtype !== nothing @@ -46128,13 +46128,13 @@ begin return res[1] end end - function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) - else - infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) + else + infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) + end end - end end @@ -46144,23 +46144,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do - desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tag_ = convert(Tensor{String}, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do + desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + tag_ = convert(Tensor{String}, tag_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, tag_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalLatencyStatsDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -46180,13 +46180,13 @@ begin return res[1] end end - function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, 
output_shapes=output_shapes) - else - experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -46196,26 +46196,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "AddSparseToTensorsMap") do - desc = tf.NodeDescription("AddSparseToTensorsMap") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddSparseToTensorsMap") do + desc = tf.NodeDescription("AddSparseToTensorsMap") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddSparseToTensorsMap") sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) @@ -46238,13 +46238,13 @@ begin return res[1] end end - function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - else - add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + 
add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end end - end end @@ -46254,33 +46254,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - local desc - tf.with_op_name(name, "RaggedGather") do - desc = tf.NodeDescription("RaggedGather") - params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_] - params_dense_values_ = convert(Tensor{Any}, params_dense_values_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (indices_,) = tf.tf_promote(indices_) - (params_dense_values_,) = tf.tf_promote(params_dense_values_) - tf.add_input(desc, params_nested_splits_) - tf.add_input(desc, params_dense_values_) - tf.add_input(desc, indices_) - if PARAMS_RAGGED_RANK !== nothing - desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) - end - if OUTPUT_RAGGED_RANK !== nothing - desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) - end + function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + local desc + tf.with_op_name(name, "RaggedGather") do + desc = tf.NodeDescription("RaggedGather") + params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_] + params_dense_values_ = convert(Tensor{Any}, params_dense_values_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (indices_,) = tf.tf_promote(indices_) + (params_dense_values_,) = tf.tf_promote(params_dense_values_) + tf.add_input(desc, params_nested_splits_) + tf.add_input(desc, params_dense_values_) + tf.add_input(desc, indices_) + if PARAMS_RAGGED_RANK !== nothing + desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if OUTPUT_RAGGED_RANK !== nothing + desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) desc = tf.EagerOp("RaggedGather") params_nested_splits_ = convert(tf.EagerTensor, params_nested_splits_) @@ -46304,13 +46304,13 @@ begin return res end end - function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - if tf.in_eager_mode() - ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) - else - ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + if tf.in_eager_mode() + ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, 
PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + else + ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + end end - end end @@ -46320,16 +46320,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function rgb_to_hsv_graph(images_; name=nothing) - local desc - tf.with_op_name(name, "RGBToHSV") do - desc = tf.NodeDescription("RGBToHSV") - images_ = convert(Tensor{Float32}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - end - tf.Tensor(tf.Operation(desc)) + function rgb_to_hsv_graph(images_; name=nothing) + local desc + tf.with_op_name(name, "RGBToHSV") do + desc = tf.NodeDescription("RGBToHSV") + images_ = convert(Tensor{Float32}, images_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) end + tf.Tensor(tf.Operation(desc)) + end function rgb_to_hsv_eager(images_; name=nothing) desc = tf.EagerOp("RGBToHSV") images_ = convert(tf.EagerTensor, images_) @@ -46342,13 +46342,13 @@ begin return res[1] end end - function rgb_to_hsv(images_; name=nothing) - if tf.in_eager_mode() - rgb_to_hsv_eager(images_; name=name) - else - rgb_to_hsv_graph(images_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rgb_to_hsv(images_; name=nothing) + if tf.in_eager_mode() + rgb_to_hsv_eager(images_; name=name) + else + rgb_to_hsv_graph(images_; name=name) + end end - end end @@ -46358,15 +46358,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do - desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - tf.add_input(desc, multi_device_iterator_) - end - tf.Tensor(tf.Operation(desc)) + function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + tf.add_input(desc, multi_device_iterator_) end + tf.Tensor(tf.Operation(desc)) + end function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) @@ -46378,13 +46378,13 @@ begin return res[1] end end - function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) - if tf.in_eager_mode() - multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) - else - multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) + if tf.in_eager_mode() + multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) + else + multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) + end end - end end @@ -46394,27 +46394,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function for__graph(start_, 
limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - local desc - tf.with_op_name(name, "For") do - desc = tf.NodeDescription("For") - start_ = convert(Tensor{Int32}, start_) - limit_ = convert(Tensor{Int32}, limit_) - delta_ = convert(Tensor{Int32}, delta_) - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if body !== nothing - desc["body"] = Base.identity(body) - end + function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + local desc + tf.with_op_name(name, "For") do + desc = tf.NodeDescription("For") + start_ = convert(Tensor{Int32}, start_) + limit_ = convert(Tensor{Int32}, limit_) + delta_ = convert(Tensor{Int32}, delta_) + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, delta_) + tf.add_input(desc, input_) + if T !== nothing + desc["T"] = map(Base.identity, T) + end + if body !== nothing + desc["body"] = Base.identity(body) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) desc = tf.EagerOp("For") start_ = convert(tf.EagerTensor, start_) @@ -46438,13 +46438,13 @@ begin return res[1] end end - function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - if tf.in_eager_mode() - for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) - else - for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + if tf.in_eager_mode() + for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) + else + for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) + end end - end end @@ -46454,30 +46454,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceMaxSparse") do - desc = tf.NodeDescription("SparseReduceMaxSparse") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMaxSparse") do + desc = tf.NodeDescription("SparseReduceMaxSparse") + input_indices_ = convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + 
tf.add_input(desc, input_shape_) + tf.add_input(desc, reduction_axes_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("SparseReduceMaxSparse") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -46499,13 +46499,13 @@ begin return res end end - function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end end @@ -46515,25 +46515,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ConcatOffset") do - desc = tf.NodeDescription("ConcatOffset") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - shape_ = [convert(Tensor{Int32}, x) for x = shape_] - tf.add_input(desc, concat_dim_) - tf.add_input(desc, shape_) - if N !== nothing - desc["N"] = Base.Int(N) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) + function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatOffset") do + desc = tf.NodeDescription("ConcatOffset") + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + shape_ = [convert(Tensor{Int32}, x) for x = shape_] + tf.add_input(desc, concat_dim_) + tf.add_input(desc, shape_) + if N !== nothing + desc["N"] = Base.Int(N) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) desc = tf.EagerOp("ConcatOffset") concat_dim_ = convert(tf.EagerTensor, concat_dim_) @@ -46550,13 +46550,13 @@ begin return res end end - function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_offset_eager(concat_dim_, shape_; name=name, N=N) - else - concat_offset_graph(concat_dim_, shape_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_offset_eager(concat_dim_, shape_; name=name, N=N) + else + concat_offset_graph(concat_dim_, shape_; name=name, N=N) + end end - end end @@ -46566,30 +46566,30 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Stage") do - desc = tf.NodeDescription("Stage") - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Stage") do + desc = tf.NodeDescription("Stage") + values_ = [convert(Tensor{Any}, x) for x = values_] + tf.add_input(desc, values_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Stage") values_ = convert(tf.EagerTensor, values_) @@ -46616,13 +46616,13 @@ begin return res[1] end end - function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -46632,23 +46632,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function switch_graph(data_, pred_; name=nothing) - local desc - tf.with_op_name(name, "Switch") do - desc = tf.NodeDescription("Switch") - data_ = convert(Tensor{Any}, data_) - pred_ = convert(Tensor{Bool}, pred_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function switch_graph(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "Switch") do + desc = tf.NodeDescription("Switch") + data_ = convert(Tensor{Any}, data_) + 
pred_ = convert(Tensor{Bool}, pred_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) + tf.add_input(desc, pred_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function switch_eager(data_, pred_; name=nothing) desc = tf.EagerOp("Switch") data_ = convert(tf.EagerTensor, data_) @@ -46663,13 +46663,13 @@ begin return res end end - function switch(data_, pred_; name=nothing) - if tf.in_eager_mode() - switch_eager(data_, pred_; name=name) - else - switch_graph(data_, pred_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function switch(data_, pred_; name=nothing) + if tf.in_eager_mode() + switch_eager(data_, pred_; name=name) + else + switch_graph(data_, pred_; name=name) + end end - end end @@ -46679,23 +46679,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueManyV2") do - desc = tf.NodeDescription("QueueDequeueManyV2") - handle_ = convert(Tensor{Any}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end + function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueManyV2") do + desc = tf.NodeDescription("QueueDequeueManyV2") + handle_ = convert(Tensor{Any}, handle_) + n_ = convert(Tensor{Int32}, n_) + tf.add_input(desc, handle_) + tf.add_input(desc, n_) + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) desc = tf.EagerOp("QueueDequeueManyV2") handle_ = convert(tf.EagerTensor, handle_) @@ -46715,13 +46715,13 @@ begin return res[1] end end - function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end end @@ -46731,20 +46731,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_prod_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentProd") do - desc = tf.NodeDescription("SegmentProd") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - 
convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function segment_prod_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentProd") do + desc = tf.NodeDescription("SegmentProd") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function segment_prod_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentProd") data_ = convert(tf.EagerTensor, data_) @@ -46760,13 +46760,13 @@ begin return res[1] end end - function segment_prod(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_prod_eager(data_, segment_ids_; name=name) - else - segment_prod_graph(data_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_prod_eager(data_, segment_ids_; name=name) + else + segment_prod_graph(data_, segment_ids_; name=name) + end end - end end @@ -46776,21 +46776,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) - local desc - tf.with_op_name(name, "ApproximateEqual") do - desc = tf.NodeDescription("ApproximateEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if tolerance !== nothing - desc["tolerance"] = Base.identity(tolerance) - end + function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + local desc + tf.with_op_name(name, "ApproximateEqual") do + desc = tf.NodeDescription("ApproximateEqual") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if tolerance !== nothing + desc["tolerance"] = Base.identity(tolerance) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) desc = tf.EagerOp("ApproximateEqual") x_ = convert(tf.EagerTensor, x_) @@ -46809,49 +46809,49 @@ begin return res[1] end end - function approximate_equal(x_, y_; name=nothing, tolerance=nothing) - if tf.in_eager_mode() - approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) - else - approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing) + if tf.in_eager_mode() + approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) + else + approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) + end end - end end """ - conv2d(input, filter; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d(input, filter; use_cudnn_on_gpu=true, data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_graph(input_, filter_; name=nothing, strides=nothing, 
use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2D") do - desc = tf.NodeDescription("Conv2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2D") do + desc = tf.NodeDescription("Conv2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2D") input_ = convert(tf.EagerTensor, input_) @@ -46882,13 +46882,13 @@ begin return res[1] end end - function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -46898,18 +46898,18 @@ end An Op to sum inputs across replicated TPU instances. 
Each instance supplies its """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cross_replica_sum_graph(input_, group_assignment_; name=nothing) - local desc - tf.with_op_name(name, "CrossReplicaSum") do - desc = tf.NodeDescription("CrossReplicaSum") - input_ = convert(Tensor{Any}, input_) - group_assignment_ = convert(Tensor{Int32}, group_assignment_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - end - tf.Tensor(tf.Operation(desc)) + function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + local desc + tf.with_op_name(name, "CrossReplicaSum") do + desc = tf.NodeDescription("CrossReplicaSum") + input_ = convert(Tensor{Any}, input_) + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, group_assignment_) end + tf.Tensor(tf.Operation(desc)) + end function cross_replica_sum_eager(input_, group_assignment_; name=nothing) desc = tf.EagerOp("CrossReplicaSum") input_ = convert(tf.EagerTensor, input_) @@ -46924,13 +46924,13 @@ begin return res[1] end end - function cross_replica_sum(input_, group_assignment_; name=nothing) - if tf.in_eager_mode() - cross_replica_sum_eager(input_, group_assignment_; name=name) - else - cross_replica_sum_graph(input_, group_assignment_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing) + if tf.in_eager_mode() + cross_replica_sum_eager(input_, group_assignment_; name=name) + else + cross_replica_sum_graph(input_, group_assignment_; name=name) + end end - end end @@ -46940,31 +46940,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - local desc - tf.with_op_name(name, "SparseMatMul") do - desc = tf.NodeDescription("SparseMatMul") - a_ = convert(Tensor{Float32}, a_) - b_ = convert(Tensor{Float32}, b_) - (b_,) = tf.tf_promote(b_) - (a_,) = tf.tf_promote(a_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - if a_is_sparse !== nothing - desc["a_is_sparse"] = Base.Bool(a_is_sparse) - end - if b_is_sparse !== nothing - desc["b_is_sparse"] = Base.Bool(b_is_sparse) - end - end - tf.Tensor(tf.Operation(desc)) + function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + local desc + tf.with_op_name(name, "SparseMatMul") do + desc = tf.NodeDescription("SparseMatMul") + a_ = convert(Tensor{Float32}, a_) + b_ = convert(Tensor{Float32}, b_) + (b_,) = tf.tf_promote(b_) + (a_,) = tf.tf_promote(a_) + tf.add_input(desc, a_) + tf.add_input(desc, b_) + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + if a_is_sparse !== nothing + desc["a_is_sparse"] = Base.Bool(a_is_sparse) + end + if b_is_sparse !== nothing + desc["b_is_sparse"] = Base.Bool(b_is_sparse) + end end + tf.Tensor(tf.Operation(desc)) + end function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, 
b_is_sparse=nothing) desc = tf.EagerOp("SparseMatMul") a_ = convert(tf.EagerTensor, a_) @@ -46992,13 +46992,13 @@ begin return res[1] end end - function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - if tf.in_eager_mode() - sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) - else - sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + if tf.in_eager_mode() + sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + else + sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + end end - end end @@ -47008,35 +47008,35 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocatorSplit") do - desc = tf.NodeDescription("_ScopedAllocatorSplit") - concat_ = convert(Tensor{Any}, concat_) - split_ = [convert(Tensor{Any}, x) for x = split_] - (concat_, split_) = tf.tf_promote(concat_, split_) - tf.add_input(desc, concat_) - tf.add_input(desc, split_) - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out + function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorSplit") do + desc = tf.NodeDescription("_ScopedAllocatorSplit") + concat_ = convert(Tensor{Any}, concat_) + split_ = [convert(Tensor{Any}, x) for x = split_] + (concat_, split_) = tf.tf_promote(concat_, split_) + tf.add_input(desc, concat_) + tf.add_input(desc, split_) + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + if id !== nothing + desc["id"] = Base.Int(id) + end + if N !== nothing + desc["N"] = Base.Int(N) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) desc = tf.EagerOp("_ScopedAllocatorSplit") concat_ = convert(tf.EagerTensor, concat_) @@ -47064,13 +47064,13 @@ begin return res end end - function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - if tf.in_eager_mode() - _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) - else - _scoped_allocator_split_graph(concat_, split_; 
name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + if tf.in_eager_mode() + _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + else + _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + end end - end end @@ -47080,18 +47080,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igammac_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Igammac") do - desc = tf.NodeDescription("Igammac") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function igammac_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igammac") do + desc = tf.NodeDescription("Igammac") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function igammac_eager(a_, x_; name=nothing) desc = tf.EagerOp("Igammac") a_ = convert(tf.EagerTensor, a_) @@ -47107,13 +47107,13 @@ begin return res[1] end end - function igammac(a_, x_; name=nothing) - if tf.in_eager_mode() - igammac_eager(a_, x_; name=name) - else - igammac_graph(a_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igammac(a_, x_; name=nothing) + if tf.in_eager_mode() + igammac_eager(a_, x_; name=name) + else + igammac_graph(a_, x_; name=name) + end end - end end @@ -47123,24 +47123,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - local desc - tf.with_op_name(name, "BatchMatMul") do - desc = tf.NodeDescription("BatchMatMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if adj_x !== nothing - desc["adj_x"] = Base.Bool(adj_x) - end - if adj_y !== nothing - desc["adj_y"] = Base.Bool(adj_y) - end + function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + local desc + tf.with_op_name(name, "BatchMatMul") do + desc = tf.NodeDescription("BatchMatMul") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + if adj_x !== nothing + desc["adj_x"] = Base.Bool(adj_x) + end + if adj_y !== nothing + desc["adj_y"] = Base.Bool(adj_y) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) desc = tf.EagerOp("BatchMatMul") x_ = convert(tf.EagerTensor, x_) @@ -47162,13 +47162,13 @@ begin return res[1] end end - function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - if tf.in_eager_mode() - batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) - else - batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + if tf.in_eager_mode() 
+ batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + else + batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + end end - end end @@ -47178,23 +47178,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayPack") do - desc = tf.NodeDescription("TensorArrayPack") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end + function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayPack") do + desc = tf.NodeDescription("TensorArrayPack") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArrayPack") handle_ = convert(tf.EagerTensor, handle_) @@ -47214,13 +47214,13 @@ begin return res[1] end end - function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end end @@ -47230,18 +47230,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "QueueCloseV2") do - desc = tf.NodeDescription("QueueCloseV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end + function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueCloseV2") do + desc = tf.NodeDescription("QueueCloseV2") + handle_ = convert(Tensor{Any}, handle_) + tf.add_input(desc, handle_) + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) desc = tf.EagerOp("QueueCloseV2") handle_ = convert(tf.EagerTensor, handle_) @@ -47256,13 +47256,13 @@ begin return res[1] end 
end - function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end end @@ -47272,30 +47272,30 @@ end An op that enqueues TPUEmbedding input indices from a SparseTensor. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] - embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] - aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) - end - end - tf.Tensor(tf.Operation(desc)) + function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, sample_indices_) + tf.add_input(desc, embedding_indices_) + tf.add_input(desc, aggregation_weights_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end end + tf.Tensor(tf.Operation(desc)) + end function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") sample_indices_ = convert(tf.EagerTensor, sample_indices_) @@ -47322,13 +47322,13 @@ begin return res[1] end end - function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - if tf.in_eager_mode() - 
enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) - else - enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + if tf.in_eager_mode() + enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + else + enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + end end - end end @@ -47338,17 +47338,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_restore_state_graph(reader_handle_, state_; name=nothing) - local desc - tf.with_op_name(name, "ReaderRestoreState") do - desc = tf.NodeDescription("ReaderRestoreState") - reader_handle_ = convert(Tensor{String}, reader_handle_) - state_ = convert(Tensor{String}, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) - end - tf.Tensor(tf.Operation(desc)) + function reader_restore_state_graph(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreState") do + desc = tf.NodeDescription("ReaderRestoreState") + reader_handle_ = convert(Tensor{String}, reader_handle_) + state_ = convert(Tensor{String}, state_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, state_) end + tf.Tensor(tf.Operation(desc)) + end function reader_restore_state_eager(reader_handle_, state_; name=nothing) desc = tf.EagerOp("ReaderRestoreState") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -47362,57 +47362,57 @@ begin return res[1] end end - function reader_restore_state(reader_handle_, state_; name=nothing) - if tf.in_eager_mode() - reader_restore_state_eager(reader_handle_, state_; name=name) - else - reader_restore_state_graph(reader_handle_, state_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing) + if tf.in_eager_mode() + reader_restore_state_eager(reader_handle_, state_; name=name) + else + reader_restore_state_graph(reader_handle_, state_; name=name) + end end - end end """ - _fused_conv2d(input, filter, args; data_format=NHWC, dilations=[1, 1, 1, 1], fused_ops=Int64[], epsilon=?) + _fused_conv2d(input, filter, args; data_format=, dilations=[1, 1, 1, 1], fused_ops=Int64[], epsilon=?) *NOTE*: Do not invoke this operator directly in Python. 
Grappler is """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) - local desc - tf.with_op_name(name, "_FusedConv2D") do - desc = tf.NodeDescription("_FusedConv2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - args_ = [convert(Tensor{Any}, x) for x = args_] - (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, args_) - if num_args !== nothing - desc["num_args"] = Base.Int(num_args) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - if fused_ops !== nothing - desc["fused_ops"] = map(Base.identity, fused_ops) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - end - tf.Tensor(tf.Operation(desc)) + function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) + local desc + tf.with_op_name(name, "_FusedConv2D") do + desc = tf.NodeDescription("_FusedConv2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + args_ = [convert(Tensor{Any}, x) for x = args_] + (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, args_) + if num_args !== nothing + desc["num_args"] = Base.Int(num_args) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + if fused_ops !== nothing + desc["fused_ops"] = map(Base.identity, fused_ops) + end + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end end + tf.Tensor(tf.Operation(desc)) + end function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) desc = tf.EagerOp("_FusedConv2D") input_ = convert(tf.EagerTensor, input_) @@ -47452,13 +47452,13 @@ begin return res[1] end end - function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) - if tf.in_eager_mode() - _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) - else - _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, 
strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) + if tf.in_eager_mode() + _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) + else + _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) + end end - end end @@ -47468,21 +47468,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "_ReadVariablesOp") do - desc = tf.NodeDescription("_ReadVariablesOp") - resources_ = [convert(Tensor{Any}, x) for x = resources_] - tf.add_input(desc, resources_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "_ReadVariablesOp") do + desc = tf.NodeDescription("_ReadVariablesOp") + resources_ = [convert(Tensor{Any}, x) for x = resources_] + tf.add_input(desc, resources_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing) desc = tf.EagerOp("_ReadVariablesOp") resources_ = convert(tf.EagerTensor, resources_) @@ -47500,13 +47500,13 @@ begin return res[1] end end - function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) - if tf.in_eager_mode() - _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) - else - _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) + if tf.in_eager_mode() + _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) + else + _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) + end end - end end @@ -47516,31 +47516,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - local desc - tf.with_op_name(name, "MutableHashTableOfTensors") do - desc = tf.NodeDescription("MutableHashTableOfTensors") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - end - tf.Tensor(tf.Operation(desc)) + function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, 
key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensors") do + desc = tf.NodeDescription("MutableHashTableOfTensors") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end end + tf.Tensor(tf.Operation(desc)) + end function mutable_hash_table_of_tensors_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) desc = tf.EagerOp("MutableHashTableOfTensors") if container !== nothing @@ -47568,13 +47568,13 @@ begin return res[1] end end - function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.in_eager_mode() - mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) - else - mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.in_eager_mode() + mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end end - end end @@ -47584,15 +47584,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function read_file_graph(filename_; name=nothing) - local desc - tf.with_op_name(name, "ReadFile") do - desc = tf.NodeDescription("ReadFile") - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, filename_) - end - tf.Tensor(tf.Operation(desc)) + function read_file_graph(filename_; name=nothing) + local desc + tf.with_op_name(name, "ReadFile") do + desc = tf.NodeDescription("ReadFile") + filename_ = convert(Tensor{String}, filename_) + tf.add_input(desc, filename_) end + tf.Tensor(tf.Operation(desc)) + end function read_file_eager(filename_; name=nothing) desc = tf.EagerOp("ReadFile") filename_ = convert(tf.EagerTensor, filename_) @@ -47604,13 +47604,13 @@ begin return res[1] end end - function read_file(filename_; name=nothing) - if tf.in_eager_mode() - read_file_eager(filename_; name=name) - else - read_file_graph(filename_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
read_file(filename_; name=nothing) + if tf.in_eager_mode() + read_file_eager(filename_; name=name) + else + read_file_graph(filename_; name=name) + end end - end end @@ -47620,33 +47620,33 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - weights_ = convert(Tensor{Float32}, weights_) - benefits_ = convert(Tensor{Float32}, benefits_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, weights_) - tf.add_input(desc, benefits_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + weights_ = convert(Tensor{Float32}, weights_) + benefits_ = convert(Tensor{Float32}, benefits_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, weights_) + tf.add_input(desc, benefits_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -47676,13 +47676,13 @@ begin return res[1] end end - function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
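# All of the `load_tpu_embedding_*` wrappers regenerated in these hunks share
# one shape: every optimizer slot (parameters, accumulators, weights,
# benefits, ...) comes in as a `Tensor{Float32}` input, while
# `table_id`/`table_name` and `num_shards`/`shard_id` are attributes that
# select which shard of which embedding table receives the loaded state.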
load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -47692,25 +47692,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - local desc - tf.with_op_name(name, "FractionalAvgPoolGrad") do - desc = tf.NodeDescription("FractionalAvgPoolGrad") - orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) - col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) - (out_backprop_,) = tf.tf_promote(out_backprop_) - tf.add_input(desc, orig_input_tensor_shape_) - tf.add_input(desc, out_backprop_) - tf.add_input(desc, row_pooling_sequence_) - tf.add_input(desc, col_pooling_sequence_) - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end + function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPoolGrad") do + desc = tf.NodeDescription("FractionalAvgPoolGrad") + orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) + col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) + (out_backprop_,) = tf.tf_promote(out_backprop_) + tf.add_input(desc, orig_input_tensor_shape_) + tf.add_input(desc, out_backprop_) + tf.add_input(desc, row_pooling_sequence_) + tf.add_input(desc, col_pooling_sequence_) + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) desc = tf.EagerOp("FractionalAvgPoolGrad") orig_input_tensor_shape_ = convert(tf.EagerTensor, orig_input_tensor_shape_) @@ -47732,13 +47732,13 @@ begin return res[1] end end - function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - if tf.in_eager_mode() - fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) - else - fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, 
row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + if tf.in_eager_mode() + fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + else + fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + end end - end end @@ -47748,31 +47748,31 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -47800,13 +47800,13 @@ begin return res[1] end end - function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - 
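# The `if attr !== nothing ... end` guards repeated through both the graph and
# eager builders are how optional keywords stay optional: an attribute is only
# written onto the `NodeDescription` (or `EagerOp`) when the caller supplied a
# value, so TensorFlow's registered default for that attribute applies
# otherwise. The convention, as it appears just above:
#
#     if table_id !== nothing
#         desc["table_id"] = Base.Int(table_id)
#     end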
load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -47816,20 +47816,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bincount_graph(arr_, size_, weights_; name=nothing) - local desc - tf.with_op_name(name, "Bincount") do - desc = tf.NodeDescription("Bincount") - arr_ = convert(Tensor{Int32}, arr_) - size_ = convert(Tensor{Int32}, size_) - weights_ = convert(Tensor{Any}, weights_) - (weights_,) = tf.tf_promote(weights_) - tf.add_input(desc, arr_) - tf.add_input(desc, size_) - tf.add_input(desc, weights_) - end - tf.Tensor(tf.Operation(desc)) + function bincount_graph(arr_, size_, weights_; name=nothing) + local desc + tf.with_op_name(name, "Bincount") do + desc = tf.NodeDescription("Bincount") + arr_ = convert(Tensor{Int32}, arr_) + size_ = convert(Tensor{Int32}, size_) + weights_ = convert(Tensor{Any}, weights_) + (weights_,) = tf.tf_promote(weights_) + tf.add_input(desc, arr_) + tf.add_input(desc, size_) + tf.add_input(desc, weights_) end + tf.Tensor(tf.Operation(desc)) + end function bincount_eager(arr_, size_, weights_; name=nothing) desc = tf.EagerOp("Bincount") arr_ = convert(tf.EagerTensor, arr_) @@ -47846,13 +47846,13 @@ begin return res[1] end end - function bincount(arr_, size_, weights_; name=nothing) - if tf.in_eager_mode() - bincount_eager(arr_, size_, weights_; name=name) - else - bincount_graph(arr_, size_, weights_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bincount(arr_, size_, weights_; name=nothing) + if tf.in_eager_mode() + bincount_eager(arr_, size_, weights_; name=name) + else + bincount_graph(arr_, size_, weights_; name=name) + end end - end end @@ -47862,16 +47862,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inv_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Inv") do - desc = tf.NodeDescription("Inv") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function inv_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Inv") do + desc = tf.NodeDescription("Inv") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function inv_eager(x_; name=nothing) desc = tf.EagerOp("Inv") x_ = convert(tf.EagerTensor, x_) @@ -47884,13 +47884,13 @@ begin return res[1] end end - function inv(x_; name=nothing) - if tf.in_eager_mode() - inv_eager(x_; name=name) - else - inv_graph(x_; name=name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv(x_; name=nothing) + if tf.in_eager_mode() + inv_eager(x_; name=name) + else + inv_graph(x_; name=name) + end end - end end @@ -47900,29 +47900,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyProximalAdagrad") do - desc = tf.NodeDescription("ApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalAdagrad") do + desc = tf.NodeDescription("ApplyProximalAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + grad_ = convert(Tensor{Any}, grad_) + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyProximalAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -47953,13 +47953,13 @@ begin return res[1] end end - function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) - else - apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -47969,23 +47969,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gather_v2_graph(params_, indices_, axis_; name=nothing) - local desc - tf.with_op_name(name, "GatherV2") do - desc = tf.NodeDescription("GatherV2") - params_ = convert(Tensor{Any}, params_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - axis_ = convert(Tensor{Any}, axis_) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - (axis_,) = 
tf.tf_promote(axis_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) + function gather_v2_graph(params_, indices_, axis_; name=nothing) + local desc + tf.with_op_name(name, "GatherV2") do + desc = tf.NodeDescription("GatherV2") + params_ = convert(Tensor{Any}, params_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + axis_ = convert(Tensor{Any}, axis_) + (params_,) = tf.tf_promote(params_) + (indices_,) = tf.tf_promote(indices_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, params_) + tf.add_input(desc, indices_) + tf.add_input(desc, axis_) end + tf.Tensor(tf.Operation(desc)) + end function gather_v2_eager(params_, indices_, axis_; name=nothing) desc = tf.EagerOp("GatherV2") params_ = convert(tf.EagerTensor, params_) @@ -48004,13 +48004,13 @@ begin return res[1] end end - function gather_v2(params_, indices_, axis_; name=nothing) - if tf.in_eager_mode() - gather_v2_eager(params_, indices_, axis_; name=name) - else - gather_v2_graph(params_, indices_, axis_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing) + if tf.in_eager_mode() + gather_v2_eager(params_, indices_, axis_; name=name) + else + gather_v2_graph(params_, indices_, axis_; name=name) + end end - end end @@ -48020,17 +48020,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_file_graph(filename_, contents_; name=nothing) - local desc - tf.with_op_name(name, "WriteFile") do - desc = tf.NodeDescription("WriteFile") - filename_ = convert(Tensor{String}, filename_) - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, filename_) - tf.add_input(desc, contents_) - end - tf.Tensor(tf.Operation(desc)) + function write_file_graph(filename_, contents_; name=nothing) + local desc + tf.with_op_name(name, "WriteFile") do + desc = tf.NodeDescription("WriteFile") + filename_ = convert(Tensor{String}, filename_) + contents_ = convert(Tensor{String}, contents_) + tf.add_input(desc, filename_) + tf.add_input(desc, contents_) end + tf.Tensor(tf.Operation(desc)) + end function write_file_eager(filename_, contents_; name=nothing) desc = tf.EagerOp("WriteFile") filename_ = convert(tf.EagerTensor, filename_) @@ -48044,13 +48044,13 @@ begin return res[1] end end - function write_file(filename_, contents_; name=nothing) - if tf.in_eager_mode() - write_file_eager(filename_, contents_; name=name) - else - write_file_graph(filename_, contents_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_file(filename_, contents_; name=nothing) + if tf.in_eager_mode() + write_file_eager(filename_, contents_; name=name) + else + write_file_graph(filename_, contents_; name=name) + end end - end end @@ -48060,20 +48060,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do - desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out + function 
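# Note the index bridge in `gather_v2_graph` above: the builder rewrites
# `indices_` as `indices_ - convert(tf.Tensor{eltype(indices_)}, 1)`, so Julia
# callers pass ordinary 1-based indices while the underlying GatherV2 kernel
# sees the 0-based ones it expects (`resource_gather_graph` below does the
# same). A minimal usage sketch, assuming the package's `tf.constant` for the
# literal inputs:
params = tf.constant([10, 20, 30])
idx = tf.constant([1, 3])                      # 1-based: first and third element
vals = gather_v2(params, idx, tf.constant(0))  # axis_ passes through unshifted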
boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do + desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("BoostedTreesGetEnsembleStates") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -48085,13 +48085,13 @@ begin return res end end - function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) - else - boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) + else + boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) + end end - end end @@ -48101,25 +48101,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceGather") do - desc = tf.NodeDescription("ResourceGather") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceGather") do + desc = tf.NodeDescription("ResourceGather") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) desc = tf.EagerOp("ResourceGather") resource_ = convert(tf.EagerTensor, resource_) @@ -48140,13 +48140,13 @@ begin return res[1] end end - function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) - else - resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, 
dtype=nothing) + if tf.in_eager_mode() + resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + else + resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + end end - end end @@ -48156,27 +48156,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do - desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - delta_ = convert(Tensor{Any}, delta_) - (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") + var_ = convert(Tensor{Any}, var_) + alpha_ = convert(Tensor{Any}, alpha_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + delta_ = convert(Tensor{Any}, delta_) + (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_) + tf.add_input(desc, var_) + tf.add_input(desc, alpha_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, delta_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyProximalGradientDescent") var_ = convert(tf.EagerTensor, var_) @@ -48203,13 +48203,13 @@ begin return res[1] end end - function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - else - resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end end - end end @@ -48219,18 +48219,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function truncate_mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "TruncateMod") do - desc = tf.NodeDescription("TruncateMod") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + 
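# Every op in these hunks now reduces to the same three generated functions: a
# `*_graph` builder, a `*_eager` executor, and a thin dispatcher that picks
# between them with `tf.in_eager_mode()`. A minimal sketch of the full pattern
# for a hypothetical unary op (the names here are illustrative, not generated):
function example_unary_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "ExampleUnary") do
        desc = tf.NodeDescription("ExampleUnary")
        x_ = convert(Tensor{Any}, x_)
        (x_,) = tf.tf_promote(x_)
        tf.add_input(desc, x_)
    end
    tf.Tensor(tf.Operation(desc))
end
function example_unary_eager(x_; name=nothing)
    desc = tf.EagerOp("ExampleUnary")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)  # dtype attr inferred from the input, as the typed wrappers do
    res = tf.execute(desc)
    if length(res) >= 1
        return res[1]
    end
end
function example_unary(x_; name=nothing)
    if tf.in_eager_mode()
        example_unary_eager(x_; name=name)
    else
        example_unary_graph(x_; name=name)
    end
end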
function truncate_mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateMod") do + desc = tf.NodeDescription("TruncateMod") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function truncate_mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("TruncateMod") x_ = convert(tf.EagerTensor, x_) @@ -48246,13 +48246,13 @@ begin return res[1] end end - function truncate_mod(x_, y_; name=nothing) - if tf.in_eager_mode() - truncate_mod_eager(x_, y_; name=name) - else - truncate_mod_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_mod(x_, y_; name=nothing) + if tf.in_eager_mode() + truncate_mod_eager(x_, y_; name=name) + else + truncate_mod_graph(x_, y_; name=name) + end end - end end @@ -48262,21 +48262,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function log_matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "LogMatrixDeterminant") do - desc = tf.NodeDescription("LogMatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function log_matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "LogMatrixDeterminant") do + desc = tf.NodeDescription("LogMatrixDeterminant") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function log_matrix_determinant_eager(input_; name=nothing) desc = tf.EagerOp("LogMatrixDeterminant") input_ = convert(tf.EagerTensor, input_) @@ -48289,13 +48289,13 @@ begin return res end end - function log_matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - log_matrix_determinant_eager(input_; name=name) - else - log_matrix_determinant_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + log_matrix_determinant_eager(input_; name=name) + else + log_matrix_determinant_graph(input_; name=name) + end end - end end @@ -48305,17 +48305,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function irfft2d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT2D") do - desc = tf.NodeDescription("IRFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - end - tf.Tensor(tf.Operation(desc)) + function irfft2d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT2D") do + desc = tf.NodeDescription("IRFFT2D") + input_ = convert(Tensor{Complex{Float32}}, input_) + fft_length_ = convert(Tensor{Int32}, fft_length_) + tf.add_input(desc, input_) + tf.add_input(desc, fft_length_) end + tf.Tensor(tf.Operation(desc)) + end function irfft2d_eager(input_, fft_length_; name=nothing) desc = tf.EagerOp("IRFFT2D") input_ = convert(tf.EagerTensor, input_) @@ -48329,13 +48329,13 @@ begin return res[1] end end - function 
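# Ops with more than one output deviate from the single-tensor return in just
# one place. `LogMatrixDeterminant` above, for instance, yields both the sign
# and the log of the absolute determinant, so its graph builder collects one
# `tf.Tensor` per output index, and its eager path returns the whole `res`
# vector rather than `res[1]`. The generated shape, for a hypothetical
# two-output op:
function example_two_outputs_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "ExampleTwoOutputs") do
        desc = tf.NodeDescription("ExampleTwoOutputs")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:2  # one tf.Tensor per op output
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end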
irfft2d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft2d_eager(input_, fft_length_; name=name) - else - irfft2d_graph(input_, fft_length_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft2d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft2d_eager(input_, fft_length_; name=name) + else + irfft2d_graph(input_, fft_length_; name=name) + end end - end end @@ -48345,32 +48345,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "BoostedTreesTrainingPredict") do - desc = tf.NodeDescription("BoostedTreesTrainingPredict") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_) - cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, cached_tree_ids_) - tf.add_input(desc, cached_node_ids_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end + function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesTrainingPredict") do + desc = tf.NodeDescription("BoostedTreesTrainingPredict") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_) + cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_) + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, cached_tree_ids_) + tf.add_input(desc, cached_node_ids_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesTrainingPredict") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -48394,13 +48394,13 @@ begin return res end end - function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, 
logits_dimension=logits_dimension) - else - boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end end @@ -48410,16 +48410,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function floor_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Floor") do - desc = tf.NodeDescription("Floor") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function floor_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Floor") do + desc = tf.NodeDescription("Floor") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function floor_eager(x_; name=nothing) desc = tf.EagerOp("Floor") x_ = convert(tf.EagerTensor, x_) @@ -48432,13 +48432,13 @@ begin return res[1] end end - function floor(x_; name=nothing) - if tf.in_eager_mode() - floor_eager(x_; name=name) - else - floor_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor(x_; name=nothing) + if tf.in_eager_mode() + floor_eager(x_; name=name) + else + floor_graph(x_; name=name) + end end - end end @@ -48448,27 +48448,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - local desc - tf.with_op_name(name, "WriteImageSummary") do - desc = tf.NodeDescription("WriteImageSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - bad_color_ = convert(Tensor{UInt8}, bad_color_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, bad_color_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end + function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + local desc + tf.with_op_name(name, "WriteImageSummary") do + desc = tf.NodeDescription("WriteImageSummary") + writer_ = convert(Tensor{Any}, writer_) + step_ = convert(Tensor{Int64}, step_) + tag_ = convert(Tensor{String}, tag_) + tensor_ = convert(Tensor{Float32}, tensor_) + bad_color_ = convert(Tensor{UInt8}, bad_color_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, writer_) + tf.add_input(desc, step_) + tf.add_input(desc, tag_) + tf.add_input(desc, tensor_) + 
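# `bad_color_` keeps the fixed `Tensor{UInt8}` conversion from above instead
# of going through `tf.tf_promote` like `tensor_`: WriteImageSummary pins its
# dtype in the op definition (it is the color substituted for non-finite
# pixel values), so there is nothing to infer from the other inputs.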
tf.add_input(desc, bad_color_) + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) desc = tf.EagerOp("WriteImageSummary") writer_ = convert(tf.EagerTensor, writer_) @@ -48492,13 +48492,13 @@ begin return res[1] end end - function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - if tf.in_eager_mode() - write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) - else - write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + if tf.in_eager_mode() + write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + else + write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + end end - end end @@ -48508,18 +48508,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tile_grad_graph(input_, multiples_; name=nothing) - local desc - tf.with_op_name(name, "TileGrad") do - desc = tf.NodeDescription("TileGrad") - input_ = convert(Tensor{Any}, input_) - multiples_ = convert(Tensor{Int32}, multiples_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) - end - tf.Tensor(tf.Operation(desc)) + function tile_grad_graph(input_, multiples_; name=nothing) + local desc + tf.with_op_name(name, "TileGrad") do + desc = tf.NodeDescription("TileGrad") + input_ = convert(Tensor{Any}, input_) + multiples_ = convert(Tensor{Int32}, multiples_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, multiples_) end + tf.Tensor(tf.Operation(desc)) + end function tile_grad_eager(input_, multiples_; name=nothing) desc = tf.EagerOp("TileGrad") input_ = convert(tf.EagerTensor, input_) @@ -48534,13 +48534,13 @@ begin return res[1] end end - function tile_grad(input_, multiples_; name=nothing) - if tf.in_eager_mode() - tile_grad_eager(input_, multiples_; name=name) - else - tile_grad_graph(input_, multiples_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile_grad(input_, multiples_; name=nothing) + if tf.in_eager_mode() + tile_grad_eager(input_, multiples_; name=name) + else + tile_grad_graph(input_, multiples_; name=name) + end end - end end @@ -48550,31 +48550,31 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, gradient_accumulators_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") parameters_ = convert(tf.EagerTensor, parameters_) @@ -48602,13 +48602,13 @@ begin return res[1] end end - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, 
num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -48618,25 +48618,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradV3") do - desc = tf.NodeDescription("TensorArrayGradV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradV3") do + desc = tf.NodeDescription("TensorArrayGradV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGradV3") handle_ = convert(tf.EagerTensor, handle_) @@ -48653,13 +48653,13 @@ begin return res end end - function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source) - else - tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) + end end - end end @@ -48669,23 +48669,23 @@ end An op that enqueues a list of input batch tensors to TPUEmbedding. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") - batch_ = [convert(Tensor{Int32}, x) for x = batch_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, batch_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end + function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") + batch_ = [convert(Tensor{Int32}, x) for x = batch_] + mode_override_ = convert(Tensor{String}, mode_override_) + tf.add_input(desc, batch_) + tf.add_input(desc, mode_override_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") batch_ = convert(tf.EagerTensor, batch_) @@ -48705,54 +48705,54 @@ begin return res[1] end end - function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) - else - enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + else + enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + end end - end end """ - fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=NHWC, is_training=true) + fused_batch_norm(x, scale, offset, mean, variance; epsilon=?, data_format=, is_training=true) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNorm") do - desc = tf.NodeDescription("FusedBatchNorm") - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - offset_ = convert(Tensor{Any}, offset_) - mean_ = convert(Tensor{Any}, mean_) - variance_ = convert(Tensor{Any}, variance_) - (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - 
desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end + function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNorm") do + desc = tf.NodeDescription("FusedBatchNorm") + x_ = convert(Tensor{Any}, x_) + scale_ = convert(Tensor{Any}, scale_) + offset_ = convert(Tensor{Any}, offset_) + mean_ = convert(Tensor{Any}, mean_) + variance_ = convert(Tensor{Any}, variance_) + (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_) + tf.add_input(desc, x_) + tf.add_input(desc, scale_) + tf.add_input(desc, offset_) + tf.add_input(desc, mean_) + tf.add_input(desc, variance_) + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - out + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) end + out + end function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) desc = tf.EagerOp("FusedBatchNorm") x_ = convert(tf.EagerTensor, x_) @@ -48786,13 +48786,13 @@ begin return res end end - function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - end end @@ -48802,17 +48802,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_and_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LogicalAnd") do - desc = tf.NodeDescription("LogicalAnd") - x_ = convert(Tensor{Bool}, x_) - y_ = convert(Tensor{Bool}, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function logical_and_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LogicalAnd") do + desc = tf.NodeDescription("LogicalAnd") + x_ = convert(Tensor{Bool}, x_) + y_ = convert(Tensor{Bool}, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function logical_and_eager(x_, y_; name=nothing) desc = tf.EagerOp("LogicalAnd") x_ = convert(tf.EagerTensor, x_) @@ -48826,13 +48826,13 @@ begin return res[1] end end - function logical_and(x_, y_; name=nothing) - if tf.in_eager_mode() - logical_and_eager(x_, y_; 
name=name) - else - logical_and_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_and(x_, y_; name=nothing) + if tf.in_eager_mode() + logical_and_eager(x_, y_; name=name) + else + logical_and_graph(x_, y_; name=name) + end end - end end @@ -48842,22 +48842,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterUpdate") do - desc = tf.NodeDescription("TensorScatterUpdate") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterUpdate") do + desc = tf.NodeDescription("TensorScatterUpdate") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterUpdate") tensor_ = convert(tf.EagerTensor, tensor_) @@ -48876,13 +48876,13 @@ begin return res[1] end end - function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) + end end - end end @@ -48892,22 +48892,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TextLineReaderV2") do - desc = tf.NodeDescription("TextLineReaderV2") - if skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReaderV2") do + desc = tf.NodeDescription("TextLineReaderV2") + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = 
Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function text_line_reader_v2_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("TextLineReaderV2") if skip_header_lines !== nothing @@ -48926,13 +48926,13 @@ begin return res[1] end end - function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - else - text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + else + text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + end end - end end @@ -48942,21 +48942,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TensorSliceDataset") do - desc = tf.NodeDescription("TensorSliceDataset") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorSliceDataset") do + desc = tf.NodeDescription("TensorSliceDataset") + components_ = [convert(Tensor{Any}, x) for x = components_] + tf.add_input(desc, components_) + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) desc = tf.EagerOp("TensorSliceDataset") components_ = convert(tf.EagerTensor, components_) @@ -48974,13 +48974,13 @@ begin return res[1] end end - function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - else - tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end end - end end @@ -48990,22 +48990,22 @@ end """ begin 
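# Note on the pattern these hunks keep re-indenting: for every op `foo` the
# generator emits three functions. `foo_graph` builds a tf.NodeDescription and
# returns graph Tensors, `foo_eager` converts its inputs to tf.EagerTensor and
# executes immediately, and a thin `foo` wrapper picks between them at call
# time. A minimal sketch of that dispatcher, with `foo` as a placeholder name
# rather than an op from this file:
#
#     function foo(x_; name=nothing)
#         if tf.in_eager_mode()
#             foo_eager(x_; name=name)    # runs the kernel now, returns values
#         else
#             foo_graph(x_; name=name)    # records a node in the default graph
#         end
#     end
#
# The change in this diff is mechanical: the `#= generate_ops.jl =# tf.@op`
# marker moves from the `_graph` variant onto the dispatcher, so the macro now
# registers the mode-agnostic entry point instead of the graph-only one.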
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatterV3") do - desc = tf.NodeDescription("TensorArrayScatterV3") - handle_ = convert(Tensor{Any}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV3") do + desc = tf.NodeDescription("TensorArrayScatterV3") + handle_ = convert(Tensor{Any}, handle_) + indices_ = convert(Tensor{Int32}, indices_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, indices_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayScatterV3") handle_ = convert(tf.EagerTensor, handle_) @@ -49024,13 +49024,13 @@ begin return res[1] end end - function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name) - else - tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) + end end - end end @@ -49040,21 +49040,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeNearestNeighborGrad") do - desc = tf.NodeDescription("ResizeNearestNeighborGrad") - grads_ = convert(Tensor{Any}, grads_) - size_ = convert(Tensor{Int32}, size_) - (grads_,) = tf.tf_promote(grads_) - tf.add_input(desc, grads_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighborGrad") do + desc = tf.NodeDescription("ResizeNearestNeighborGrad") + grads_ = convert(Tensor{Any}, grads_) + size_ = convert(Tensor{Int32}, size_) + (grads_,) = tf.tf_promote(grads_) + tf.add_input(desc, grads_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeNearestNeighborGrad") grads_ = convert(tf.EagerTensor, grads_) @@ -49072,13 +49072,13 @@ begin return res[1] end end - function 
resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) - else - resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) + else + resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) + end end - end end @@ -49088,31 +49088,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyPowerSign") do - desc = tf.NodeDescription("ApplyPowerSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - logbase_ = convert(Tensor{Any}, logbase_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyPowerSign") do + desc = tf.NodeDescription("ApplyPowerSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + logbase_ = convert(Tensor{Any}, logbase_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, logbase_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ApplyPowerSign") var_ = convert(tf.EagerTensor, var_) @@ -49146,13 +49146,13 @@ begin return res[1] end end - function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + 
apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -49162,22 +49162,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "MirrorPad") do - desc = tf.NodeDescription("MirrorPad") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end + function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPad") do + desc = tf.NodeDescription("MirrorPad") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + if mode !== nothing + desc["mode"] = Base.String(mode) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) desc = tf.EagerOp("MirrorPad") input_ = convert(tf.EagerTensor, input_) @@ -49196,13 +49196,13 @@ begin return res[1] end end - function mirror_pad(input_, paddings_; name=nothing, mode=nothing) - if tf.in_eager_mode() - mirror_pad_eager(input_, paddings_; name=name, mode=mode) - else - mirror_pad_graph(input_, paddings_; name=name, mode=mode) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing) + if tf.in_eager_mode() + mirror_pad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_graph(input_, paddings_; name=name, mode=mode) + end end - end end @@ -49212,15 +49212,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function logical_not_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "LogicalNot") do - desc = tf.NodeDescription("LogicalNot") - x_ = convert(Tensor{Bool}, x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function logical_not_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "LogicalNot") do + desc = tf.NodeDescription("LogicalNot") + x_ = convert(Tensor{Bool}, x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function logical_not_eager(x_; name=nothing) desc = tf.EagerOp("LogicalNot") x_ = convert(tf.EagerTensor, x_) @@ -49232,13 +49232,13 @@ begin return res[1] end end - function logical_not(x_; name=nothing) - if tf.in_eager_mode() - logical_not_eager(x_; name=name) - else - logical_not_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_not(x_; name=nothing) + if tf.in_eager_mode() + logical_not_eager(x_; name=name) + else + logical_not_graph(x_; name=name) + end end - end end @@ -49248,15 +49248,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT") do - desc = tf.NodeDescription("BatchIFFT") - input_ = convert(Tensor{Complex{Float32}}, 
input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_ifft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT") do + desc = tf.NodeDescription("BatchIFFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_ifft_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT") input_ = convert(tf.EagerTensor, input_) @@ -49268,13 +49268,13 @@ begin return res[1] end end - function batch_ifft(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft_eager(input_; name=name) - else - batch_ifft_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft_eager(input_; name=name) + else + batch_ifft_graph(input_; name=name) + end end - end end @@ -49284,28 +49284,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcatV2") do - desc = tf.NodeDescription("TensorArrayConcatV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end + function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV2") do + desc = tf.NodeDescription("TensorArrayConcatV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcatV2") handle_ = convert(tf.EagerTensor, handle_) @@ -49325,13 +49325,13 @@ begin return res end end - function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - end end @@ -49341,23 +49341,23 @@ end """ 
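# A recurring detail in these graph builders: `sum` (just below), the scatter
# ops above, and the resource_sparse_apply_* ops further down all subtract 1
# from their index inputs before wiring them in. Julia indexes from 1 while
# the TensorFlow kernels expect 0-based indices, so the generated code shifts
# `reduction_indices_` / `indices_` at the language boundary. An illustrative
# call, where `x` is an assumed 2-D graph tensor rather than anything defined
# in this patch:
#
#     total = sum(x, 1)    # reduce over the first (1-based) dimension;
#                          # the wrapper passes dimension 0 to the Sum kernel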
begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Sum") do - desc = tf.NodeDescription("Sum") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Sum") do + desc = tf.NodeDescription("Sum") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Sum") input_ = convert(tf.EagerTensor, input_) @@ -49376,13 +49376,13 @@ begin return res[1] end end - function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -49392,23 +49392,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "BoostedTreesPredict") do - desc = tf.NodeDescription("BoostedTreesPredict") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end + function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesPredict") do + desc = tf.NodeDescription("BoostedTreesPredict") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + tf.add_input(desc, tree_ensemble_handle_) + tf.add_input(desc, bucketized_features_) + if num_bucketized_features !== 
nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) desc = tf.EagerOp("BoostedTreesPredict") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -49428,13 +49428,13 @@ begin return res[1] end end - function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - else - boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end end @@ -49444,32 +49444,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdagrad") do - desc = tf.NodeDescription("ResourceSparseApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, grad_) = tf.tf_promote(lr_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, grad_) = tf.tf_promote(lr_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== 
nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -49498,13 +49498,13 @@ begin return res[1] end end - function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - else - resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end end @@ -49514,21 +49514,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) - local desc - tf.with_op_name(name, "LeakyReluGrad") do - desc = tf.NodeDescription("LeakyReluGrad") - gradients_ = convert(Tensor{Float32}, gradients_) - features_ = convert(Tensor{Float32}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end + function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyReluGrad") do + desc = tf.NodeDescription("LeakyReluGrad") + gradients_ = convert(Tensor{Float32}, gradients_) + features_ = convert(Tensor{Float32}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) desc = tf.EagerOp("LeakyReluGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -49547,13 +49547,13 @@ begin return res[1] end end - function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) - if tf.in_eager_mode() - leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) - else - leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) + if tf.in_eager_mode() + leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) + else + leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) + end end - end end @@ -49563,19 +49563,19 @@ end A graph node which represents a return value of a function. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _device_retval_graph(input_; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_DeviceRetval") do - desc = tf.NodeDescription("_DeviceRetval") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) - end + function _device_retval_graph(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceRetval") do + desc = tf.NodeDescription("_DeviceRetval") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if index !== nothing + desc["index"] = Base.Int(index) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _device_retval_eager(input_; name=nothing, index=nothing) desc = tf.EagerOp("_DeviceRetval") input_ = convert(tf.EagerTensor, input_) @@ -49591,13 +49591,13 @@ begin return res[1] end end - function _device_retval(input_; name=nothing, index=nothing) - if tf.in_eager_mode() - _device_retval_eager(input_; name=name, index=index) - else - _device_retval_graph(input_; name=name, index=index) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_retval(input_; name=nothing, index=nothing) + if tf.in_eager_mode() + _device_retval_eager(input_; name=name, index=index) + else + _device_retval_graph(input_; name=name, index=index) + end end - end end @@ -49607,19 +49607,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pad_graph(input_, paddings_; name=nothing) - local desc - tf.with_op_name(name, "Pad") do - desc = tf.NodeDescription("Pad") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - end - tf.Tensor(tf.Operation(desc)) + function pad_graph(input_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "Pad") do + desc = tf.NodeDescription("Pad") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + (input_,) = tf.tf_promote(input_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) end + tf.Tensor(tf.Operation(desc)) + end function pad_eager(input_, paddings_; name=nothing) desc = tf.EagerOp("Pad") input_ = convert(tf.EagerTensor, input_) @@ -49635,13 +49635,13 @@ begin return res[1] end end - function pad(input_, paddings_; name=nothing) - if tf.in_eager_mode() - pad_eager(input_, paddings_; name=name) - else - pad_graph(input_, paddings_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad(input_, paddings_; name=nothing) + if tf.in_eager_mode() + pad_eager(input_, paddings_; name=name) + else + pad_graph(input_, paddings_; name=name) + end end - end end @@ -49651,26 +49651,26 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "AddManySparseToTensorsMap") do - desc = tf.NodeDescription("AddManySparseToTensorsMap") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, 
sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddManySparseToTensorsMap") do + desc = tf.NodeDescription("AddManySparseToTensorsMap") + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + sparse_values_ = convert(Tensor{Any}, sparse_values_) + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + (sparse_values_,) = tf.tf_promote(sparse_values_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_values_) + tf.add_input(desc, sparse_shape_) + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("AddManySparseToTensorsMap") sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) @@ -49693,13 +49693,13 @@ begin return res[1] end end - function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - else - add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end end - end end @@ -49709,25 +49709,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseReorder") do - desc = tf.NodeDescription("SparseReorder") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReorder") do + desc = tf.NodeDescription("SparseReorder") + input_indices_ = 
convert(Tensor{Int64}, input_indices_) + input_values_ = convert(Tensor{Any}, input_values_) + input_shape_ = convert(Tensor{Int64}, input_shape_) + (input_values_,) = tf.tf_promote(input_values_) + tf.add_input(desc, input_indices_) + tf.add_input(desc, input_values_) + tf.add_input(desc, input_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) desc = tf.EagerOp("SparseReorder") input_indices_ = convert(tf.EagerTensor, input_indices_) @@ -49744,13 +49744,13 @@ begin return res end end - function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) - if tf.in_eager_mode() - sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) - else - sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) + if tf.in_eager_mode() + sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) + else + sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) + end end - end end @@ -49760,18 +49760,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_xor_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseXor") do - desc = tf.NodeDescription("BitwiseXor") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function bitwise_xor_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseXor") do + desc = tf.NodeDescription("BitwiseXor") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function bitwise_xor_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseXor") x_ = convert(tf.EagerTensor, x_) @@ -49787,13 +49787,13 @@ begin return res[1] end end - function bitwise_xor(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_xor_eager(x_, y_; name=name) - else - bitwise_xor_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_xor(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_xor_eager(x_, y_; name=name) + else + bitwise_xor_graph(x_, y_; name=name) + end end - end end @@ -49803,18 +49803,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixSetDiag") do - desc = tf.NodeDescription("BatchMatrixSetDiag") - input_ = convert(Tensor{Any}, input_) - diagonal_ = convert(Tensor{Any}, diagonal_) - (input_, diagonal_) = tf.tf_promote(input_, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) - end - tf.Tensor(tf.Operation(desc)) + function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSetDiag") do + desc = tf.NodeDescription("BatchMatrixSetDiag") + input_ = convert(Tensor{Any}, input_) + diagonal_ = convert(Tensor{Any}, diagonal_) + (input_, diagonal_) = tf.tf_promote(input_, diagonal_) + 
tf.add_input(desc, input_) + tf.add_input(desc, diagonal_) end + tf.Tensor(tf.Operation(desc)) + end function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) desc = tf.EagerOp("BatchMatrixSetDiag") input_ = convert(tf.EagerTensor, input_) @@ -49830,13 +49830,13 @@ begin return res[1] end end - function batch_matrix_set_diag(input_, diagonal_; name=nothing) - if tf.in_eager_mode() - batch_matrix_set_diag_eager(input_, diagonal_; name=name) - else - batch_matrix_set_diag_graph(input_, diagonal_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing) + if tf.in_eager_mode() + batch_matrix_set_diag_eager(input_, diagonal_; name=name) + else + batch_matrix_set_diag_graph(input_, diagonal_; name=name) + end end - end end @@ -49846,21 +49846,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableInsertV2") do - desc = tf.NodeDescription("LookupTableInsertV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsertV2") do + desc = tf.NodeDescription("LookupTableInsertV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableInsertV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -49878,13 +49878,13 @@ begin return res[1] end end - function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -49894,25 +49894,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do - desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - row_shape_ = convert(Tensor{Int64}, row_shape_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, 
row_shape_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do + desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + row_shape_ = convert(Tensor{Int64}, row_shape_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, row_shape_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -49934,13 +49934,13 @@ begin return res[1] end end - function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -49950,37 +49950,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyRMSProp") do - desc = tf.NodeDescription("ResourceSparseApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - 
tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyRMSProp") + var_ = convert(Tensor{Any}, var_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -50017,13 +50017,13 @@ begin return res[1] end end - function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end end @@ -50033,24 +50033,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "RandomCrop") do - desc = tf.NodeDescription("RandomCrop") - image_ = convert(Tensor{Any}, image_) - size_ = convert(Tensor{Int64}, size_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - tf.add_input(desc, size_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomCrop") do + desc = tf.NodeDescription("RandomCrop") + image_ = 
convert(Tensor{Any}, image_) + size_ = convert(Tensor{Int64}, size_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + tf.add_input(desc, size_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("RandomCrop") image_ = convert(tf.EagerTensor, image_) @@ -50071,13 +50071,13 @@ begin return res[1] end end - function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) - else - random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) + else + random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) + end end - end end @@ -50087,21 +50087,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableImportV2") do - desc = tf.NodeDescription("LookupTableImportV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImportV2") do + desc = tf.NodeDescription("LookupTableImportV2") + table_handle_ = convert(Tensor{Any}, table_handle_) + keys_ = convert(Tensor{Any}, keys_) + values_ = convert(Tensor{Any}, values_) + (keys_,) = tf.tf_promote(keys_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, table_handle_) + tf.add_input(desc, keys_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) desc = tf.EagerOp("LookupTableImportV2") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -50119,13 +50119,13 @@ begin return res[1] end end - function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) + end end - end end @@ -50135,25 +50135,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceScatterNdUpdate") do - desc = tf.NodeDescription("ResourceScatterNdUpdate") 
- ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdUpdate") do + desc = tf.NodeDescription("ResourceScatterNdUpdate") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceScatterNdUpdate") ref_ = convert(tf.EagerTensor, ref_) @@ -50174,13 +50174,13 @@ begin return res[1] end end - function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -50190,18 +50190,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) - local desc - tf.with_op_name(name, "StaticRegexFullMatch") do - desc = tf.NodeDescription("StaticRegexFullMatch") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) - end + function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) + local desc + tf.with_op_name(name, "StaticRegexFullMatch") do + desc = tf.NodeDescription("StaticRegexFullMatch") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pattern !== nothing + desc["pattern"] = Base.String(pattern) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function static_regex_full_match_eager(input_; name=nothing, pattern=nothing) desc = tf.EagerOp("StaticRegexFullMatch") input_ = convert(tf.EagerTensor, input_) @@ -50216,13 +50216,13 @@ begin return res[1] end end - function static_regex_full_match(input_; name=nothing, pattern=nothing) - if tf.in_eager_mode() - static_regex_full_match_eager(input_; name=name, pattern=pattern) - else - static_regex_full_match_graph(input_; name=name, pattern=pattern) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing) + if tf.in_eager_mode() + static_regex_full_match_eager(input_; name=name, pattern=pattern) + else + static_regex_full_match_graph(input_; name=name, pattern=pattern) + end end - end end @@ -50232,15 +50232,15 @@ end Configures the credentials used by the GCS client of the local TF runtime. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function gcs_configure_credentials_graph(json_; name=nothing) - local desc - tf.with_op_name(name, "GcsConfigureCredentials") do - desc = tf.NodeDescription("GcsConfigureCredentials") - json_ = convert(Tensor{String}, json_) - tf.add_input(desc, json_) - end - tf.Tensor(tf.Operation(desc)) + function gcs_configure_credentials_graph(json_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureCredentials") do + desc = tf.NodeDescription("GcsConfigureCredentials") + json_ = convert(Tensor{String}, json_) + tf.add_input(desc, json_) end + tf.Tensor(tf.Operation(desc)) + end function gcs_configure_credentials_eager(json_; name=nothing) desc = tf.EagerOp("GcsConfigureCredentials") json_ = convert(tf.EagerTensor, json_) @@ -50252,13 +50252,13 @@ begin return res[1] end end - function gcs_configure_credentials(json_; name=nothing) - if tf.in_eager_mode() - gcs_configure_credentials_eager(json_; name=name) - else - gcs_configure_credentials_graph(json_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_credentials(json_; name=nothing) + if tf.in_eager_mode() + gcs_configure_credentials_eager(json_; name=name) + else + gcs_configure_credentials_graph(json_; name=name) + end end - end end @@ -50268,17 +50268,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySizeV3") do - desc = tf.NodeDescription("TensorArraySizeV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV3") do + desc = tf.NodeDescription("TensorArraySizeV3") + handle_ = convert(Tensor{Any}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV3") handle_ = convert(tf.EagerTensor, handle_) @@ -50292,13 +50292,13 @@ begin return res[1] end end - function tensor_array_size_v3(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_v3_eager(handle_, flow_in_; name=name) - else - tensor_array_size_v3_graph(handle_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_v3_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v3_graph(handle_, flow_in_; name=name) + end end - end end @@ -50308,25 +50308,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") + data_ = convert(Tensor{Any}, data_) + indices_ = convert(Tensor{Int32}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, data_) + tf.add_input(desc, indices_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") data_ = convert(tf.EagerTensor, data_) @@ -50347,51 +50347,51 @@ begin return res[1] end end - function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) - else - sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end end - end end """ - conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, data_format=NHWC, dilations=[1, 1, 1, 1]) + conv2d_backprop_filter(input, filter_sizes, out_backprop; use_cudnn_on_gpu=true, data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2DBackpropFilter") do - desc = tf.NodeDescription("Conv2DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - 
tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2DBackpropFilter") do + desc = tf.NodeDescription("Conv2DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("Conv2DBackpropFilter") input_ = convert(tf.EagerTensor, input_) @@ -50424,13 +50424,13 @@ begin return res[1] end end - function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - end end @@ -50440,53 +50440,53 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, 
finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do - desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] - init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] - reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] - finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, init_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, finalize_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Tinit_func_other_arguments !== nothing - desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Tfinalize_func_other_arguments !== nothing - desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do + desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, key_func_other_arguments_) + tf.add_input(desc, init_func_other_arguments_) + tf.add_input(desc, reduce_func_other_arguments_) + tf.add_input(desc, finalize_func_other_arguments_) + if 
key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalGroupByReducerDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -50536,48 +50536,48 @@ begin return res[1] end end - function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - else - experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, 
key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + else + experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + end end - end end """ - max_pool_grad(orig_input, orig_output, grad; data_format=NHWC) + max_pool_grad(orig_input, orig_output, grad; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGrad") do - desc = tf.NodeDescription("MaxPoolGrad") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGrad") do + desc = tf.NodeDescription("MaxPoolGrad") + orig_input_ = convert(Tensor{Float32}, orig_input_) + orig_output_ = convert(Tensor{Float32}, orig_output_) + grad_ = convert(Tensor{Float32}, grad_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + 
desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPoolGrad") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -50608,13 +50608,13 @@ begin return res[1] end end - function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -50624,15 +50624,15 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "_InitializeHostForDistributedTPU") do - desc = tf.NodeDescription("_InitializeHostForDistributedTPU") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "_InitializeHostForDistributedTPU") do + desc = tf.NodeDescription("_InitializeHostForDistributedTPU") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) desc = tf.EagerOp("_InitializeHostForDistributedTPU") input_ = convert(tf.EagerTensor, input_) @@ -50644,13 +50644,13 @@ begin return res[1] end end - function _initialize_host_for_distributed_tpu(input_; name=nothing) - if tf.in_eager_mode() - _initialize_host_for_distributed_tpu_eager(input_; name=name) - else - _initialize_host_for_distributed_tpu_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing) + if tf.in_eager_mode() + _initialize_host_for_distributed_tpu_eager(input_; name=name) + else + _initialize_host_for_distributed_tpu_graph(input_; name=name) + end end - end end @@ -50660,30 +50660,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StagePeek") do - desc = tf.NodeDescription("StagePeek") - index_ = convert(Tensor{Int32}, index_) - tf.add_input(desc, index_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if 
memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StagePeek") do + desc = tf.NodeDescription("StagePeek") + index_ = convert(Tensor{Int32}, index_) + tf.add_input(desc, index_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("StagePeek") index_ = convert(tf.EagerTensor, index_) @@ -50710,13 +50710,13 @@ begin return res[1] end end - function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -50726,21 +50726,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) - local desc - tf.with_op_name(name, "PadV2") do - desc = tf.NodeDescription("PadV2") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - constant_values_ = convert(Tensor{Any}, constant_values_) - (input_, constant_values_) = tf.tf_promote(input_, constant_values_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, constant_values_) - end - tf.Tensor(tf.Operation(desc)) + function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) + local desc + tf.with_op_name(name, "PadV2") do + desc = tf.NodeDescription("PadV2") + input_ = convert(Tensor{Any}, input_) + paddings_ = convert(Tensor{Int32}, paddings_) + constant_values_ = convert(Tensor{Any}, constant_values_) + (input_, constant_values_) = tf.tf_promote(input_, constant_values_) + (paddings_,) = tf.tf_promote(paddings_) + tf.add_input(desc, input_) + tf.add_input(desc, paddings_) + 
tf.add_input(desc, constant_values_) end + tf.Tensor(tf.Operation(desc)) + end function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) desc = tf.EagerOp("PadV2") input_ = convert(tf.EagerTensor, input_) @@ -50759,13 +50759,13 @@ begin return res[1] end end - function pad_v2(input_, paddings_, constant_values_; name=nothing) - if tf.in_eager_mode() - pad_v2_eager(input_, paddings_, constant_values_; name=name) - else - pad_v2_graph(input_, paddings_, constant_values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing) + if tf.in_eager_mode() + pad_v2_eager(input_, paddings_, constant_values_; name=name) + else + pad_v2_graph(input_, paddings_, constant_values_; name=name) + end end - end end @@ -50775,21 +50775,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "OptionalGetValue") do - desc = tf.NodeDescription("OptionalGetValue") - optional_ = convert(Tensor{Any}, optional_) - tf.add_input(desc, optional_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptionalGetValue") do + desc = tf.NodeDescription("OptionalGetValue") + optional_ = convert(Tensor{Any}, optional_) + tf.add_input(desc, optional_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("OptionalGetValue") optional_ = convert(tf.EagerTensor, optional_) @@ -50807,34 +50807,34 @@ begin return res[1] end end - function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) - else - optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end """ - print_v2(input; output_stream=stderr) + print_v2(input; output_stream=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function print_v2_graph(input_; name=nothing, output_stream=nothing) - local desc - tf.with_op_name(name, "PrintV2") do - desc = tf.NodeDescription("PrintV2") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if output_stream !== nothing - desc["output_stream"] = Base.String(output_stream) - end + function print_v2_graph(input_; name=nothing, 
output_stream=nothing) + local desc + tf.with_op_name(name, "PrintV2") do + desc = tf.NodeDescription("PrintV2") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if output_stream !== nothing + desc["output_stream"] = Base.String(output_stream) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function print_v2_eager(input_; name=nothing, output_stream=nothing) desc = tf.EagerOp("PrintV2") input_ = convert(tf.EagerTensor, input_) @@ -50849,13 +50849,13 @@ begin return res[1] end end - function print_v2(input_; name=nothing, output_stream=nothing) - if tf.in_eager_mode() - print_v2_eager(input_; name=name, output_stream=output_stream) - else - print_v2_graph(input_; name=name, output_stream=output_stream) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing) + if tf.in_eager_mode() + print_v2_eager(input_; name=name, output_stream=output_stream) + else + print_v2_graph(input_; name=name, output_stream=output_stream) + end end - end end @@ -50865,19 +50865,19 @@ end Creates an empty Tensor with shape `shape` and type `dtype`. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "_ParallelConcatStart") do - desc = tf.NodeDescription("_ParallelConcatStart") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatStart") do + desc = tf.NodeDescription("_ParallelConcatStart") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) desc = tf.EagerOp("_ParallelConcatStart") if shape !== nothing @@ -50893,13 +50893,13 @@ begin return res[1] end end - function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) - if tf.in_eager_mode() - _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) - else - _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + if tf.in_eager_mode() + _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) + else + _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + end end - end end @@ -50909,31 +50909,31 @@ end Load embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - linears_ = convert(Tensor{Float32}, linears_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + accumulators_ = convert(Tensor{Float32}, accumulators_) + linears_ = convert(Tensor{Float32}, linears_) + tf.add_input(desc, parameters_) + tf.add_input(desc, accumulators_) + tf.add_input(desc, linears_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -50961,13 +50961,13 @@ begin return res[1] end end - function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -50977,29 +50977,29 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) - local desc - tf.with_op_name(name, "SparseSlice") do - desc = tf.NodeDescription("SparseSlice") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - shape_ = convert(Tensor{Int64}, shape_) - start_ = convert(Tensor{Int64}, start_) - size_ = convert(Tensor{Int64}, size_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - tf.add_input(desc, start_) - tf.add_input(desc, size_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) + local desc + tf.with_op_name(name, "SparseSlice") do + desc = tf.NodeDescription("SparseSlice") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + shape_ = convert(Tensor{Int64}, shape_) + start_ = convert(Tensor{Int64}, start_) + size_ = convert(Tensor{Int64}, size_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + tf.add_input(desc, start_) + tf.add_input(desc, size_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) desc = tf.EagerOp("SparseSlice") indices_ = convert(tf.EagerTensor, indices_) @@ -51020,13 +51020,13 @@ begin return res end end - function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) - if tf.in_eager_mode() - sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) - else - sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) + if tf.in_eager_mode() + sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) + else + sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) + end end - end end @@ -51036,27 +51036,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do - desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") - float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - epsilon_ = convert(Tensor{Float32}, epsilon_) - tf.add_input(desc, float_values_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, epsilon_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end + function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do + desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + epsilon_ = convert(Tensor{Float32}, epsilon_) + tf.add_input(desc, 
float_values_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, epsilon_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") float_values_ = convert(tf.EagerTensor, float_values_) @@ -51075,13 +51075,13 @@ begin return res end end - function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) - else - boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + else + boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + end end - end end @@ -51091,21 +51091,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixSolve") do - desc = tf.NodeDescription("MatrixSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixSolve") do + desc = tf.NodeDescription("MatrixSolve") + matrix_ = convert(Tensor{Any}, matrix_) + rhs_ = convert(Tensor{Any}, rhs_) + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + tf.add_input(desc, matrix_) + tf.add_input(desc, rhs_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixSolve") matrix_ = convert(tf.EagerTensor, matrix_) @@ -51124,13 +51124,13 @@ begin return res[1] end end - function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) - else - matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end end - end end @@ -51140,18 +51140,18 @@ end An op that sets up the centralized structures for a distributed TPU """ 
begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ConfigureDistributedTPU") do - desc = tf.NodeDescription("_ConfigureDistributedTPU") - inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end + function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ConfigureDistributedTPU") do + desc = tf.NodeDescription("_ConfigureDistributedTPU") + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if N !== nothing + desc["N"] = Base.Int(N) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) desc = tf.EagerOp("_ConfigureDistributedTPU") inputs_ = convert(tf.EagerTensor, inputs_) @@ -51166,13 +51166,13 @@ begin return res[1] end end - function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - _configure_distributed_tpu_eager(inputs_; name=name, N=N) - else - _configure_distributed_tpu_graph(inputs_; name=name, N=N) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + _configure_distributed_tpu_eager(inputs_; name=name, N=N) + else + _configure_distributed_tpu_graph(inputs_; name=name, N=N) + end end - end end @@ -51182,17 +51182,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) - local desc - tf.with_op_name(name, "AdjustContrastv2") do - desc = tf.NodeDescription("AdjustContrastv2") - images_ = convert(Tensor{Float32}, images_) - contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) - end - tf.Tensor(tf.Operation(desc)) + function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) + local desc + tf.with_op_name(name, "AdjustContrastv2") do + desc = tf.NodeDescription("AdjustContrastv2") + images_ = convert(Tensor{Float32}, images_) + contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) + tf.add_input(desc, images_) + tf.add_input(desc, contrast_factor_) end + tf.Tensor(tf.Operation(desc)) + end function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing) desc = tf.EagerOp("AdjustContrastv2") images_ = convert(tf.EagerTensor, images_) @@ -51206,13 +51206,13 @@ begin return res[1] end end - function adjust_contrastv2(images_, contrast_factor_; name=nothing) - if tf.in_eager_mode() - adjust_contrastv2_eager(images_, contrast_factor_; name=name) - else - adjust_contrastv2_graph(images_, contrast_factor_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing) + if tf.in_eager_mode() + adjust_contrastv2_eager(images_, contrast_factor_; name=name) + else + adjust_contrastv2_graph(images_, contrast_factor_; name=name) + end end - end end @@ -51222,27 +51222,27 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklMaximum") do - desc = tf.NodeDescription("_MklMaximum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMaximum") do + desc = tf.NodeDescription("_MklMaximum") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklMaximum") x_ = convert(tf.EagerTensor, x_) @@ -51262,56 +51262,56 @@ begin return res end end - function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end end """ - cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=lstm, input_mode=linear_input, direction=unidirectional, dropout=?, seed=0, seed2=0) + cudnn_rnn_params_size(num_layers, num_units, input_size; rnn_mode=, input_mode=, direction=, dropout=?, seed=0, seed2=0) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNParamsSize") do - desc = tf.NodeDescription("CudnnRNNParamsSize") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - if S !== nothing - desc["S"] = Base.identity(S) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end + function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, 
seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsSize") do + desc = tf.NodeDescription("CudnnRNNParamsSize") + num_layers_ = convert(Tensor{Int32}, num_layers_) + num_units_ = convert(Tensor{Int32}, num_units_) + input_size_ = convert(Tensor{Int32}, input_size_) + tf.add_input(desc, num_layers_) + tf.add_input(desc, num_units_) + tf.add_input(desc, input_size_) + if S !== nothing + desc["S"] = Base.identity(S) + end + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + if direction !== nothing + desc["direction"] = Base.String(direction) + end + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) desc = tf.EagerOp("CudnnRNNParamsSize") num_layers_ = convert(tf.EagerTensor, num_layers_) @@ -51348,13 +51348,13 @@ begin return res[1] end end - function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end end @@ -51364,20 +51364,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - summaries_ = [convert(Tensor{Float32}, x) for x = summaries_] - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, summaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end + function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + local desc + 
tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + summaries_ = [convert(Tensor{Float32}, x) for x = summaries_] + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, summaries_) + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -51394,13 +51394,13 @@ begin return res[1] end end - function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) - else - boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + end end - end end @@ -51410,15 +51410,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_ifft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT3D") do - desc = tf.NodeDescription("BatchIFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_ifft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT3D") do + desc = tf.NodeDescription("BatchIFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_ifft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchIFFT3D") input_ = convert(tf.EagerTensor, input_) @@ -51430,13 +51430,13 @@ begin return res[1] end end - function batch_ifft3d(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft3d_eager(input_; name=name) - else - batch_ifft3d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft3d(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft3d_eager(input_; name=name) + else + batch_ifft3d_graph(input_; name=name) + end end - end end @@ -51446,16 +51446,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sigmoid_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sigmoid") do - desc = tf.NodeDescription("Sigmoid") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - 
tf.Tensor(tf.Operation(desc)) + function sigmoid_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sigmoid") do + desc = tf.NodeDescription("Sigmoid") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function sigmoid_eager(x_; name=nothing) desc = tf.EagerOp("Sigmoid") x_ = convert(tf.EagerTensor, x_) @@ -51468,13 +51468,13 @@ begin return res[1] end end - function sigmoid(x_; name=nothing) - if tf.in_eager_mode() - sigmoid_eager(x_; name=name) - else - sigmoid_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid(x_; name=nothing) + if tf.in_eager_mode() + sigmoid_eager(x_; name=name) + else + sigmoid_graph(x_; name=name) + end end - end end @@ -51484,20 +51484,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_mean_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMean") do - desc = tf.NodeDescription("SegmentMean") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) + function segment_mean_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMean") do + desc = tf.NodeDescription("SegmentMean") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function segment_mean_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMean") data_ = convert(tf.EagerTensor, data_) @@ -51513,13 +51513,13 @@ begin return res[1] end end - function segment_mean(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_mean_eager(data_, segment_ids_; name=name) - else - segment_mean_graph(data_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_mean_eager(data_, segment_ids_; name=name) + else + segment_mean_graph(data_, segment_ids_; name=name) + end end - end end @@ -51529,15 +51529,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do - desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - end - tf.Tensor(tf.Operation(desc)) + function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do + desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + tf.add_input(desc, tree_ensemble_handle_) end + tf.Tensor(tf.Operation(desc)) + end function 
is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) @@ -51549,13 +51549,13 @@ begin return res[1] end end - function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) - else - is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) + else + is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) + end end - end end @@ -51565,17 +51565,17 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySizeV2") do - desc = tf.NodeDescription("TensorArraySizeV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV2") do + desc = tf.NodeDescription("TensorArraySizeV2") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArraySizeV2") handle_ = convert(tf.EagerTensor, handle_) @@ -51589,13 +51589,13 @@ begin return res[1] end end - function tensor_array_size_v2(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_v2_eager(handle_, flow_in_; name=name) - else - tensor_array_size_v2_graph(handle_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_v2_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v2_graph(handle_, flow_in_; name=name) + end end - end end @@ -51605,27 +51605,27 @@ end Returns x - y element-wise. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklSub") do - desc = tf.NodeDescription("_MklSub") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSub") do + desc = tf.NodeDescription("_MklSub") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSub") x_ = convert(tf.EagerTensor, x_) @@ -51645,13 +51645,13 @@ begin return res end end - function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end end @@ -51661,26 +51661,26 @@ end An op that performs gradient updates of embedding tables. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - local desc - tf.with_op_name(name, "SendTPUEmbeddingGradients") do - desc = tf.NodeDescription("SendTPUEmbeddingGradients") - inputs_ = [convert(Tensor{Float32}, x) for x = inputs_] - learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_] - tf.add_input(desc, inputs_) - tf.add_input(desc, learning_rates_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if NN !== nothing - desc["NN"] = Base.Int(NN) - end - if config !== nothing - desc["config"] = Base.String(config) - end - end - tf.Tensor(tf.Operation(desc)) + function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + local desc + tf.with_op_name(name, "SendTPUEmbeddingGradients") do + desc = tf.NodeDescription("SendTPUEmbeddingGradients") + inputs_ = [convert(Tensor{Float32}, x) for x = inputs_] + learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_] + tf.add_input(desc, inputs_) + tf.add_input(desc, learning_rates_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if NN !== nothing + desc["NN"] = Base.Int(NN) + end + if config !== nothing + desc["config"] = Base.String(config) + end end + tf.Tensor(tf.Operation(desc)) + end function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) desc = tf.EagerOp("SendTPUEmbeddingGradients") inputs_ = convert(tf.EagerTensor, inputs_) @@ -51703,44 +51703,44 @@ begin return res[1] end end - function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - if tf.in_eager_mode() - send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) - else - send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + if tf.in_eager_mode() + send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + else + send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + end end - end end """ - max_pool3d(input; data_format=NDHWC) + max_pool3d(input; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3D") do - desc = tf.NodeDescription("MaxPool3D") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3D") do + desc = tf.NodeDescription("MaxPool3D") + input_ = convert(Tensor{Any}, input_) + (input_,) = 
tf.tf_promote(input_) + tf.add_input(desc, input_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3D") input_ = convert(tf.EagerTensor, input_) @@ -51765,13 +51765,13 @@ begin return res[1] end end - function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -51781,23 +51781,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Prod") do - desc = tf.NodeDescription("Prod") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end + function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Prod") do + desc = tf.NodeDescription("Prod") + input_ = convert(Tensor{Any}, input_) + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + (input_,) = tf.tf_promote(input_) + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + tf.add_input(desc, input_) + tf.add_input(desc, reduction_indices_) + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) desc = tf.EagerOp("Prod") input_ = convert(tf.EagerTensor, input_) @@ -51816,13 +51816,13 @@ begin return res[1] end end - function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + 
prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end end @@ -51832,15 +51832,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_identity_indexed_dataset_graph(size_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do - desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") - size_ = convert(Tensor{Any}, size_) - tf.add_input(desc, size_) - end - tf.Tensor(tf.Operation(desc)) + function experimental_identity_indexed_dataset_graph(size_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do + desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") + size_ = convert(Tensor{Any}, size_) + tf.add_input(desc, size_) end + tf.Tensor(tf.Operation(desc)) + end function experimental_identity_indexed_dataset_eager(size_; name=nothing) desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") size_ = convert(tf.EagerTensor, size_) @@ -51852,13 +51852,13 @@ begin return res[1] end end - function experimental_identity_indexed_dataset(size_; name=nothing) - if tf.in_eager_mode() - experimental_identity_indexed_dataset_eager(size_; name=name) - else - experimental_identity_indexed_dataset_graph(size_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing) + if tf.in_eager_mode() + experimental_identity_indexed_dataset_eager(size_; name=name) + else + experimental_identity_indexed_dataset_graph(size_; name=name) + end end - end end @@ -51868,21 +51868,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPushBack") do - desc = tf.NodeDescription("TensorListPushBack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPushBack") do + desc = tf.NodeDescription("TensorListPushBack") + input_handle_ = convert(Tensor{Any}, input_handle_) + tensor_ = convert(Tensor{Any}, tensor_) + (tensor_,) = tf.tf_promote(tensor_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, tensor_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListPushBack") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -51900,13 +51900,13 @@ begin return res[1] end end - function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) - else - tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
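The hunks above and below repeat one mechanical change for every generated op: the `*_graph` builder loses its `tf.@op` annotation, a `*_eager` builder sits alongside it, and `tf.@op` moves onto a thin public dispatcher that picks between the two at call time. A minimal sketch of that three-function shape, using a hypothetical one-input op -- `my_op`/`"MyOp"` are invented here, while the `tf.*` helpers are the ones this file already uses:

    function my_op_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "MyOp") do
            desc = tf.NodeDescription("MyOp")
            x_ = convert(Tensor{Any}, x_)
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))   # graph mode: build a node, return its output
    end
    function my_op_eager(x_; name=nothing)
        desc = tf.EagerOp("MyOp")
        x_ = convert(tf.EagerTensor, x_)
        tf.add_input(desc, x_)
        res = tf.execute(desc)          # eager mode: run the kernel immediately
        res[1]
    end
    my_op(x_; name=nothing) =
        tf.in_eager_mode() ? my_op_eager(x_; name=name) : my_op_graph(x_; name=name)
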
tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) + else + tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) + end end - end end @@ -51916,53 +51916,53 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "BatchFunction") do - desc = tf.NodeDescription("BatchFunction") - in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] - captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_] - tf.add_input(desc, in_tensors_) - tf.add_input(desc, captured_tensors_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) - end - if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tcaptured !== nothing - desc["Tcaptured"] = map(Base.identity, Tcaptured) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - end - tf.Tensor(tf.Operation(desc)) + function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "BatchFunction") do + desc = tf.NodeDescription("BatchFunction") + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_] + tf.add_input(desc, in_tensors_) + tf.add_input(desc, captured_tensors_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] 
= Base.String(shared_name) + end + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end end + tf.Tensor(tf.Operation(desc)) + end function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) desc = tf.EagerOp("BatchFunction") in_tensors_ = convert(tf.EagerTensor, in_tensors_) @@ -52012,13 +52012,13 @@ begin return res[1] end end - function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - if tf.in_eager_mode() - batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) - else - batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + if tf.in_eager_mode() + batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + else + batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + end end - end end @@ -52028,27 +52028,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "SparseFillEmptyRows") do - desc = 
tf.NodeDescription("SparseFillEmptyRows") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - dense_shape_ = convert(Tensor{Int64}, dense_shape_) - default_value_ = convert(Tensor{Any}, default_value_) - (values_, default_value_) = tf.tf_promote(values_, default_value_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - tf.add_input(desc, default_value_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRows") do + desc = tf.NodeDescription("SparseFillEmptyRows") + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + default_value_ = convert(Tensor{Any}, default_value_) + (values_, default_value_) = tf.tf_promote(values_, default_value_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, dense_shape_) + tf.add_input(desc, default_value_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRows") indices_ = convert(tf.EagerTensor, indices_) @@ -52068,13 +52068,13 @@ begin return res end end - function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) - if tf.in_eager_mode() - sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) - else - sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) + if tf.in_eager_mode() + sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) + else + sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) + end end - end end @@ -52084,24 +52084,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) - local desc - tf.with_op_name(name, "SelfAdjointEigV2") do - desc = tf.NodeDescription("SelfAdjointEigV2") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEigV2") do + desc = tf.NodeDescription("SelfAdjointEigV2") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) desc = tf.EagerOp("SelfAdjointEigV2") input_ = convert(tf.EagerTensor, input_) @@ -52117,13 +52117,13 @@ begin return res end 
end - function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.in_eager_mode() - self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) - else - self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.in_eager_mode() + self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end end - end end @@ -52133,30 +52133,30 @@ end Retrieve embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_ftrl_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters") if table_id !== nothing @@ -52178,13 +52178,13 @@ begin return res end end - function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -52194,37 +52194,37 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do - desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do + desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") + var_ = convert(Tensor{Any}, var_) + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + global_step_ = convert(Tensor{Int64}, global_step_) + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, gradient_accumulator_) + tf.add_input(desc, gradient_squared_accumulator_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, global_step_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceSparseApplyAdagradDA") var_ = convert(tf.EagerTensor, var_) @@ -52260,13 +52260,13 @@ begin return res[1] end end - function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, 
gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end end @@ -52276,22 +52276,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - local desc - tf.with_op_name(name, "TemporaryVariable") do - desc = tf.NodeDescription("TemporaryVariable") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if var_name !== nothing - desc["var_name"] = Base.String(var_name) - end + function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "TemporaryVariable") do + desc = tf.NodeDescription("TemporaryVariable") + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if var_name !== nothing + desc["var_name"] = Base.String(var_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function temporary_variable_eager(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) desc = tf.EagerOp("TemporaryVariable") if shape !== nothing @@ -52310,13 +52310,13 @@ begin return res[1] end end - function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - if tf.in_eager_mode() - temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) - else - temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + if tf.in_eager_mode() + temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) + else + temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) + end end - end end @@ -52326,31 +52326,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAddSign") do - desc = tf.NodeDescription("ResourceApplyAddSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - alpha_ = convert(Tensor{Any}, alpha_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, 
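One Julia-facing detail the regeneration preserves: graph wrappers for index-taking ops (SegmentMean, Prod, and ResourceSparseApplyAdagradDA in the hunks above) shift the caller's 1-based indices to TensorFlow's 0-based convention before wiring the input. Illustratively, with made-up values:

    segment_ids_ = convert(Tensor{Int32}, [1, 1, 2])                            # 1-based, as a Julia caller writes it
    segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)   # 0-based, as the kernel expects
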
alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAddSign") do + desc = tf.NodeDescription("ResourceApplyAddSign") + var_ = convert(Tensor{Any}, var_) + m_ = convert(Tensor{Any}, m_) + lr_ = convert(Tensor{Any}, lr_) + alpha_ = convert(Tensor{Any}, alpha_) + sign_decay_ = convert(Tensor{Any}, sign_decay_) + beta_ = convert(Tensor{Any}, beta_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, m_) + tf.add_input(desc, lr_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sign_decay_) + tf.add_input(desc, beta_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyAddSign") var_ = convert(tf.EagerTensor, var_) @@ -52382,13 +52382,13 @@ begin return res[1] end end - function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -52398,22 +52398,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function roll_graph(input_, shift_, axis_; name=nothing) - local desc - tf.with_op_name(name, "Roll") do - desc = tf.NodeDescription("Roll") - input_ = convert(Tensor{Any}, input_) - shift_ = convert(Tensor{Any}, shift_) - axis_ = convert(Tensor{Any}, axis_) - (input_,) = tf.tf_promote(input_) - (shift_,) = tf.tf_promote(shift_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, input_) - tf.add_input(desc, shift_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) + function roll_graph(input_, shift_, axis_; name=nothing) + local desc + tf.with_op_name(name, "Roll") do + desc = tf.NodeDescription("Roll") + input_ = convert(Tensor{Any}, input_) + shift_ = convert(Tensor{Any}, shift_) + axis_ = convert(Tensor{Any}, axis_) + (input_,) = tf.tf_promote(input_) + (shift_,) = tf.tf_promote(shift_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, input_) + tf.add_input(desc, shift_) + tf.add_input(desc, axis_) end + tf.Tensor(tf.Operation(desc)) + end function roll_eager(input_, shift_, axis_; name=nothing) desc = tf.EagerOp("Roll") input_ = convert(tf.EagerTensor, input_) @@ -52432,13 +52432,13 @@ begin return res[1] end 
end - function roll(input_, shift_, axis_; name=nothing) - if tf.in_eager_mode() - roll_eager(input_, shift_, axis_; name=name) - else - roll_graph(input_, shift_, axis_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function roll(input_, shift_, axis_; name=nothing) + if tf.in_eager_mode() + roll_eager(input_, shift_, axis_; name=name) + else + roll_graph(input_, shift_, axis_; name=name) + end end - end end @@ -52448,18 +52448,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function xdivy_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Xdivy") do - desc = tf.NodeDescription("Xdivy") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function xdivy_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xdivy") do + desc = tf.NodeDescription("Xdivy") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function xdivy_eager(x_, y_; name=nothing) desc = tf.EagerOp("Xdivy") x_ = convert(tf.EagerTensor, x_) @@ -52475,48 +52475,48 @@ begin return res[1] end end - function xdivy(x_, y_; name=nothing) - if tf.in_eager_mode() - xdivy_eager(x_, y_; name=name) - else - xdivy_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xdivy(x_, y_; name=nothing) + if tf.in_eager_mode() + xdivy_eager(x_, y_; name=name) + else + xdivy_graph(x_, y_; name=name) + end end - end end """ - max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=NDHWC) + max_pool3d_grad_grad(orig_input, orig_output, grad; data_format=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3DGradGrad") do - desc = tf.NodeDescription("MaxPool3DGradGrad") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - grad_ = convert(Tensor{Any}, grad_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end + function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGradGrad") do + desc = tf.NodeDescription("MaxPool3DGradGrad") + orig_input_ = convert(Tensor{Any}, orig_input_) + orig_output_ = convert(Tensor{Any}, orig_output_) + grad_ = convert(Tensor{Any}, grad_) + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + tf.add_input(desc, orig_input_) + tf.add_input(desc, orig_output_) + tf.add_input(desc, grad_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== 
nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) desc = tf.EagerOp("MaxPool3DGradGrad") orig_input_ = convert(tf.EagerTensor, orig_input_) @@ -52547,13 +52547,13 @@ begin return res[1] end end - function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end end @@ -52563,35 +52563,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedBiasAdd") do - desc = tf.NodeDescription("QuantizedBiasAdd") - input_ = convert(Tensor{Any}, input_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_bias_ = convert(Tensor{Float32}, min_bias_) - max_bias_ = convert(Tensor{Float32}, max_bias_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_bias_) - tf.add_input(desc, max_bias_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedBiasAdd") do + desc = tf.NodeDescription("QuantizedBiasAdd") + input_ = convert(Tensor{Any}, input_) + bias_ = convert(Tensor{Any}, bias_) + min_input_ = convert(Tensor{Float32}, min_input_) + max_input_ = convert(Tensor{Float32}, max_input_) + min_bias_ = convert(Tensor{Float32}, min_bias_) + max_bias_ = convert(Tensor{Float32}, max_bias_) + (input_,) = tf.tf_promote(input_) + (bias_,) = tf.tf_promote(bias_) + tf.add_input(desc, input_) + tf.add_input(desc, bias_) + tf.add_input(desc, min_input_) + tf.add_input(desc, max_input_) + tf.add_input(desc, min_bias_) + tf.add_input(desc, max_bias_) + if out_type !== nothing + desc["out_type"] = 
Base.identity(out_type) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedBiasAdd") input_ = convert(tf.EagerTensor, input_) @@ -52618,44 +52618,44 @@ begin return res end end - function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) - else - quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + else + quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + end end - end end """ - crop_and_resize(image, boxes, box_ind, crop_size; method=bilinear, extrapolation_value=?) + crop_and_resize(image, boxes, box_ind, crop_size; method=, extrapolation_value=?) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - local desc - tf.with_op_name(name, "CropAndResize") do - desc = tf.NodeDescription("CropAndResize") - image_ = convert(Tensor{Any}, image_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - crop_size_ = convert(Tensor{Int32}, crop_size_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, crop_size_) - if method !== nothing - desc["method"] = Base.String(method) - end - if extrapolation_value !== nothing - desc["extrapolation_value"] = Base.identity(extrapolation_value) - end + function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + local desc + tf.with_op_name(name, "CropAndResize") do + desc = tf.NodeDescription("CropAndResize") + image_ = convert(Tensor{Any}, image_) + boxes_ = convert(Tensor{Float32}, boxes_) + box_ind_ = convert(Tensor{Int32}, box_ind_) + crop_size_ = convert(Tensor{Int32}, crop_size_) + (image_,) = tf.tf_promote(image_) + tf.add_input(desc, image_) + tf.add_input(desc, boxes_) + tf.add_input(desc, box_ind_) + tf.add_input(desc, crop_size_) + if method !== nothing + desc["method"] = Base.String(method) + end + if extrapolation_value !== nothing + desc["extrapolation_value"] = Base.identity(extrapolation_value) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) desc = tf.EagerOp("CropAndResize") image_ = convert(tf.EagerTensor, image_) @@ -52680,13 +52680,13 @@ begin return res[1] end end - function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - if tf.in_eager_mode() - 
crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) - else - crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + if tf.in_eager_mode() + crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + else + crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + end end - end end @@ -52696,35 +52696,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapUnstageNoKey") do - desc = tf.NodeDescription("MapUnstageNoKey") - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapUnstageNoKey") do + desc = tf.NodeDescription("MapUnstageNoKey") + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("MapUnstageNoKey") indices_ = convert(tf.EagerTensor, indices_) @@ -52751,13 +52751,13 @@ begin return res end end - function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, 
dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -52767,25 +52767,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterNdSub") do - desc = tf.NodeDescription("ScatterNdSub") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdSub") do + desc = tf.NodeDescription("ScatterNdSub") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterNdSub") ref_ = convert(tf.EagerTensor, ref_) @@ -52807,13 +52807,13 @@ begin return res[1] end end - function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -52823,21 +52823,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBilinear") do - desc = tf.NodeDescription("ResizeBilinear") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinear") do + desc = tf.NodeDescription("ResizeBilinear") + 
images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBilinear") images_ = convert(tf.EagerTensor, images_) @@ -52855,13 +52855,13 @@ begin return res[1] end end - function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) + end end - end end @@ -52871,32 +52871,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapPeek") do - desc = tf.NodeDescription("OrderedMapPeek") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) + function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapPeek") do + desc = tf.NodeDescription("OrderedMapPeek") + key_ = convert(Tensor{Int64}, key_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, key_) + tf.add_input(desc, indices_) + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end end + tf.Tensor(tf.Operation(desc)) + end function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("OrderedMapPeek") key_ = convert(tf.EagerTensor, key_) @@ -52925,13 +52925,13 @@ begin return res[1] end end - function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, 
dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -52941,30 +52941,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArray") do - desc = tf.NodeDescription("TensorArray") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArray") do + desc = tf.NodeDescription("TensorArray") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) desc = tf.EagerOp("TensorArray") size_ = convert(tf.EagerTensor, size_) @@ -52991,13 +52991,13 @@ begin return res[1] end end - function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) - else - tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, 
dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + else + tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + end end - end end @@ -53007,20 +53007,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inplace_sub_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceSub") do - desc = tf.NodeDescription("InplaceSub") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) + function inplace_sub_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceSub") do + desc = tf.NodeDescription("InplaceSub") + x_ = convert(Tensor{Any}, x_) + i_ = convert(Tensor{Int32}, i_) + v_ = convert(Tensor{Any}, v_) + (x_, v_) = tf.tf_promote(x_, v_) + tf.add_input(desc, x_) + tf.add_input(desc, i_) + tf.add_input(desc, v_) end + tf.Tensor(tf.Operation(desc)) + end function inplace_sub_eager(x_, i_, v_; name=nothing) desc = tf.EagerOp("InplaceSub") x_ = convert(tf.EagerTensor, x_) @@ -53038,13 +53038,13 @@ begin return res[1] end end - function inplace_sub(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_sub_eager(x_, i_, v_; name=name) - else - inplace_sub_graph(x_, i_, v_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_sub_eager(x_, i_, v_; name=name) + else + inplace_sub_graph(x_, i_, v_; name=name) + end end - end end @@ -53054,18 +53054,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function pow_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Pow") do - desc = tf.NodeDescription("Pow") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function pow_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Pow") do + desc = tf.NodeDescription("Pow") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function pow_eager(x_, y_; name=nothing) desc = tf.EagerOp("Pow") x_ = convert(tf.EagerTensor, x_) @@ -53081,13 +53081,13 @@ begin return res[1] end end - function pow(x_, y_; name=nothing) - if tf.in_eager_mode() - pow_eager(x_, y_; name=name) - else - pow_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pow(x_, y_; name=nothing) + if tf.in_eager_mode() + pow_eager(x_, y_; name=name) + else + pow_graph(x_, y_; name=name) + end end - end end @@ -53097,16 +53097,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function ref_next_iteration_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "RefNextIteration") do - desc = 
tf.NodeDescription("RefNextIteration") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - end - tf.Tensor(tf.Operation(desc)) + function ref_next_iteration_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "RefNextIteration") do + desc = tf.NodeDescription("RefNextIteration") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) end + tf.Tensor(tf.Operation(desc)) + end function ref_next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("RefNextIteration") data_ = convert(tf.EagerTensor, data_) @@ -53119,13 +53119,13 @@ begin return res[1] end end - function ref_next_iteration(data_; name=nothing) - if tf.in_eager_mode() - ref_next_iteration_eager(data_; name=name) - else - ref_next_iteration_graph(data_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_next_iteration(data_; name=nothing) + if tf.in_eager_mode() + ref_next_iteration_eager(data_; name=name) + else + ref_next_iteration_graph(data_; name=name) + end end - end end @@ -53135,18 +53135,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scalar_summary_graph(tags_, values_; name=nothing) - local desc - tf.with_op_name(name, "ScalarSummary") do - desc = tf.NodeDescription("ScalarSummary") - tags_ = convert(Tensor{String}, tags_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tags_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) + function scalar_summary_graph(tags_, values_; name=nothing) + local desc + tf.with_op_name(name, "ScalarSummary") do + desc = tf.NodeDescription("ScalarSummary") + tags_ = convert(Tensor{String}, tags_) + values_ = convert(Tensor{Any}, values_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, tags_) + tf.add_input(desc, values_) end + tf.Tensor(tf.Operation(desc)) + end function scalar_summary_eager(tags_, values_; name=nothing) desc = tf.EagerOp("ScalarSummary") tags_ = convert(tf.EagerTensor, tags_) @@ -53161,13 +53161,13 @@ begin return res[1] end end - function scalar_summary(tags_, values_; name=nothing) - if tf.in_eager_mode() - scalar_summary_eager(tags_, values_; name=name) - else - scalar_summary_graph(tags_, values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scalar_summary(tags_, values_; name=nothing) + if tf.in_eager_mode() + scalar_summary_eager(tags_, values_; name=name) + else + scalar_summary_graph(tags_, values_; name=name) + end end - end end @@ -53177,25 +53177,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) - local desc - tf.with_op_name(name, "StringSplitV2") do - desc = tf.NodeDescription("StringSplitV2") - input_ = convert(Tensor{String}, input_) - sep_ = convert(Tensor{String}, sep_) - tf.add_input(desc, input_) - tf.add_input(desc, sep_) - if maxsplit !== nothing - desc["maxsplit"] = Base.Int(maxsplit) - end + function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) + local desc + tf.with_op_name(name, "StringSplitV2") do + desc = tf.NodeDescription("StringSplitV2") + input_ = convert(Tensor{String}, input_) + sep_ = convert(Tensor{String}, sep_) + tf.add_input(desc, input_) + tf.add_input(desc, sep_) + if maxsplit !== nothing + desc["maxsplit"] = Base.Int(maxsplit) end 
- out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) desc = tf.EagerOp("StringSplitV2") input_ = convert(tf.EagerTensor, input_) @@ -53212,13 +53212,13 @@ begin return res end end - function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) - if tf.in_eager_mode() - string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) - else - string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) + if tf.in_eager_mode() + string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) + else + string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit) + end end - end end @@ -53228,16 +53228,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bessel_i0e_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "BesselI0e") do - desc = tf.NodeDescription("BesselI0e") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function bessel_i0e_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI0e") do + desc = tf.NodeDescription("BesselI0e") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function bessel_i0e_eager(x_; name=nothing) desc = tf.EagerOp("BesselI0e") x_ = convert(tf.EagerTensor, x_) @@ -53250,13 +53250,13 @@ begin return res[1] end end - function bessel_i0e(x_; name=nothing) - if tf.in_eager_mode() - bessel_i0e_eager(x_; name=name) - else - bessel_i0e_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i0e(x_; name=nothing) + if tf.in_eager_mode() + bessel_i0e_eager(x_; name=name) + else + bessel_i0e_graph(x_; name=name) + end end - end end @@ -53266,24 +53266,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unique_graph(x_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "Unique") do - desc = tf.NodeDescription("Unique") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end + function unique_graph(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "Unique") do + desc = tf.NodeDescription("Unique") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function unique_eager(x_; name=nothing, out_idx=nothing) desc = tf.EagerOp("Unique") x_ = convert(tf.EagerTensor, x_) @@ -53299,13 +53299,13 @@ begin return res end end - function unique(x_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_eager(x_; name=name, out_idx=out_idx) - else - unique_graph(x_; name=name, out_idx=out_idx) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique(x_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_eager(x_; name=name, out_idx=out_idx) + else + unique_graph(x_; name=name, out_idx=out_idx) + end end - end end @@ -53315,16 +53315,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function next_iteration_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "NextIteration") do - desc = tf.NodeDescription("NextIteration") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - end - tf.Tensor(tf.Operation(desc)) + function next_iteration_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "NextIteration") do + desc = tf.NodeDescription("NextIteration") + data_ = convert(Tensor{Any}, data_) + (data_,) = tf.tf_promote(data_) + tf.add_input(desc, data_) end + tf.Tensor(tf.Operation(desc)) + end function next_iteration_eager(data_; name=nothing) desc = tf.EagerOp("NextIteration") data_ = convert(tf.EagerTensor, data_) @@ -53337,13 +53337,13 @@ begin return res[1] end end - function next_iteration(data_; name=nothing) - if tf.in_eager_mode() - next_iteration_eager(data_; name=name) - else - next_iteration_graph(data_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function next_iteration(data_; name=nothing) + if tf.in_eager_mode() + next_iteration_eager(data_; name=name) + else + next_iteration_graph(data_; name=name) + end end - end end @@ -53353,31 +53353,31 @@ end Load embedding parameters for a single table. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) + function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(Tensor{Float32}, parameters_) + ms_ = convert(Tensor{Float32}, ms_) + mom_ = convert(Tensor{Float32}, mom_) + tf.add_input(desc, parameters_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + tf.Tensor(tf.Operation(desc)) + end function load_tpu_embedding_rms_prop_parameters_eager(parameters_, 
ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") parameters_ = convert(tf.EagerTensor, parameters_) @@ -53405,13 +53405,13 @@ begin return res[1] end end - function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -53421,24 +53421,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "EagerPyFunc") do - desc = tf.NodeDescription("EagerPyFunc") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end + function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "EagerPyFunc") do + desc = tf.NodeDescription("EagerPyFunc") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("EagerPyFunc") input_ = convert(tf.EagerTensor, input_) @@ -53459,13 +53459,13 @@ begin return res[1] end end - function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - end end @@ -53475,19 +53475,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "WholeFileReaderV2") do - desc = tf.NodeDescription("WholeFileReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReaderV2") do + desc = tf.NodeDescription("WholeFileReaderV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("WholeFileReaderV2") if container !== nothing @@ -53503,13 +53503,13 @@ begin return res[1] end end - function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) - else - whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end end @@ -53519,22 +53519,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterSub") do - desc = tf.NodeDescription("TensorScatterSub") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterSub") do + desc = tf.NodeDescription("TensorScatterSub") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) desc = tf.EagerOp("TensorScatterSub") tensor_ = convert(tf.EagerTensor, tensor_) @@ -53553,13 +53553,13 @@ begin return res[1] end end - function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
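# Note on the index arithmetic in the scatter ops below (an explanatory
# aside, not generated output): wrappers that take `indices_`, such as
# `tensor_scatter_sub_graph`, subtract one from the indices so callers can
# pass Julia's 1-based indices while the underlying TensorFlow kernel
# receives the 0-based indices it expects:
#
#     indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)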
tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) + end end - end end @@ -53569,25 +53569,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterMax") do - desc = tf.NodeDescription("ScatterMax") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMax") do + desc = tf.NodeDescription("ScatterMax") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMax") ref_ = convert(tf.EagerTensor, ref_) @@ -53609,13 +53609,13 @@ begin return res[1] end end - function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -53625,16 +53625,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sqrt_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sqrt") do - desc = tf.NodeDescription("Sqrt") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function sqrt_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sqrt") do + desc = tf.NodeDescription("Sqrt") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function sqrt_eager(x_; name=nothing) desc = tf.EagerOp("Sqrt") x_ = convert(tf.EagerTensor, x_) @@ -53647,13 +53647,13 @@ begin return res[1] end end - function sqrt(x_; name=nothing) - if tf.in_eager_mode() - sqrt_eager(x_; name=name) - else - sqrt_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
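# Every generated op in this file follows the same three-function layout:
# `<op>_graph` builds a NodeDescription inside `tf.with_op_name`,
# `<op>_eager` constructs and runs a `tf.EagerOp` immediately, and the
# `tf.@op`-wrapped entry point dispatches on `tf.in_eager_mode()`. A minimal
# usage sketch (illustrative only; the literal input is an assumption, not
# part of the generated file):
#
#     x = tf.constant([4.0, 9.0])
#     sqrt(x)   # eager mode: runs sqrt_eager and returns a concrete tensor
#               # graph mode: sqrt_graph adds a "Sqrt" node, returns tf.Tensor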
tf.@op function sqrt(x_; name=nothing) + if tf.in_eager_mode() + sqrt_eager(x_; name=name) + else + sqrt_graph(x_; name=name) + end end - end end @@ -53663,20 +53663,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AccumulatorTakeGradient") do - desc = tf.NodeDescription("AccumulatorTakeGradient") - handle_ = convert(Tensor{String}, handle_) - num_required_ = convert(Tensor{Int32}, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorTakeGradient") do + desc = tf.NodeDescription("AccumulatorTakeGradient") + handle_ = convert(Tensor{String}, handle_) + num_required_ = convert(Tensor{Int32}, num_required_) + tf.add_input(desc, handle_) + tf.add_input(desc, num_required_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) desc = tf.EagerOp("AccumulatorTakeGradient") handle_ = convert(tf.EagerTensor, handle_) @@ -53693,13 +53693,13 @@ begin return res[1] end end - function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) - else - accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end end - end end @@ -53709,27 +53709,27 @@ end Returns x + y element-wise. 
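(Internal MKL-specific variant: `mkl_x_` and `mkl_y_` appear to carry MKL
layout metadata for `x_` and `y_`, and the op produces two outputs, the sum
and its metadata, as the `for out_idx = 1:2` loop below shows. A hedged call
sketch with placeholder inputs that are assumptions, not generated output:

    z, mkl_z = _mkl_add(x, y, mkl_x, mkl_y)
)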
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklAdd") do - desc = tf.NodeDescription("_MklAdd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklAdd") do + desc = tf.NodeDescription("_MklAdd") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklAdd") x_ = convert(tf.EagerTensor, x_) @@ -53749,13 +53749,13 @@ begin return res end end - function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end end @@ -53765,18 +53765,18 @@ end An op which emits multiple Tensor values from an XLA computation. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "OutfeedEnqueueTuple") do - desc = tf.NodeDescription("OutfeedEnqueueTuple") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end + function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueueTuple") do + desc = tf.NodeDescription("OutfeedEnqueueTuple") + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + tf.add_input(desc, inputs_) + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) desc = tf.EagerOp("OutfeedEnqueueTuple") inputs_ = convert(tf.EagerTensor, inputs_) @@ -53791,13 +53791,13 @@ begin return res[1] end end - function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) - else - outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) + else + outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + end end - end end @@ -53807,16 +53807,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reciprocal_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Reciprocal") do - desc = tf.NodeDescription("Reciprocal") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function reciprocal_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Reciprocal") do + desc = tf.NodeDescription("Reciprocal") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function reciprocal_eager(x_; name=nothing) desc = tf.EagerOp("Reciprocal") x_ = convert(tf.EagerTensor, x_) @@ -53829,13 +53829,13 @@ begin return res[1] end end - function reciprocal(x_; name=nothing) - if tf.in_eager_mode() - reciprocal_eager(x_; name=name) - else - reciprocal_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal(x_; name=nothing) + if tf.in_eager_mode() + reciprocal_eager(x_; name=name) + else + reciprocal_graph(x_; name=name) + end end - end end @@ -53845,15 +53845,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_strip_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "StringStrip") do - desc = tf.NodeDescription("StringStrip") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function string_strip_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "StringStrip") do + desc = tf.NodeDescription("StringStrip") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function string_strip_eager(input_; name=nothing) desc = 
tf.EagerOp("StringStrip") input_ = convert(tf.EagerTensor, input_) @@ -53865,13 +53865,13 @@ begin return res[1] end end - function string_strip(input_; name=nothing) - if tf.in_eager_mode() - string_strip_eager(input_; name=name) - else - string_strip_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_strip(input_; name=nothing) + if tf.in_eager_mode() + string_strip_eager(input_; name=name) + else + string_strip_graph(input_; name=name) + end end - end end @@ -53881,15 +53881,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_ready_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "BarrierReadySize") do - desc = tf.NodeDescription("BarrierReadySize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function barrier_ready_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "BarrierReadySize") do + desc = tf.NodeDescription("BarrierReadySize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function barrier_ready_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierReadySize") handle_ = convert(tf.EagerTensor, handle_) @@ -53901,13 +53901,13 @@ begin return res[1] end end - function barrier_ready_size(handle_; name=nothing) - if tf.in_eager_mode() - barrier_ready_size_eager(handle_; name=name) - else - barrier_ready_size_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_ready_size(handle_; name=nothing) + if tf.in_eager_mode() + barrier_ready_size_eager(handle_; name=name) + else + barrier_ready_size_graph(handle_; name=name) + end end - end end @@ -53917,25 +53917,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end + function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") + inputs_ = convert(Tensor{Float32}, inputs_) + min_ = convert(Tensor{Float32}, min_) + max_ = convert(Tensor{Float32}, max_) + tf.add_input(desc, inputs_) + tf.add_input(desc, min_) + tf.add_input(desc, max_) + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") inputs_ = convert(tf.EagerTensor, 
inputs_) @@ -53957,13 +53957,13 @@ begin return res[1] end end - function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - end end @@ -53973,18 +53973,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) - local desc - tf.with_op_name(name, "StringToHashBucket") do - desc = tf.NodeDescription("StringToHashBucket") - string_tensor_ = convert(Tensor{String}, string_tensor_) - tf.add_input(desc, string_tensor_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end + function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucket") do + desc = tf.NodeDescription("StringToHashBucket") + string_tensor_ = convert(Tensor{String}, string_tensor_) + tf.add_input(desc, string_tensor_) + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) desc = tf.EagerOp("StringToHashBucket") string_tensor_ = convert(tf.EagerTensor, string_tensor_) @@ -53999,13 +53999,13 @@ begin return res[1] end end - function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) - else - string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) + end end - end end @@ -54015,28 +54015,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcat") do - desc = tf.NodeDescription("TensorArrayConcat") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end + function 
tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcat") do + desc = tf.NodeDescription("TensorArrayConcat") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) desc = tf.EagerOp("TensorArrayConcat") handle_ = convert(tf.EagerTensor, handle_) @@ -54056,13 +54056,13 @@ begin return res end end - function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - end end @@ -54072,19 +54072,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) - local desc - tf.with_op_name(name, "ShardedFilename") do - desc = tf.NodeDescription("ShardedFilename") - basename_ = convert(Tensor{String}, basename_) - shard_ = convert(Tensor{Int32}, shard_) - num_shards_ = convert(Tensor{Int32}, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, shard_) - tf.add_input(desc, num_shards_) - end - tf.Tensor(tf.Operation(desc)) + function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilename") do + desc = tf.NodeDescription("ShardedFilename") + basename_ = convert(Tensor{String}, basename_) + shard_ = convert(Tensor{Int32}, shard_) + num_shards_ = convert(Tensor{Int32}, num_shards_) + tf.add_input(desc, basename_) + tf.add_input(desc, shard_) + tf.add_input(desc, num_shards_) end + tf.Tensor(tf.Operation(desc)) + end function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) desc = tf.EagerOp("ShardedFilename") basename_ = convert(tf.EagerTensor, basename_) @@ -54100,13 +54100,13 @@ begin return res[1] end end - function sharded_filename(basename_, shard_, num_shards_; name=nothing) - if tf.in_eager_mode() - sharded_filename_eager(basename_, shard_, num_shards_; name=name) - else - sharded_filename_graph(basename_, shard_, num_shards_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing) + if tf.in_eager_mode() + 
sharded_filename_eager(basename_, shard_, num_shards_; name=name) + else + sharded_filename_graph(basename_, shard_, num_shards_; name=name) + end end - end end @@ -54116,24 +54116,24 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "PyFunc") do - desc = tf.NodeDescription("PyFunc") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end + function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFunc") do + desc = tf.NodeDescription("PyFunc") + input_ = [convert(Tensor{Any}, x) for x = input_] + tf.add_input(desc, input_) + if token !== nothing + desc["token"] = Base.String(token) + end + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) desc = tf.EagerOp("PyFunc") input_ = convert(tf.EagerTensor, input_) @@ -54154,13 +54154,13 @@ begin return res[1] end end - function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - end end @@ -54170,23 +54170,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentProd") do - desc = tf.NodeDescription("UnsortedSegmentProd") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentProd") do + desc = tf.NodeDescription("UnsortedSegmentProd") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + 
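# ---- editor's annotation (not part of the patch) ----------------------------
# Several graph builders here shift index-like inputs down by one, e.g. in
# UnsortedSegmentProd just above:
#     segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
# That bridges Julia's 1-based indices to TensorFlow's 0-based kernels, done
# symbolically so it also works on placeholder inputs. The same idea on plain
# arrays, for intuition only:
julia_segment_ids = [1, 1, 2, 3]           # 1-based, as a Julia caller writes
tf_segment_ids = julia_segment_ids .- 1    # 0-based, as the TF kernel expects
@assert tf_segment_ids == [0, 0, 1, 2]
# -----------------------------------------------------------------------------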
tf.Tensor(tf.Operation(desc)) + end function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentProd") data_ = convert(tf.EagerTensor, data_) @@ -54205,13 +54205,13 @@ begin return res[1] end end - function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) + end end - end end @@ -54221,19 +54221,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function count_up_to_graph(ref_; name=nothing, limit=nothing) - local desc - tf.with_op_name(name, "CountUpTo") do - desc = tf.NodeDescription("CountUpTo") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if limit !== nothing - desc["limit"] = Base.Int(limit) - end + function count_up_to_graph(ref_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "CountUpTo") do + desc = tf.NodeDescription("CountUpTo") + ref_ = convert(Tensor{Any}, ref_) + (ref_,) = tf.tf_promote(ref_) + tf.add_input(desc, ref_) + if limit !== nothing + desc["limit"] = Base.Int(limit) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function count_up_to_eager(ref_; name=nothing, limit=nothing) desc = tf.EagerOp("CountUpTo") ref_ = convert(tf.EagerTensor, ref_) @@ -54249,13 +54249,13 @@ begin return res[1] end end - function count_up_to(ref_; name=nothing, limit=nothing) - if tf.in_eager_mode() - count_up_to_eager(ref_; name=name, limit=limit) - else - count_up_to_graph(ref_; name=name, limit=limit) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing) + if tf.in_eager_mode() + count_up_to_eager(ref_; name=name, limit=limit) + else + count_up_to_graph(ref_; name=name, limit=limit) + end end - end end @@ -54265,28 +54265,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - local desc - tf.with_op_name(name, "RandomGamma") do - desc = tf.NodeDescription("RandomGamma") - shape_ = convert(Tensor{Any}, shape_) - alpha_ = convert(Tensor{Any}, alpha_) - (shape_,) = tf.tf_promote(shape_) - (alpha_,) = tf.tf_promote(alpha_) - tf.add_input(desc, shape_) - tf.add_input(desc, alpha_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end + function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + local desc + tf.with_op_name(name, "RandomGamma") do + desc = tf.NodeDescription("RandomGamma") + shape_ = convert(Tensor{Any}, shape_) + alpha_ = convert(Tensor{Any}, alpha_) + (shape_,) = tf.tf_promote(shape_) + (alpha_,) = tf.tf_promote(alpha_) + tf.add_input(desc, shape_) + tf.add_input(desc, alpha_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if 
seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if S !== nothing + desc["S"] = Base.identity(S) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) desc = tf.EagerOp("RandomGamma") shape_ = convert(tf.EagerTensor, shape_) @@ -54311,13 +54311,13 @@ begin return res[1] end end - function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - if tf.in_eager_mode() - random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) - else - random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + if tf.in_eager_mode() + random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + else + random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + end end - end end @@ -54327,20 +54327,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGrad") do - desc = tf.NodeDescription("TensorArrayGrad") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end + function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGrad") do + desc = tf.NodeDescription("TensorArrayGrad") + handle_ = convert(Tensor{String}, handle_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, flow_in_) + if source !== nothing + desc["source"] = Base.String(source) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) desc = tf.EagerOp("TensorArrayGrad") handle_ = convert(tf.EagerTensor, handle_) @@ -54357,13 +54357,13 @@ begin return res[1] end end - function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) - else - tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) + end end - end end @@ -54373,27 +54373,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2D") do - desc = tf.NodeDescription("Dilation2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, 
rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end + function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2D") do + desc = tf.NodeDescription("Dilation2D") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + (input_, filter_) = tf.tf_promote(input_, filter_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + if padding !== nothing + desc["padding"] = Base.String(padding) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) desc = tf.EagerOp("Dilation2D") input_ = convert(tf.EagerTensor, input_) @@ -54418,13 +54418,13 @@ begin return res[1] end end - function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + end end - end end @@ -54434,29 +54434,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Unbatch") do - desc = tf.NodeDescription("Unbatch") - batched_tensor_ = convert(Tensor{Any}, batched_tensor_) - batch_index_ = convert(Tensor{Int64}, batch_index_) - id_ = convert(Tensor{Int64}, id_) - (batched_tensor_,) = tf.tf_promote(batched_tensor_) - tf.add_input(desc, batched_tensor_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, id_) - if timeout_micros !== nothing - desc["timeout_micros"] = Base.Int(timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unbatch") do + desc = tf.NodeDescription("Unbatch") + batched_tensor_ = convert(Tensor{Any}, batched_tensor_) + batch_index_ = convert(Tensor{Int64}, batch_index_) + id_ = convert(Tensor{Int64}, id_) + (batched_tensor_,) = tf.tf_promote(batched_tensor_) + tf.add_input(desc, batched_tensor_) + tf.add_input(desc, batch_index_) + tf.add_input(desc, id_) + if timeout_micros !== nothing + desc["timeout_micros"] = Base.Int(timeout_micros) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function unbatch_eager(batched_tensor_, 
batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unbatch") batched_tensor_ = convert(tf.EagerTensor, batched_tensor_) @@ -54482,13 +54482,13 @@ begin return res[1] end end - function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) - else - unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + else + unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + end end - end end @@ -54498,16 +54498,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function get_session_handle_graph(value_; name=nothing) - local desc - tf.with_op_name(name, "GetSessionHandle") do - desc = tf.NodeDescription("GetSessionHandle") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - end - tf.Tensor(tf.Operation(desc)) + function get_session_handle_graph(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandle") do + desc = tf.NodeDescription("GetSessionHandle") + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, value_) end + tf.Tensor(tf.Operation(desc)) + end function get_session_handle_eager(value_; name=nothing) desc = tf.EagerOp("GetSessionHandle") value_ = convert(tf.EagerTensor, value_) @@ -54520,13 +54520,13 @@ begin return res[1] end end - function get_session_handle(value_; name=nothing) - if tf.in_eager_mode() - get_session_handle_eager(value_; name=name) - else - get_session_handle_graph(value_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle(value_; name=nothing) + if tf.in_eager_mode() + get_session_handle_eager(value_; name=name) + else + get_session_handle_graph(value_; name=name) + end end - end end @@ -54536,30 +54536,30 @@ end Retrieve embedding parameters for a single table. 
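# ---- editor's annotation (not part of the patch) ----------------------------
# Ops with several outputs, like RetrieveTPUEmbeddingADAMParameters below
# (three outputs), materialize the finished Operation once and then wrap each
# output slot in its own tf.Tensor. The skeleton of that shape, shown with a
# hypothetical two-output op and the same helpers this file uses:
function my_pair_op_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "MyPairOp") do   # hypothetical kernel name
        desc = tf.NodeDescription("MyPairOp")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:2                      # one tf.Tensor per output slot
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end
# -----------------------------------------------------------------------------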
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function retrieve_tpu_embedding_adam_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters") if table_id !== nothing @@ -54581,13 +54581,13 @@ begin return res end end - function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -54597,31 +54597,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - local desc - tf.with_op_name(name, "MutableHashTableOfTensorsV2") do - desc = tf.NodeDescription("MutableHashTableOfTensorsV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if 
key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - end - tf.Tensor(tf.Operation(desc)) + function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensorsV2") do + desc = tf.NodeDescription("MutableHashTableOfTensorsV2") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end end + tf.Tensor(tf.Operation(desc)) + end function mutable_hash_table_of_tensors_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) desc = tf.EagerOp("MutableHashTableOfTensorsV2") if container !== nothing @@ -54649,13 +54649,13 @@ begin return res[1] end end - function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.in_eager_mode() - mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) - else - mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.in_eager_mode() + mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end end - end end @@ -54665,37 +54665,37 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyFtrl") do - desc = tf.NodeDescription("SparseApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = 
convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrl") do + desc = tf.NodeDescription("SparseApplyFtrl") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + linear_ = convert(Tensor{Any}, linear_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + lr_ = convert(Tensor{Any}, lr_) + l1_ = convert(Tensor{Any}, l1_) + l2_ = convert(Tensor{Any}, l2_) + lr_power_ = convert(Tensor{Any}, lr_power_) + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, linear_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, lr_) + tf.add_input(desc, l1_) + tf.add_input(desc, l2_) + tf.add_input(desc, lr_power_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) desc = tf.EagerOp("SparseApplyFtrl") var_ = convert(tf.EagerTensor, var_) @@ -54735,13 +54735,13 @@ begin return res[1] end end - function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end end @@ -54751,25 +54751,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "BatchDatasetV2") do - desc = tf.NodeDescription("BatchDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, 
batch_size_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDatasetV2") do + desc = tf.NodeDescription("BatchDatasetV2") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + batch_size_ = convert(Tensor{Int64}, batch_size_) + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, batch_size_) + tf.add_input(desc, drop_remainder_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("BatchDatasetV2") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -54791,13 +54791,13 @@ begin return res[1] end end - function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - else - batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -54807,31 +54807,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSparseMinimum") do - desc = tf.NodeDescription("SparseSparseMinimum") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local 
desc + tf.with_op_name(name, "SparseSparseMinimum") do + desc = tf.NodeDescription("SparseSparseMinimum") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMinimum") a_indices_ = convert(tf.EagerTensor, a_indices_) @@ -54855,13 +54855,13 @@ begin return res end end - function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.in_eager_mode() - sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) - else - sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.in_eager_mode() + sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end end - end end @@ -54871,20 +54871,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reverse_v2_graph(tensor_, axis_; name=nothing) - local desc - tf.with_op_name(name, "ReverseV2") do - desc = tf.NodeDescription("ReverseV2") - tensor_ = convert(Tensor{Any}, tensor_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (tensor_,) = tf.tf_promote(tensor_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, tensor_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) + function reverse_v2_graph(tensor_, axis_; name=nothing) + local desc + tf.with_op_name(name, "ReverseV2") do + desc = tf.NodeDescription("ReverseV2") + tensor_ = convert(Tensor{Any}, tensor_) + axis_ = convert(Tensor{Int32}, axis_) + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + (tensor_,) = tf.tf_promote(tensor_) + (axis_,) = tf.tf_promote(axis_) + tf.add_input(desc, tensor_) + tf.add_input(desc, axis_) end + tf.Tensor(tf.Operation(desc)) + end function reverse_v2_eager(tensor_, axis_; name=nothing) desc = tf.EagerOp("ReverseV2") tensor_ = convert(tf.EagerTensor, tensor_) @@ -54900,13 +54900,13 @@ begin return res[1] end end - function reverse_v2(tensor_, axis_; name=nothing) - if tf.in_eager_mode() - reverse_v2_eager(tensor_, axis_; name=name) - else - reverse_v2_graph(tensor_, axis_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing) + if tf.in_eager_mode() + reverse_v2_eager(tensor_, axis_; name=name) + else + reverse_v2_graph(tensor_, axis_; name=name) + end end - end end @@ -54916,59 +54916,59 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSlice") do - desc = tf.NodeDescription("StridedSlice") - input_ = convert(Tensor{Any}, input_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - (input_,) = tf.tf_promote(input_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) + function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSlice") do + desc = tf.NodeDescription("StridedSlice") + input_ = convert(Tensor{Any}, input_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + (input_,) = tf.tf_promote(input_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, input_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + 
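# ---- editor's annotation (not part of the patch) ----------------------------
# StridedSlice is the heaviest example of the 1-based -> 0-based translation:
# begin_, end_, strides_, and each mask attribute are all decremented by one
# before reaching the kernel, so Julia-style indices can be passed straight
# through. The start-index part of that mapping, on a plain array:
v = [10, 20, 30, 40, 50]
julia_start = 2                 # 1-based: points at 20
tf_start = julia_start - 1      # 0-based: also points at 20
@assert v[julia_start] == 20
# (The exact begin/end/mask semantics follow TensorFlow's StridedSlice rules;
# this only illustrates the off-by-one shift itself.)
# -----------------------------------------------------------------------------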
desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end end + tf.Tensor(tf.Operation(desc)) + end function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSlice") input_ = convert(tf.EagerTensor, input_) @@ -55023,13 +55023,13 @@ begin return res[1] end end - function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - end end @@ -55039,15 +55039,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matching_files_graph(pattern_; name=nothing) - local desc - tf.with_op_name(name, "MatchingFiles") do - desc = tf.NodeDescription("MatchingFiles") - pattern_ = convert(Tensor{String}, pattern_) - tf.add_input(desc, pattern_) - end - tf.Tensor(tf.Operation(desc)) + function matching_files_graph(pattern_; name=nothing) + local desc + tf.with_op_name(name, "MatchingFiles") do + desc = tf.NodeDescription("MatchingFiles") + pattern_ = convert(Tensor{String}, pattern_) + tf.add_input(desc, pattern_) end + tf.Tensor(tf.Operation(desc)) + end function matching_files_eager(pattern_; name=nothing) desc = tf.EagerOp("MatchingFiles") pattern_ = convert(tf.EagerTensor, pattern_) @@ -55059,13 +55059,13 @@ begin return res[1] end end - function matching_files(pattern_; name=nothing) - if tf.in_eager_mode() - matching_files_eager(pattern_; name=name) - else - matching_files_graph(pattern_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matching_files(pattern_; name=nothing) + if tf.in_eager_mode() + matching_files_eager(pattern_; name=name) + else + matching_files_graph(pattern_; name=name) + end end - end end @@ -55075,18 +55075,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function encode_base64_graph(input_; name=nothing, pad=nothing) - local desc - tf.with_op_name(name, "EncodeBase64") do - desc = tf.NodeDescription("EncodeBase64") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pad !== nothing - desc["pad"] = Base.Bool(pad) - end + function 
encode_base64_graph(input_; name=nothing, pad=nothing) + local desc + tf.with_op_name(name, "EncodeBase64") do + desc = tf.NodeDescription("EncodeBase64") + input_ = convert(Tensor{String}, input_) + tf.add_input(desc, input_) + if pad !== nothing + desc["pad"] = Base.Bool(pad) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function encode_base64_eager(input_; name=nothing, pad=nothing) desc = tf.EagerOp("EncodeBase64") input_ = convert(tf.EagerTensor, input_) @@ -55101,13 +55101,13 @@ begin return res[1] end end - function encode_base64(input_; name=nothing, pad=nothing) - if tf.in_eager_mode() - encode_base64_eager(input_; name=name, pad=pad) - else - encode_base64_graph(input_; name=name, pad=pad) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing) + if tf.in_eager_mode() + encode_base64_eager(input_; name=name, pad=pad) + else + encode_base64_graph(input_; name=name, pad=pad) + end end - end end @@ -55117,21 +55117,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNextAsOptional") do - desc = tf.NodeDescription("IteratorGetNextAsOptional") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextAsOptional") do + desc = tf.NodeDescription("IteratorGetNextAsOptional") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNextAsOptional") iterator_ = convert(tf.EagerTensor, iterator_) @@ -55149,13 +55149,13 @@ begin return res[1] end end - function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -55165,28 +55165,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function padding_fifo_queue_graph(; name=nothing, component_types=nothing, 
shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PaddingFIFOQueue") do - desc = tf.NodeDescription("PaddingFIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueue") do + desc = tf.NodeDescription("PaddingFIFOQueue") + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function padding_fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("PaddingFIFOQueue") if component_types !== nothing @@ -55211,13 +55211,13 @@ begin return res[1] end end - function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end end @@ -55227,15 +55227,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_to_string_handle_graph(resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "IteratorToStringHandle") do - desc = tf.NodeDescription("IteratorToStringHandle") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - tf.add_input(desc, resource_handle_) - end - tf.Tensor(tf.Operation(desc)) + function iterator_to_string_handle_graph(resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IteratorToStringHandle") do + desc = tf.NodeDescription("IteratorToStringHandle") + resource_handle_ = convert(Tensor{Any}, resource_handle_) + tf.add_input(desc, resource_handle_) end + tf.Tensor(tf.Operation(desc)) + end function iterator_to_string_handle_eager(resource_handle_; name=nothing) desc = 
tf.EagerOp("IteratorToStringHandle") resource_handle_ = convert(tf.EagerTensor, resource_handle_) @@ -55247,13 +55247,13 @@ begin return res[1] end end - function iterator_to_string_handle(resource_handle_; name=nothing) - if tf.in_eager_mode() - iterator_to_string_handle_eager(resource_handle_; name=name) - else - iterator_to_string_handle_graph(resource_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing) + if tf.in_eager_mode() + iterator_to_string_handle_eager(resource_handle_; name=name) + else + iterator_to_string_handle_graph(resource_handle_; name=name) + end end - end end @@ -55263,30 +55263,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do - desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") - input_ = convert(Tensor{Any}, input_) - grad_ = convert(Tensor{Any}, grad_) - argmax_ = convert(Tensor{Any}, argmax_) - (argmax_,) = tf.tf_promote(argmax_) - (input_, grad_) = tf.tf_promote(input_, grad_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) + function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") + input_ = convert(Tensor{Any}, input_) + grad_ = convert(Tensor{Any}, grad_) + argmax_ = convert(Tensor{Any}, argmax_) + (argmax_,) = tf.tf_promote(argmax_) + (input_, grad_) = tf.tf_promote(input_, grad_) + tf.add_input(desc, input_) + tf.add_input(desc, grad_) + tf.add_input(desc, argmax_) + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end end + tf.Tensor(tf.Operation(desc)) + end function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) desc = tf.EagerOp("MaxPoolGradGradWithArgmax") input_ = convert(tf.EagerTensor, input_) @@ -55314,13 +55314,13 @@ begin return res[1] end end - function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_grad_with_argmax_graph(input_, grad_, 
argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end end @@ -55330,20 +55330,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListGather") do - desc = tf.NodeDescription("TensorListGather") - input_handle_ = convert(Tensor{Any}, input_handle_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, indices_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGather") do + desc = tf.NodeDescription("TensorListGather") + input_handle_ = convert(Tensor{Any}, input_handle_) + indices_ = convert(Tensor{Int32}, indices_) + tf.add_input(desc, input_handle_) + tf.add_input(desc, indices_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_gather_eager(input_handle_, indices_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListGather") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -55360,13 +55360,13 @@ begin return res[1] end end - function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype) - else - tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype) + else + tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype) + end end - end end @@ -55376,27 +55376,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - local desc - tf.with_op_name(name, "Multinomial") do - desc = tf.NodeDescription("Multinomial") - logits_ = convert(Tensor{Any}, logits_) - num_samples_ = convert(Tensor{Int32}, num_samples_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) - end + function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + local desc + tf.with_op_name(name, "Multinomial") do + desc = tf.NodeDescription("Multinomial") + logits_ = convert(Tensor{Any}, logits_) + num_samples_ = convert(Tensor{Int32}, num_samples_) + (logits_,) = tf.tf_promote(logits_) + tf.add_input(desc, logits_) + tf.add_input(desc, num_samples_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) end - 
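# ---- editor's annotation (not part of the patch) ----------------------------
# The eager half of every pair follows one recipe: wrap inputs as
# tf.EagerTensor, feed them to a tf.EagerOp, set optional attributes via
# setindex!, then execute immediately (no Session). A hedged sketch with a
# hypothetical op name and attribute:
function my_op_eager(x_; name=nothing, seed=nothing)
    desc = tf.EagerOp("MyOp")               # hypothetical kernel name
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    if seed !== nothing
        desc["seed"] = Base.Int(seed)       # attrs set only when provided
    end
    res = tf.execute(desc)                  # runs the kernel right away
    return length(res) >= 1 ? res[1] : res  # single-output ops return res[1]
end
# -----------------------------------------------------------------------------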
tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) desc = tf.EagerOp("Multinomial") logits_ = convert(tf.EagerTensor, logits_) @@ -55420,13 +55420,13 @@ begin return res[1] end end - function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - if tf.in_eager_mode() - multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) - else - multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + if tf.in_eager_mode() + multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + else + multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + end end - end end @@ -55436,22 +55436,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayRead") do - desc = tf.NodeDescription("TensorArrayRead") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayRead") do + desc = tf.NodeDescription("TensorArrayRead") + handle_ = convert(Tensor{String}, handle_) + index_ = convert(Tensor{Int32}, index_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + tf.add_input(desc, handle_) + tf.add_input(desc, index_) + tf.add_input(desc, flow_in_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) desc = tf.EagerOp("TensorArrayRead") handle_ = convert(tf.EagerTensor, handle_) @@ -55470,13 +55470,13 @@ begin return res[1] end end - function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end end @@ -55486,23 +55486,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do - desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") - materialized_ = 
convert(Tensor{Any}, materialized_) - index_ = convert(Tensor{Any}, index_) - tf.add_input(desc, materialized_) - tf.add_input(desc, index_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") + materialized_ = convert(Tensor{Any}, materialized_) + index_ = convert(Tensor{Any}, index_) + tf.add_input(desc, materialized_) + tf.add_input(desc, index_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalIndexedDatasetGet") materialized_ = convert(tf.EagerTensor, materialized_) @@ -55522,13 +55522,13 @@ begin return res[1] end end - function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -55538,21 +55538,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorFromStringHandleV2") do - desc = tf.NodeDescription("IteratorFromStringHandleV2") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandleV2") do + desc = tf.NodeDescription("IteratorFromStringHandleV2") + string_handle_ = convert(Tensor{String}, string_handle_) + tf.add_input(desc, string_handle_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function 
iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorFromStringHandleV2") string_handle_ = convert(tf.EagerTensor, string_handle_) @@ -55570,13 +55570,13 @@ begin return res[1] end end - function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -55586,18 +55586,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function bitwise_or_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseOr") do - desc = tf.NodeDescription("BitwiseOr") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function bitwise_or_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseOr") do + desc = tf.NodeDescription("BitwiseOr") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function bitwise_or_eager(x_, y_; name=nothing) desc = tf.EagerOp("BitwiseOr") x_ = convert(tf.EagerTensor, x_) @@ -55613,13 +55613,13 @@ begin return res[1] end end - function bitwise_or(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_or_eager(x_, y_; name=name) - else - bitwise_or_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_or(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_or_eager(x_, y_; name=name) + else + bitwise_or_graph(x_, y_; name=name) + end end - end end @@ -55629,23 +55629,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentMax") do - desc = tf.NodeDescription("UnsortedSegmentMax") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) + function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMax") do + desc = tf.NodeDescription("UnsortedSegmentMax") + data_ = convert(Tensor{Any}, data_) + 
segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + num_segments_ = convert(Tensor{Int32}, num_segments_) + (num_segments_,) = tf.tf_promote(num_segments_) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) + tf.add_input(desc, num_segments_) end + tf.Tensor(tf.Operation(desc)) + end function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) desc = tf.EagerOp("UnsortedSegmentMax") data_ = convert(tf.EagerTensor, data_) @@ -55664,13 +55664,13 @@ begin return res[1] end end - function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) + end end - end end @@ -55680,27 +55680,27 @@ end Returns (x - y)(x - y) element-wise. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklSquaredDifference") do - desc = tf.NodeDescription("_MklSquaredDifference") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSquaredDifference") do + desc = tf.NodeDescription("_MklSquaredDifference") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) + tf.add_input(desc, mkl_x_) + tf.add_input(desc, mkl_y_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) desc = tf.EagerOp("_MklSquaredDifference") x_ = convert(tf.EagerTensor, x_) @@ -55720,13 +55720,13 @@ begin return res end end - function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end end @@ -55736,29 +55736,29 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropFilter") do - desc = tf.NodeDescription("Conv3DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilter") do + desc = tf.NodeDescription("Conv3DBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_ = convert(Tensor{Any}, filter_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) desc = tf.EagerOp("Conv3DBackpropFilter") input_ = convert(tf.EagerTensor, input_) @@ -55786,13 +55786,13 @@ begin return res[1] end end - function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - else - conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + else + conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + end end - end end @@ -55802,33 +55802,33 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "If") do - desc = tf.NodeDescription("If") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - 
end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "If") do + desc = tf.NodeDescription("If") + cond_ = convert(Tensor{Any}, cond_) + input_ = [convert(Tensor{Any}, x) for x = input_] + (cond_,) = tf.tf_promote(cond_) + tf.add_input(desc, cond_) + tf.add_input(desc, input_) + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) desc = tf.EagerOp("If") cond_ = convert(tf.EagerTensor, cond_) @@ -55858,13 +55858,13 @@ begin return res[1] end end - function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - if tf.in_eager_mode() - if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) - else - if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + if tf.in_eager_mode() + if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + else + if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + end end - end end @@ -55874,29 +55874,29 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "FlatMapDataset") do - desc = tf.NodeDescription("FlatMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local 
desc + tf.with_op_name(name, "FlatMapDataset") do + desc = tf.NodeDescription("FlatMapDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("FlatMapDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -55922,13 +55922,13 @@ begin return res[1] end end - function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -55938,27 +55938,27 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListScatter") do - desc = tf.NodeDescription("TensorListScatter") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Int32}, indices_) - element_shape_ = convert(Tensor{Any}, element_shape_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end + function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListScatter") do + desc = tf.NodeDescription("TensorListScatter") + tensor_ = convert(Tensor{Any}, tensor_) + indices_ = convert(Tensor{Int32}, indices_) + element_shape_ = convert(Tensor{Any}, element_shape_) + (tensor_,) = tf.tf_promote(tensor_) + (element_shape_,) = tf.tf_promote(element_shape_) + tf.add_input(desc, tensor_) + tf.add_input(desc, indices_) + tf.add_input(desc, element_shape_) + if element_dtype !== 
nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) desc = tf.EagerOp("TensorListScatter") tensor_ = convert(tf.EagerTensor, tensor_) @@ -55982,13 +55982,13 @@ begin return res[1] end end - function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end end @@ -55998,18 +55998,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function softsign_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "SoftsignGrad") do - desc = tf.NodeDescription("SoftsignGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function softsign_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftsignGrad") do + desc = tf.NodeDescription("SoftsignGrad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function softsign_grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("SoftsignGrad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -56025,13 +56025,13 @@ begin return res[1] end end - function softsign_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - softsign_grad_eager(gradients_, features_; name=name) - else - softsign_grad_graph(gradients_, features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + softsign_grad_eager(gradients_, features_; name=name) + else + softsign_grad_graph(gradients_, features_; name=name) + end end - end end @@ -56041,22 +56041,22 @@ end Copy Host Op. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - local desc - tf.with_op_name(name, "CopyHost") do - desc = tf.NodeDescription("CopyHost") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) - end + function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "CopyHost") do + desc = tf.NodeDescription("CopyHost") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) desc = tf.EagerOp("CopyHost") input_ = convert(tf.EagerTensor, input_) @@ -56075,13 +56075,13 @@ begin return res[1] end end - function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - if tf.in_eager_mode() - copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - else - copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + if tf.in_eager_mode() + copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + else + copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + end end - end end @@ -56091,22 +56091,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lin_space_graph(start_, stop_, num_; name=nothing) - local desc - tf.with_op_name(name, "LinSpace") do - desc = tf.NodeDescription("LinSpace") - start_ = convert(Tensor{Any}, start_) - stop_ = convert(Tensor{Any}, stop_) - num_ = convert(Tensor{Int32}, num_) - num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) - (start_, stop_) = tf.tf_promote(start_, stop_) - (num_,) = tf.tf_promote(num_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, num_) - end - tf.Tensor(tf.Operation(desc)) + function lin_space_graph(start_, stop_, num_; name=nothing) + local desc + tf.with_op_name(name, "LinSpace") do + desc = tf.NodeDescription("LinSpace") + start_ = convert(Tensor{Any}, start_) + stop_ = convert(Tensor{Any}, stop_) + num_ = convert(Tensor{Int32}, num_) + num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) + (start_, stop_) = tf.tf_promote(start_, stop_) + (num_,) = tf.tf_promote(num_) + tf.add_input(desc, start_) + tf.add_input(desc, stop_) + tf.add_input(desc, num_) end + tf.Tensor(tf.Operation(desc)) + end function lin_space_eager(start_, stop_, num_; name=nothing) desc = tf.EagerOp("LinSpace") start_ = convert(tf.EagerTensor, start_) @@ -56125,13 +56125,13 @@ begin return res[1] end end - function lin_space(start_, stop_, num_; name=nothing) - if tf.in_eager_mode() - lin_space_eager(start_, stop_, num_; name=name) - else - lin_space_graph(start_, 
stop_, num_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lin_space(start_, stop_, num_; name=nothing) + if tf.in_eager_mode() + lin_space_eager(start_, stop_, num_; name=name) + else + lin_space_graph(start_, stop_, num_; name=name) + end end - end end @@ -56141,21 +56141,21 @@ end Updates input `value` at `loc` with `update`. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) - local desc - tf.with_op_name(name, "_ParallelConcatUpdate") do - desc = tf.NodeDescription("_ParallelConcatUpdate") - value_ = convert(Tensor{Any}, value_) - update_ = convert(Tensor{Any}, update_) - (value_, update_) = tf.tf_promote(value_, update_) - tf.add_input(desc, value_) - tf.add_input(desc, update_) - if loc !== nothing - desc["loc"] = Base.Int(loc) - end + function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatUpdate") do + desc = tf.NodeDescription("_ParallelConcatUpdate") + value_ = convert(Tensor{Any}, value_) + update_ = convert(Tensor{Any}, update_) + (value_, update_) = tf.tf_promote(value_, update_) + tf.add_input(desc, value_) + tf.add_input(desc, update_) + if loc !== nothing + desc["loc"] = Base.Int(loc) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) desc = tf.EagerOp("_ParallelConcatUpdate") value_ = convert(tf.EagerTensor, value_) @@ -56174,13 +56174,13 @@ begin return res[1] end end - function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) - if tf.in_eager_mode() - _parallel_concat_update_eager(value_, update_; name=name, loc=loc) - else - _parallel_concat_update_graph(value_, update_; name=name, loc=loc) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) + if tf.in_eager_mode() + _parallel_concat_update_eager(value_, update_; name=name, loc=loc) + else + _parallel_concat_update_graph(value_, update_; name=name, loc=loc) + end end - end end @@ -56190,19 +56190,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) - local desc - tf.with_op_name(name, "Stack") do - desc = tf.NodeDescription("Stack") - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) - end + function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + local desc + tf.with_op_name(name, "Stack") do + desc = tf.NodeDescription("Stack") + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_eager(; name=nothing, elem_type=nothing, stack_name=nothing) desc = tf.EagerOp("Stack") if elem_type !== nothing @@ -56218,13 +56218,13 @@ begin return res[1] end end - function stack(; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.in_eager_mode() - stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) - else - stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + if tf.in_eager_mode() + stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) + else + stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) + end end - end end @@ -56234,21 +56234,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) - local desc - tf.with_op_name(name, "StackPushV2") do - desc = tf.NodeDescription("StackPushV2") - handle_ = convert(Tensor{Any}, handle_) - elem_ = convert(Tensor{Any}, elem_) - (elem_,) = tf.tf_promote(elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) - end + function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPushV2") do + desc = tf.NodeDescription("StackPushV2") + handle_ = convert(Tensor{Any}, handle_) + elem_ = convert(Tensor{Any}, elem_) + (elem_,) = tf.tf_promote(elem_) + tf.add_input(desc, handle_) + tf.add_input(desc, elem_) + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) desc = tf.EagerOp("StackPushV2") handle_ = convert(tf.EagerTensor, handle_) @@ -56266,13 +56266,13 @@ begin return res[1] end end - function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.in_eager_mode() - stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory) - else - stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.in_eager_mode() + stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end end - end end @@ -56282,21 +56282,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignVariableOp") do - desc = tf.NodeDescription("AssignVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignVariableOp") do + desc = tf.NodeDescription("AssignVariableOp") + resource_ = convert(Tensor{Any}, resource_) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, resource_) + tf.add_input(desc, value_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) desc = tf.EagerOp("AssignVariableOp") resource_ = convert(tf.EagerTensor, resource_) @@ -56314,13 +56314,13 @@ begin return res[1] end end - function assign_variable_op(resource_, 
value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end end @@ -56330,31 +56330,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "SparseSplit") do - desc = tf.NodeDescription("SparseSplit") - split_dim_ = convert(Tensor{Int64}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - shape_ = convert(Tensor{Int64}, shape_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end + function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SparseSplit") do + desc = tf.NodeDescription("SparseSplit") + split_dim_ = convert(Tensor{Int64}, split_dim_) + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + indices_ = convert(Tensor{Int64}, indices_) + values_ = convert(Tensor{Any}, values_) + shape_ = convert(Tensor{Int64}, shape_) + (values_,) = tf.tf_promote(values_) + tf.add_input(desc, split_dim_) + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shape_) + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) desc = tf.EagerOp("SparseSplit") split_dim_ = convert(tf.EagerTensor, split_dim_) @@ -56376,13 +56376,13 @@ begin return res end end - function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) - else - sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + else + sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + end end - end end @@ -56392,20 +56392,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayUnpack") do - desc = tf.NodeDescription("TensorArrayUnpack") - handle_ = 
convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayUnpack") do + desc = tf.NodeDescription("TensorArrayUnpack") + handle_ = convert(Tensor{String}, handle_) + value_ = convert(Tensor{Any}, value_) + flow_in_ = convert(Tensor{Float32}, flow_in_) + (value_,) = tf.tf_promote(value_) + tf.add_input(desc, handle_) + tf.add_input(desc, value_) + tf.add_input(desc, flow_in_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) desc = tf.EagerOp("TensorArrayUnpack") handle_ = convert(tf.EagerTensor, handle_) @@ -56422,13 +56422,13 @@ begin return res[1] end end - function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) - else - tensor_array_unpack_graph(handle_, value_, flow_in_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) + else + tensor_array_unpack_graph(handle_, value_, flow_in_; name=name) + end end - end end @@ -56438,21 +56438,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) - local desc - tf.with_op_name(name, "TensorListStack") do - desc = tf.NodeDescription("TensorListStack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if num_elements !== nothing - desc["num_elements"] = Base.Int(num_elements) - end + function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) + local desc + tf.with_op_name(name, "TensorListStack") do + desc = tf.NodeDescription("TensorListStack") + input_handle_ = convert(Tensor{Any}, input_handle_) + tf.add_input(desc, input_handle_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + if num_elements !== nothing + desc["num_elements"] = Base.Int(num_elements) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_stack_eager(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) desc = tf.EagerOp("TensorListStack") input_handle_ = convert(tf.EagerTensor, input_handle_) @@ -56470,13 +56470,13 @@ begin return res[1] end end - function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) - if tf.in_eager_mode() - tensor_list_stack_eager(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) - else - tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) + if tf.in_eager_mode() + tensor_list_stack_eager(input_handle_; 
name=name, element_dtype=element_dtype, num_elements=num_elements) + else + tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) + end end - end end @@ -56486,15 +56486,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function barrier_incomplete_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "BarrierIncompleteSize") do - desc = tf.NodeDescription("BarrierIncompleteSize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function barrier_incomplete_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "BarrierIncompleteSize") do + desc = tf.NodeDescription("BarrierIncompleteSize") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function barrier_incomplete_size_eager(handle_; name=nothing) desc = tf.EagerOp("BarrierIncompleteSize") handle_ = convert(tf.EagerTensor, handle_) @@ -56506,13 +56506,13 @@ begin return res[1] end end - function barrier_incomplete_size(handle_; name=nothing) - if tf.in_eager_mode() - barrier_incomplete_size_eager(handle_; name=name) - else - barrier_incomplete_size_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_incomplete_size(handle_; name=nothing) + if tf.in_eager_mode() + barrier_incomplete_size_eager(handle_; name=name) + else + barrier_incomplete_size_graph(handle_; name=name) + end end - end end @@ -56522,23 +56522,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) - local desc - tf.with_op_name(name, "Restore") do - desc = tf.NodeDescription("Restore") - file_pattern_ = convert(Tensor{String}, file_pattern_) - tensor_name_ = convert(Tensor{String}, tensor_name_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - if dt !== nothing - desc["dt"] = Base.identity(dt) - end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) - end + function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "Restore") do + desc = tf.NodeDescription("Restore") + file_pattern_ = convert(Tensor{String}, file_pattern_) + tensor_name_ = convert(Tensor{String}, tensor_name_) + tf.add_input(desc, file_pattern_) + tf.add_input(desc, tensor_name_) + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) desc = tf.EagerOp("Restore") file_pattern_ = convert(tf.EagerTensor, file_pattern_) @@ -56558,13 +56558,13 @@ begin return res[1] end end - function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) - if tf.in_eager_mode() - restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) - else - restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore(file_pattern_, tensor_name_; name=nothing, 
dt=nothing, preferred_shard=nothing) + if tf.in_eager_mode() + restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + else + restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + end end - end end @@ -56574,38 +56574,38 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - local desc - tf.with_op_name(name, "TensorArrayV3") do - desc = tf.NodeDescription("TensorArrayV3") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if identical_element_shapes !== nothing - desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV3") do + desc = tf.NodeDescription("TensorArrayV3") + size_ = convert(Tensor{Int32}, size_) + tf.add_input(desc, size_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + if identical_element_shapes !== nothing + desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + end + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) desc = tf.EagerOp("TensorArrayV3") size_ = convert(tf.EagerTensor, size_) @@ -56635,13 +56635,13 @@ begin return res end end - function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - if tf.in_eager_mode() - tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) - else - tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + if tf.in_eager_mode() + tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + else + tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + end end - end end @@ -56651,23 +56651,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalAssertNextDataset") do - desc = tf.NodeDescription("ExperimentalAssertNextDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - transformations_ = convert(Tensor{String}, transformations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, transformations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalAssertNextDataset") do + desc = tf.NodeDescription("ExperimentalAssertNextDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + transformations_ = convert(Tensor{String}, transformations_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, transformations_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalAssertNextDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -56687,13 +56687,13 @@ begin return res[1] end end - function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, 
output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -56703,21 +56703,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) - local desc - tf.with_op_name(name, "InTopK") do - desc = tf.NodeDescription("InTopK") - predictions_ = convert(Tensor{Float32}, predictions_) - targets_ = convert(Tensor{Int32}, targets_) - (targets_,) = tf.tf_promote(targets_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - if k !== nothing - desc["k"] = Base.Int(k) - end + function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + local desc + tf.with_op_name(name, "InTopK") do + desc = tf.NodeDescription("InTopK") + predictions_ = convert(Tensor{Float32}, predictions_) + targets_ = convert(Tensor{Int32}, targets_) + (targets_,) = tf.tf_promote(targets_) + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + if k !== nothing + desc["k"] = Base.Int(k) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) desc = tf.EagerOp("InTopK") predictions_ = convert(tf.EagerTensor, predictions_) @@ -56735,13 +56735,13 @@ begin return res[1] end end - function in_top_k(predictions_, targets_; name=nothing, k=nothing) - if tf.in_eager_mode() - in_top_k_eager(predictions_, targets_; name=name, k=k) - else - in_top_k_graph(predictions_, targets_; name=name, k=k) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) + if tf.in_eager_mode() + in_top_k_eager(predictions_, targets_; name=name, k=k) + else + in_top_k_graph(predictions_, targets_; name=name, k=k) + end end - end end @@ -56751,25 +56751,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterSub") do - desc = tf.NodeDescription("ScatterSub") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterSub") do + desc = tf.NodeDescription("ScatterSub") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterSub") ref_ = convert(tf.EagerTensor, ref_) @@ -56791,13 +56791,13 @@ begin return res[1] end end - function scatter_sub(ref_, 
indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -56807,16 +56807,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function acosh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Acosh") do - desc = tf.NodeDescription("Acosh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function acosh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Acosh") do + desc = tf.NodeDescription("Acosh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function acosh_eager(x_; name=nothing) desc = tf.EagerOp("Acosh") x_ = convert(tf.EagerTensor, x_) @@ -56829,48 +56829,48 @@ begin return res[1] end end - function acosh(x_; name=nothing) - if tf.in_eager_mode() - acosh_eager(x_; name=name) - else - acosh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acosh(x_; name=nothing) + if tf.in_eager_mode() + acosh_eager(x_; name=name) + else + acosh_graph(x_; name=name) + end end - end end """ - depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=NHWC, dilations=[1, 1, 1, 1]) + depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1]) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do - desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end + function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") + input_ = convert(Tensor{Any}, input_) + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + out_backprop_ = convert(Tensor{Any}, out_backprop_) + (input_, out_backprop_) = 
tf.tf_promote(input_, out_backprop_) + tf.add_input(desc, input_) + tf.add_input(desc, filter_sizes_) + tf.add_input(desc, out_backprop_) + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + if padding !== nothing + desc["padding"] = Base.String(padding) + end + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") input_ = convert(tf.EagerTensor, input_) @@ -56900,46 +56900,46 @@ begin return res[1] end end - function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end end """ - quantize_v2(input, min_range, max_range; mode=MIN_COMBINED, round_mode=HALF_AWAY_FROM_ZERO) + quantize_v2(input, min_range, max_range; mode=, round_mode=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - local desc - tf.with_op_name(name, "QuantizeV2") do - desc = tf.NodeDescription("QuantizeV2") - input_ = convert(Tensor{Float32}, input_) - min_range_ = convert(Tensor{Float32}, min_range_) - max_range_ = convert(Tensor{Float32}, max_range_) - tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) - end + function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeV2") do + desc = tf.NodeDescription("QuantizeV2") + input_ = convert(Tensor{Float32}, input_) + min_range_ = convert(Tensor{Float32}, min_range_) + max_range_ = convert(Tensor{Float32}, max_range_) + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + if round_mode 
!== nothing + desc["round_mode"] = Base.String(round_mode) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) desc = tf.EagerOp("QuantizeV2") input_ = convert(tf.EagerTensor, input_) @@ -56961,13 +56961,13 @@ begin return res end end - function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - if tf.in_eager_mode() - quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) - else - quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + if tf.in_eager_mode() + quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + else + quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + end end - end end @@ -56977,25 +56977,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - local desc - tf.with_op_name(name, "Cast") do - desc = tf.NodeDescription("Cast") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end + function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "Cast") do + desc = tf.NodeDescription("Cast") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) desc = tf.EagerOp("Cast") x_ = convert(tf.EagerTensor, x_) @@ -57017,13 +57017,13 @@ begin return res[1] end end - function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - if tf.in_eager_mode() - cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - else - cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.in_eager_mode() + cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end end - end end @@ -57033,43 +57033,43 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, 
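# Optional keyword attributes (QuantizeV2's `mode`/`round_mode` and Cast's
# `SrcT`/`DstT`/`Truncate` above) are written into the node description only
# when the caller actually passes them; leaving them as `nothing` defers to
# the kernel's registered default. A sketch of that guard, assuming a
# hypothetical description `desc` and a string-valued attribute:
function maybe_set_mode!(desc, mode)
    if mode !== nothing
        desc["mode"] = Base.String(mode)   # omitted => kernel default applies
    end
    desc
end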
"GeneratorDataset") do - desc = tf.NodeDescription("GeneratorDataset") - init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_] - next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_] - finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_] - tf.add_input(desc, init_func_other_args_) - tf.add_input(desc, next_func_other_args_) - tf.add_input(desc, finalize_func_other_args_) - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if next_func !== nothing - desc["next_func"] = Base.identity(next_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tinit_func_args !== nothing - desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) - end - if Tnext_func_args !== nothing - desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) - end - if Tfinalize_func_args !== nothing - desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "GeneratorDataset") do + desc = tf.NodeDescription("GeneratorDataset") + init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_] + next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_] + finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_] + tf.add_input(desc, init_func_other_args_) + tf.add_input(desc, next_func_other_args_) + tf.add_input(desc, finalize_func_other_args_) + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + if next_func !== nothing + desc["next_func"] = Base.identity(next_func) + end + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + if Tinit_func_args !== nothing + desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) + end + if Tnext_func_args !== nothing + desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) + end + if Tfinalize_func_args !== nothing + desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("GeneratorDataset") init_func_other_args_ = convert(tf.EagerTensor, init_func_other_args_) @@ -57109,13 +57109,13 @@ begin return res[1] end end - function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, 
Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) - else - generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + else + generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -57125,15 +57125,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeSerialize") do - desc = tf.NodeDescription("TensorForestTreeSerialize") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSerialize") do + desc = tf.NodeDescription("TensorForestTreeSerialize") + tree_handle_ = convert(Tensor{Any}, tree_handle_) + tf.add_input(desc, tree_handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) desc = tf.EagerOp("TensorForestTreeSerialize") tree_handle_ = convert(tf.EagerTensor, tree_handle_) @@ -57145,13 +57145,13 @@ begin return res[1] end end - function tensor_forest_tree_serialize(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_serialize_eager(tree_handle_; name=name) - else - tensor_forest_tree_serialize_graph(tree_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_serialize_eager(tree_handle_; name=name) + else + tensor_forest_tree_serialize_graph(tree_handle_; name=name) + end end - end end @@ -57161,15 +57161,15 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_array_close_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayCloseV2") do - desc = tf.NodeDescription("TensorArrayCloseV2") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - end - tf.Tensor(tf.Operation(desc)) + function tensor_array_close_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV2") do + desc = tf.NodeDescription("TensorArrayCloseV2") + handle_ = convert(Tensor{String}, handle_) + tf.add_input(desc, handle_) end + tf.Tensor(tf.Operation(desc)) + end function tensor_array_close_v2_eager(handle_; name=nothing) desc = tf.EagerOp("TensorArrayCloseV2") handle_ = convert(tf.EagerTensor, handle_) @@ -57181,13 +57181,13 @@ begin return res[1] end end - function tensor_array_close_v2(handle_; name=nothing) - if tf.in_eager_mode() - tensor_array_close_v2_eager(handle_; name=name) - else - tensor_array_close_v2_graph(handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v2(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_v2_eager(handle_; name=name) + else + tensor_array_close_v2_graph(handle_; name=name) + end end - end end @@ -57197,37 +57197,37 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - local desc - tf.with_op_name(name, "BigQueryReader") do - desc = tf.NodeDescription("BigQueryReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if project_id !== nothing - desc["project_id"] = Base.String(project_id) - end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) - end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - end - tf.Tensor(tf.Operation(desc)) + function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "BigQueryReader") do + desc = tf.NodeDescription("BigQueryReader") + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end end + 
tf.Tensor(tf.Operation(desc)) + end function big_query_reader_eager(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) desc = tf.EagerOp("BigQueryReader") if container !== nothing @@ -57261,13 +57261,13 @@ begin return res[1] end end - function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - if tf.in_eager_mode() - big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) - else - big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + if tf.in_eager_mode() + big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + else + big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + end end - end end @@ -57277,22 +57277,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadV2") do - desc = tf.NodeDescription("ReaderReadV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - queue_handle_ = convert(Tensor{Any}, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadV2") do + desc = tf.NodeDescription("ReaderReadV2") + reader_handle_ = convert(Tensor{Any}, reader_handle_) + queue_handle_ = convert(Tensor{Any}, queue_handle_) + tf.add_input(desc, reader_handle_) + tf.add_input(desc, queue_handle_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) desc = tf.EagerOp("ReaderReadV2") reader_handle_ = convert(tf.EagerTensor, reader_handle_) @@ -57306,13 +57306,13 @@ begin return res end end - function reader_read_v2(reader_handle_, queue_handle_; name=nothing) - if tf.in_eager_mode() - reader_read_v2_eager(reader_handle_, queue_handle_; name=name) - else - reader_read_v2_graph(reader_handle_, queue_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing) + 
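# Ops with more than one output (ReaderReadV2 above returns a key and a value;
# QuantizeV2 earlier returns three tensors) collect one `tf.Tensor` wrapper
# per output slot and return the vector, and their eager counterparts likewise
# return the whole `res` vector instead of `res[1]`. A sketch, assuming a
# hypothetical two-output op "PairOp" and the same `tf` alias as the rest of
# this file:
function pair_op_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "PairOp") do
        desc = tf.NodeDescription("PairOp")
        x_ = convert(Tensor{Any}, x_)
        tf.add_input(desc, x_)
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:2                        # one wrapper per output slot
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end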
if tf.in_eager_mode() + reader_read_v2_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_v2_graph(reader_handle_, queue_handle_; name=name) + end end - end end @@ -57322,18 +57322,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Mod") do - desc = tf.NodeDescription("Mod") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mod") do + desc = tf.NodeDescription("Mod") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function mod_eager(x_, y_; name=nothing) desc = tf.EagerOp("Mod") x_ = convert(tf.EagerTensor, x_) @@ -57349,13 +57349,13 @@ begin return res[1] end end - function mod(x_, y_; name=nothing) - if tf.in_eager_mode() - mod_eager(x_, y_; name=name) - else - mod_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mod(x_, y_; name=nothing) + if tf.in_eager_mode() + mod_eager(x_, y_; name=name) + else + mod_graph(x_, y_; name=name) + end end - end end @@ -57365,18 +57365,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function add_v2_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "AddV2") do - desc = tf.NodeDescription("AddV2") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) + function add_v2_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "AddV2") do + desc = tf.NodeDescription("AddV2") + x_ = convert(Tensor{Any}, x_) + y_ = convert(Tensor{Any}, y_) + (x_, y_) = tf.tf_promote(x_, y_) + tf.add_input(desc, x_) + tf.add_input(desc, y_) end + tf.Tensor(tf.Operation(desc)) + end function add_v2_eager(x_, y_; name=nothing) desc = tf.EagerOp("AddV2") x_ = convert(tf.EagerTensor, x_) @@ -57392,13 +57392,13 @@ begin return res[1] end end - function add_v2(x_, y_; name=nothing) - if tf.in_eager_mode() - add_v2_eager(x_, y_; name=name) - else - add_v2_graph(x_, y_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_v2(x_, y_; name=nothing) + if tf.in_eager_mode() + add_v2_eager(x_, y_; name=name) + else + add_v2_graph(x_, y_; name=name) + end end - end end @@ -57408,22 +57408,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomNormal") do - desc = tf.NodeDescription("StatelessRandomNormal") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomNormal") do + desc = tf.NodeDescription("StatelessRandomNormal") + shape_ = 
convert(Tensor{Int32}, shape_) + seed_ = convert(Tensor{Int64}, seed_) + (shape_,) = tf.tf_promote(shape_) + (seed_,) = tf.tf_promote(seed_) + tf.add_input(desc, shape_) + tf.add_input(desc, seed_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) desc = tf.EagerOp("StatelessRandomNormal") shape_ = convert(tf.EagerTensor, shape_) @@ -57442,13 +57442,13 @@ begin return res[1] end end - function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) + end end - end end @@ -57458,61 +57458,61 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSliceAssign") do - desc = tf.NodeDescription("StridedSliceAssign") - ref_ = convert(Tensor{Any}, ref_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) + function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSliceAssign") do + desc = tf.NodeDescription("StridedSliceAssign") + ref_ = convert(Tensor{Any}, ref_) + begin_ = convert(Tensor{Any}, 
begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + value_ = convert(Tensor{Any}, value_) + (ref_, value_) = tf.tf_promote(ref_, value_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end end + tf.Tensor(tf.Operation(desc)) + end function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("StridedSliceAssign") ref_ = convert(tf.EagerTensor, ref_) @@ -57570,13 +57570,13 @@ begin return res[1] end end - function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - end end @@ -57586,25 +57586,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - 
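# The scatter and strided-slice wrappers above subtract one from their index
# inputs (`indices_`, `begin_`, `end_`, `strides_`, and the integer masks)
# before the op description is built, shifting from Julia's 1-based convention
# toward the 0-based values the TensorFlow kernels expect. A sketch of just
# that conversion, mirroring the generated code's own idiom and assuming an
# integer index tensor:
function to_zero_based(indices_)
    indices_ = convert(Tensor{Any}, indices_)
    # Subtract a constant 1 of the matching element type, as the wrappers do.
    indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
end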
tf.with_op_name(name, "ScatterMin") do - desc = tf.NodeDescription("ScatterMin") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end + function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMin") do + desc = tf.NodeDescription("ScatterMin") + ref_ = convert(Tensor{Any}, ref_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (ref_, updates_) = tf.tf_promote(ref_, updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, ref_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ScatterMin") ref_ = convert(tf.EagerTensor, ref_) @@ -57626,13 +57626,13 @@ begin return res[1] end end - function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end end @@ -57642,61 +57642,61 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "ResourceStridedSliceAssign") do - desc = tf.NodeDescription("ResourceStridedSliceAssign") - ref_ = convert(Tensor{Any}, ref_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - 
if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "ResourceStridedSliceAssign") do + desc = tf.NodeDescription("ResourceStridedSliceAssign") + ref_ = convert(Tensor{Any}, ref_) + begin_ = convert(Tensor{Any}, begin_) + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end_ = convert(Tensor{Any}, end_) + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + strides_ = convert(Tensor{Any}, strides_) + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + value_ = convert(Tensor{Any}, value_) + (value_,) = tf.tf_promote(value_) + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + tf.add_input(desc, ref_) + tf.add_input(desc, begin_) + tf.add_input(desc, end_) + tf.add_input(desc, strides_) + tf.add_input(desc, value_) + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) desc = tf.EagerOp("ResourceStridedSliceAssign") ref_ = convert(tf.EagerTensor, ref_) @@ -57753,13 +57753,13 @@ begin return res[1] end end - function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - end end @@ -57769,18 +57769,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_gamma_grad_graph(alpha_, sample_; name=nothing) - local desc - tf.with_op_name(name, "RandomGammaGrad") do - desc = tf.NodeDescription("RandomGammaGrad") - alpha_ = convert(Tensor{Any}, alpha_) - sample_ = convert(Tensor{Any}, sample_) - (alpha_, sample_) = tf.tf_promote(alpha_, sample_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sample_) - end - tf.Tensor(tf.Operation(desc)) + function random_gamma_grad_graph(alpha_, sample_; name=nothing) + local desc + tf.with_op_name(name, "RandomGammaGrad") do + desc = tf.NodeDescription("RandomGammaGrad") + alpha_ = convert(Tensor{Any}, alpha_) + sample_ = convert(Tensor{Any}, sample_) + (alpha_, sample_) = tf.tf_promote(alpha_, sample_) + tf.add_input(desc, alpha_) + tf.add_input(desc, sample_) end + tf.Tensor(tf.Operation(desc)) + end function random_gamma_grad_eager(alpha_, sample_; name=nothing) desc = tf.EagerOp("RandomGammaGrad") alpha_ = convert(tf.EagerTensor, alpha_) @@ -57796,13 +57796,13 @@ begin return res[1] end end - function random_gamma_grad(alpha_, sample_; name=nothing) - if tf.in_eager_mode() - random_gamma_grad_eager(alpha_, sample_; name=name) - else - random_gamma_grad_graph(alpha_, sample_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing) + if tf.in_eager_mode() + random_gamma_grad_eager(alpha_, sample_; name=name) + else + random_gamma_grad_graph(alpha_, sample_; name=name) + end end - end end @@ -57812,34 +57812,34 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do - desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - 
desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) + function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + momentum_ = convert(Tensor{Any}, momentum_) + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + tf.add_input(desc, indices_) + tf.add_input(desc, momentum_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end end + tf.Tensor(tf.Operation(desc)) + end function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") var_ = convert(tf.EagerTensor, var_) @@ -57871,13 +57871,13 @@ begin return res[1] end end - function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end end @@ -57887,22 +57887,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do - desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - epsilon_ = convert(Tensor{Float32}, epsilon_) - num_streams_ = convert(Tensor{Int64}, num_streams_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, num_streams_) - if max_elements !== nothing - desc["max_elements"] = Base.Int(max_elements) - end + function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + 
local desc + tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do + desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + epsilon_ = convert(Tensor{Float32}, epsilon_) + num_streams_ = convert(Tensor{Int64}, num_streams_) + tf.add_input(desc, quantile_stream_resource_handle_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, num_streams_) + if max_elements !== nothing + desc["max_elements"] = Base.Int(max_elements) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) @@ -57921,13 +57921,13 @@ begin return res[1] end end - function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - if tf.in_eager_mode() - boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) - else - boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + if tf.in_eager_mode() + boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + else + boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + end end - end end @@ -57937,28 +57937,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedRelu6") do - desc = tf.NodeDescription("QuantizedRelu6") - features_ = convert(Tensor{Any}, features_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end + function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu6") do + desc = tf.NodeDescription("QuantizedRelu6") + features_ = convert(Tensor{Any}, features_) + min_features_ = convert(Tensor{Float32}, min_features_) + max_features_ = convert(Tensor{Float32}, max_features_) + (features_,) = tf.tf_promote(features_) + tf.add_input(desc, features_) + tf.add_input(desc, min_features_) + tf.add_input(desc, max_features_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, 
tf.Tensor(op, out_idx)) + end + out + end function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) desc = tf.EagerOp("QuantizedRelu6") features_ = convert(tf.EagerTensor, features_) @@ -57978,13 +57978,13 @@ begin return res end end - function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end end - end end @@ -57994,31 +57994,31 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSparseMaximum") do - desc = tf.NodeDescription("SparseSparseMaximum") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSparseMaximum") do + desc = tf.NodeDescription("SparseSparseMaximum") + a_indices_ = convert(Tensor{Int64}, a_indices_) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Int64}, a_shape_) + b_indices_ = convert(Tensor{Int64}, b_indices_) + b_values_ = convert(Tensor{Any}, b_values_) + b_shape_ = convert(Tensor{Int64}, b_shape_) + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + tf.add_input(desc, a_shape_) + tf.add_input(desc, b_indices_) + tf.add_input(desc, b_values_) + tf.add_input(desc, b_shape_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) desc = tf.EagerOp("SparseSparseMaximum") a_indices_ = convert(tf.EagerTensor, a_indices_) @@ -58042,13 +58042,13 @@ begin return res end end - function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.in_eager_mode() - sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) - else - sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.in_eager_mode() + sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end end - end end @@ -58058,30 +58058,30 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "BatchNormWithGlobalNormalization") do - desc = tf.NodeDescription("BatchNormWithGlobalNormalization") - t_ = convert(Tensor{Any}, t_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta_ = convert(Tensor{Any}, beta_) - gamma_ = convert(Tensor{Any}, gamma_) - (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta_) - tf.add_input(desc, gamma_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - end - tf.Tensor(tf.Operation(desc)) + function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalization") + t_ = convert(Tensor{Any}, t_) + m_ = convert(Tensor{Any}, m_) + v_ = convert(Tensor{Any}, v_) + beta_ = convert(Tensor{Any}, beta_) + gamma_ = convert(Tensor{Any}, gamma_) + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + tf.add_input(desc, t_) + tf.add_input(desc, m_) + tf.add_input(desc, v_) + tf.add_input(desc, beta_) + tf.add_input(desc, gamma_) + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end end + tf.Tensor(tf.Operation(desc)) + end function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) desc = tf.EagerOp("BatchNormWithGlobalNormalization") t_ = convert(tf.EagerTensor, t_) @@ -58112,13 +58112,13 @@ begin return res[1] end end - function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + 
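# Mixed-type inputs are reconciled before being wired into an op:
# `tf.tf_promote`, used throughout these wrappers (five tensors at once in
# BatchNormWithGlobalNormalization below), converts its arguments toward a
# common element type so that, for example, integer literals can be mixed with
# floating-point tensors. A sketch, assuming a hypothetical description `desc`
# and two numeric inputs:
function promoted_add_inputs!(desc, x_, y_)
    x_ = convert(Tensor{Any}, x_)
    y_ = convert(Tensor{Any}, y_)
    (x_, y_) = tf.tf_promote(x_, y_)   # unify element types, as the wrappers do
    tf.add_input(desc, x_)
    tf.add_input(desc, y_)
end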
batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - end end @@ -58128,20 +58128,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) - local desc - tf.with_op_name(name, "InTopKV2") do - desc = tf.NodeDescription("InTopKV2") - predictions_ = convert(Tensor{Float32}, predictions_) - targets_ = convert(Tensor{Int32}, targets_) - k_ = convert(Tensor{Int32}, k_) - (targets_, k_) = tf.tf_promote(targets_, k_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - tf.add_input(desc, k_) - end - tf.Tensor(tf.Operation(desc)) + function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + local desc + tf.with_op_name(name, "InTopKV2") do + desc = tf.NodeDescription("InTopKV2") + predictions_ = convert(Tensor{Float32}, predictions_) + targets_ = convert(Tensor{Int32}, targets_) + k_ = convert(Tensor{Int32}, k_) + (targets_, k_) = tf.tf_promote(targets_, k_) + tf.add_input(desc, predictions_) + tf.add_input(desc, targets_) + tf.add_input(desc, k_) end + tf.Tensor(tf.Operation(desc)) + end function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) desc = tf.EagerOp("InTopKV2") predictions_ = convert(tf.EagerTensor, predictions_) @@ -58159,13 +58159,13 @@ begin return res[1] end end - function in_top_kv2(predictions_, targets_, k_; name=nothing) - if tf.in_eager_mode() - in_top_kv2_eager(predictions_, targets_, k_; name=name) - else - in_top_kv2_graph(predictions_, targets_, k_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing) + if tf.in_eager_mode() + in_top_kv2_eager(predictions_, targets_, k_; name=name) + else + in_top_kv2_graph(predictions_, targets_, k_; name=name) + end end - end end @@ -58175,16 +58175,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function cholesky_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Cholesky") do - desc = tf.NodeDescription("Cholesky") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function cholesky_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Cholesky") do + desc = tf.NodeDescription("Cholesky") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function cholesky_eager(input_; name=nothing) desc = tf.EagerOp("Cholesky") input_ = convert(tf.EagerTensor, input_) @@ -58197,13 +58197,13 @@ begin return res[1] end end - function cholesky(input_; name=nothing) - if tf.in_eager_mode() - cholesky_eager(input_; name=name) - else - cholesky_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky(input_; name=nothing) + if tf.in_eager_mode() + cholesky_eager(input_; name=name) + else + cholesky_graph(input_; name=name) + end end - end end @@ -58213,35 +58213,35 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function 
resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do - desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end + function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") + var_ = convert(Tensor{Any}, var_) + mg_ = convert(Tensor{Any}, mg_) + ms_ = convert(Tensor{Any}, ms_) + mom_ = convert(Tensor{Any}, mom_) + lr_ = convert(Tensor{Any}, lr_) + rho_ = convert(Tensor{Any}, rho_) + momentum_ = convert(Tensor{Any}, momentum_) + epsilon_ = convert(Tensor{Any}, epsilon_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, mg_) + tf.add_input(desc, ms_) + tf.add_input(desc, mom_) + tf.add_input(desc, lr_) + tf.add_input(desc, rho_) + tf.add_input(desc, momentum_) + tf.add_input(desc, epsilon_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) desc = tf.EagerOp("ResourceApplyCenteredRMSProp") var_ = convert(tf.EagerTensor, var_) @@ -58277,13 +58277,13 @@ begin return res[1] end end - function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end end @@ -58293,28 +58293,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 
=# tf.@op function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdagrad") do - desc = tf.NodeDescription("ResourceApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, grad_) = tf.tf_promote(lr_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end + function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdagrad") do + desc = tf.NodeDescription("ResourceApplyAdagrad") + var_ = convert(Tensor{Any}, var_) + accum_ = convert(Tensor{Any}, accum_) + lr_ = convert(Tensor{Any}, lr_) + grad_ = convert(Tensor{Any}, grad_) + (lr_, grad_) = tf.tf_promote(lr_, grad_) + tf.add_input(desc, var_) + tf.add_input(desc, accum_) + tf.add_input(desc, lr_) + tf.add_input(desc, grad_) + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) desc = tf.EagerOp("ResourceApplyAdagrad") var_ = convert(tf.EagerTensor, var_) @@ -58340,13 +58340,13 @@ begin return res[1] end end - function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - else - resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end end @@ -58356,39 +58356,39 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do - desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - sloppy_ = convert(Tensor{Bool}, sloppy_) - buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_) - prefetch_input_elements_ = 
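# ---------------------------------------------------------------------------
# Editorial aside: the eager counterpart skips graph construction entirely.
# Only the `res[1]` unpacking is visible in these hunks; the `tf.execute` and
# `tf.data_type` calls and the `desc["T"] = ...` attr assignment are
# assumptions inferred from the truncated eager bodies in this file.  A sketch
# for the same hypothetical "MyOp":
function my_op_eager(x_; name=nothing)
    desc = tf.EagerOp("MyOp")
    x_ = convert(tf.EagerTensor, x_)  # eager inputs are EagerTensor handles
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)      # assumed: set "T" from the runtime dtype
    res = tf.execute(desc)            # assumed: runs now, returns output handles
    if length(res) >= 1
        return res[1]                 # single-output ops unwrap the first handle
    end
end
# ---------------------------------------------------------------------------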
convert(Tensor{Int64}, prefetch_input_elements_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, sloppy_) - tf.add_input(desc, buffer_output_elements_) - tf.add_input(desc, prefetch_input_elements_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) + function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") + input_dataset_ = convert(Tensor{Any}, input_dataset_) + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + block_length_ = convert(Tensor{Int64}, block_length_) + sloppy_ = convert(Tensor{Bool}, sloppy_) + buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_) + prefetch_input_elements_ = convert(Tensor{Int64}, prefetch_input_elements_) + tf.add_input(desc, input_dataset_) + tf.add_input(desc, other_arguments_) + tf.add_input(desc, cycle_length_) + tf.add_input(desc, block_length_) + tf.add_input(desc, sloppy_) + tf.add_input(desc, buffer_output_elements_) + tf.add_input(desc, prefetch_input_elements_) + if f !== nothing + desc["f"] = Base.identity(f) + end + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end end + tf.Tensor(tf.Operation(desc)) + end function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") input_dataset_ = convert(tf.EagerTensor, input_dataset_) @@ -58424,13 +58424,13 @@ begin return res[1] end end - function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -58440,21 +58440,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBicubicGrad") do - desc = tf.NodeDescription("ResizeBicubicGrad") - grads_ = convert(Tensor{Float32}, grads_) - original_image_ = convert(Tensor{Any}, original_image_) - (original_image_,) = tf.tf_promote(original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubicGrad") do + desc = tf.NodeDescription("ResizeBicubicGrad") + grads_ = convert(Tensor{Float32}, grads_) + original_image_ = convert(Tensor{Any}, original_image_) + (original_image_,) = tf.tf_promote(original_image_) + tf.add_input(desc, grads_) + tf.add_input(desc, original_image_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeBicubicGrad") grads_ = convert(tf.EagerTensor, grads_) @@ -58472,13 +58472,13 @@ begin return res[1] end end - function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) - else - resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end end - end end @@ -58488,16 +58488,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_self_adjoint_eig_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchSelfAdjointEig") do - desc = tf.NodeDescription("BatchSelfAdjointEig") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_self_adjoint_eig_graph(input_; name=nothing) + local desc + 
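# ---------------------------------------------------------------------------
# Editorial aside: ops like ExperimentalParallelInterleaveDataset above take
# list-valued inputs and list-valued attrs.  The generated code converts list
# inputs element-wise and materializes attr lists with `map(Base.identity, ...)`
# before they reach the C API.  Those two normalizations in isolation, with
# hypothetical values:
to_tensor_list(xs) = [convert(Tensor{Any}, x) for x in xs]  # per-element convert
normalize_attr_list(xs) = map(Base.identity, xs)   # copy into a plain Vector
tensors = to_tensor_list(Any[1, 2.0])              # mixed Julia values -> tensors
dtypes  = normalize_attr_list([Float32, Int64])    # e.g. an output_types attr
# ---------------------------------------------------------------------------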
tf.with_op_name(name, "BatchSelfAdjointEig") do + desc = tf.NodeDescription("BatchSelfAdjointEig") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_self_adjoint_eig_eager(input_; name=nothing) desc = tf.EagerOp("BatchSelfAdjointEig") input_ = convert(tf.EagerTensor, input_) @@ -58510,13 +58510,13 @@ begin return res[1] end end - function batch_self_adjoint_eig(input_; name=nothing) - if tf.in_eager_mode() - batch_self_adjoint_eig_eager(input_; name=name) - else - batch_self_adjoint_eig_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing) + if tf.in_eager_mode() + batch_self_adjoint_eig_eager(input_; name=name) + else + batch_self_adjoint_eig_graph(input_; name=name) + end end - end end @@ -58526,20 +58526,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSoftmax") do - desc = tf.NodeDescription("SparseSoftmax") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - (sp_values_,) = tf.tf_promote(sp_values_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSoftmax") do + desc = tf.NodeDescription("SparseSoftmax") + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + sp_values_ = convert(Tensor{Any}, sp_values_) + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + (sp_values_,) = tf.tf_promote(sp_values_) + tf.add_input(desc, sp_indices_) + tf.add_input(desc, sp_values_) + tf.add_input(desc, sp_shape_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) desc = tf.EagerOp("SparseSoftmax") sp_indices_ = convert(tf.EagerTensor, sp_indices_) @@ -58556,13 +58556,13 @@ begin return res[1] end end - function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) - if tf.in_eager_mode() - sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) - else - sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) + if tf.in_eager_mode() + sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) + else + sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name) + end end - end end @@ -58572,16 +58572,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function asinh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Asinh") do - desc = tf.NodeDescription("Asinh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function asinh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Asinh") do + desc = tf.NodeDescription("Asinh") + x_ = convert(Tensor{Any}, x_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function asinh_eager(x_; name=nothing) desc = 
tf.EagerOp("Asinh") x_ = convert(tf.EagerTensor, x_) @@ -58594,13 +58594,13 @@ begin return res[1] end end - function asinh(x_; name=nothing) - if tf.in_eager_mode() - asinh_eager(x_; name=name) - else - asinh_graph(x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asinh(x_; name=nothing) + if tf.in_eager_mode() + asinh_eager(x_; name=name) + else + asinh_graph(x_; name=name) + end end - end end @@ -58610,19 +58610,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixInverse") do - desc = tf.NodeDescription("MatrixInverse") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end + function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixInverse") do + desc = tf.NodeDescription("MatrixInverse") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) desc = tf.EagerOp("MatrixInverse") input_ = convert(tf.EagerTensor, input_) @@ -58638,13 +58638,13 @@ begin return res[1] end end - function matrix_inverse(input_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - matrix_inverse_eager(input_; name=name, adjoint=adjoint) - else - matrix_inverse_graph(input_; name=name, adjoint=adjoint) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end end - end end @@ -58654,20 +58654,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListConcatLists") do - desc = tf.NodeDescription("TensorListConcatLists") - input_a_ = convert(Tensor{Any}, input_a_) - input_b_ = convert(Tensor{Any}, input_b_) - tf.add_input(desc, input_a_) - tf.add_input(desc, input_b_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end + function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListConcatLists") do + desc = tf.NodeDescription("TensorListConcatLists") + input_a_ = convert(Tensor{Any}, input_a_) + input_b_ = convert(Tensor{Any}, input_b_) + tf.add_input(desc, input_a_) + tf.add_input(desc, input_b_) + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) desc = tf.EagerOp("TensorListConcatLists") input_a_ = convert(tf.EagerTensor, input_a_) @@ -58684,13 +58684,13 @@ begin return res[1] end end - function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - 
tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) - else - tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) + else + tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) + end end - end end @@ -58700,32 +58700,32 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Requantize") do - desc = tf.NodeDescription("Requantize") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) - requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, requested_output_min_) - tf.add_input(desc, requested_output_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) + function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Requantize") do + desc = tf.NodeDescription("Requantize") + input_ = convert(Tensor{Any}, input_) + input_min_ = convert(Tensor{Float32}, input_min_) + input_max_ = convert(Tensor{Float32}, input_max_) + requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) + requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, input_min_) + tf.add_input(desc, input_max_) + tf.add_input(desc, requested_output_min_) + tf.add_input(desc, requested_output_max_) + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) end - out end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) desc = tf.EagerOp("Requantize") input_ = convert(tf.EagerTensor, input_) @@ -58749,13 +58749,13 @@ begin return res end end - function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) - else - requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + requantize_eager(input_, input_min_, 
input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + else + requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + end end - end end @@ -58765,16 +58765,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function fft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "FFT") do - desc = tf.NodeDescription("FFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function fft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT") do + desc = tf.NodeDescription("FFT") + input_ = convert(Tensor{Complex{Float32}}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function fft_eager(input_; name=nothing) desc = tf.EagerOp("FFT") input_ = convert(tf.EagerTensor, input_) @@ -58787,13 +58787,13 @@ begin return res[1] end end - function fft(input_; name=nothing) - if tf.in_eager_mode() - fft_eager(input_; name=name) - else - fft_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft(input_; name=nothing) + if tf.in_eager_mode() + fft_eager(input_; name=name) + else + fft_graph(input_; name=name) + end end - end end @@ -58803,19 +58803,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function conjugate_transpose_graph(x_, perm_; name=nothing) - local desc - tf.with_op_name(name, "ConjugateTranspose") do - desc = tf.NodeDescription("ConjugateTranspose") - x_ = convert(Tensor{Any}, x_) - perm_ = convert(Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - end - tf.Tensor(tf.Operation(desc)) + function conjugate_transpose_graph(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "ConjugateTranspose") do + desc = tf.NodeDescription("ConjugateTranspose") + x_ = convert(Tensor{Any}, x_) + perm_ = convert(Tensor{Int32}, perm_) + (perm_,) = tf.tf_promote(perm_) + (x_,) = tf.tf_promote(x_) + tf.add_input(desc, x_) + tf.add_input(desc, perm_) end + tf.Tensor(tf.Operation(desc)) + end function conjugate_transpose_eager(x_, perm_; name=nothing) desc = tf.EagerOp("ConjugateTranspose") x_ = convert(tf.EagerTensor, x_) @@ -58831,13 +58831,13 @@ begin return res[1] end end - function conjugate_transpose(x_, perm_; name=nothing) - if tf.in_eager_mode() - conjugate_transpose_eager(x_, perm_; name=name) - else - conjugate_transpose_graph(x_, perm_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing) + if tf.in_eager_mode() + conjugate_transpose_eager(x_, perm_; name=name) + else + conjugate_transpose_graph(x_, perm_; name=name) + end end - end end @@ -58847,28 +58847,28 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Unstage") do - desc = tf.NodeDescription("Unstage") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - 
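# ---------------------------------------------------------------------------
# Editorial aside: Requantize above is a multi-output op.  Its graph builder
# wraps one `tf.Tensor` per output port (Julia's 1-based `1:3` range maps onto
# the op's output indices), and the eager builder returns the whole `res`
# vector rather than `res[1]`.  The collection pattern, factored into a
# hypothetical helper:
function collect_outputs(desc, n)
    op = tf.Operation(desc)        # finalize the node described by `desc`
    out = tf.Tensor[]
    for out_idx = 1:n              # one wrapper per declared output
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end
# For Requantize, n = 3; the TF op spec names these output, output_min and
# output_max (names are from upstream TensorFlow, not this patch).
# ---------------------------------------------------------------------------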
desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end + function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unstage") do + desc = tf.NodeDescription("Unstage") + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + if container !== nothing + desc["container"] = Base.String(container) + end + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function unstage_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) desc = tf.EagerOp("Unstage") if capacity !== nothing @@ -58893,13 +58893,13 @@ begin return res[1] end end - function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end end @@ -58909,18 +58909,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function relu6grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "Relu6Grad") do - desc = tf.NodeDescription("Relu6Grad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - end - tf.Tensor(tf.Operation(desc)) + function relu6grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6Grad") do + desc = tf.NodeDescription("Relu6Grad") + gradients_ = convert(Tensor{Any}, gradients_) + features_ = convert(Tensor{Any}, features_) + (gradients_, features_) = tf.tf_promote(gradients_, features_) + tf.add_input(desc, gradients_) + tf.add_input(desc, features_) end + tf.Tensor(tf.Operation(desc)) + end function relu6grad_eager(gradients_, features_; name=nothing) desc = tf.EagerOp("Relu6Grad") gradients_ = convert(tf.EagerTensor, gradients_) @@ -58936,13 +58936,13 @@ begin return res[1] end end - function relu6grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - relu6grad_eager(gradients_, features_; name=name) - else - relu6grad_graph(gradients_, features_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
relu6grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + relu6grad_eager(gradients_, features_; name=name) + else + relu6grad_graph(gradients_, features_; name=name) + end end - end end @@ -58952,22 +58952,22 @@ end Converts an array of tensors to a list of tensors. """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) - local desc - tf.with_op_name(name, "_ArrayToList") do - desc = tf.NodeDescription("_ArrayToList") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if out_types !== nothing - desc["out_types"] = map(Base.identity, out_types) - end + function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + local desc + tf.with_op_name(name, "_ArrayToList") do + desc = tf.NodeDescription("_ArrayToList") + input_ = [convert(Tensor{Any}, x) for x = input_] + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if out_types !== nothing + desc["out_types"] = map(Base.identity, out_types) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) desc = tf.EagerOp("_ArrayToList") input_ = convert(tf.EagerTensor, input_) @@ -58986,13 +58986,13 @@ begin return res[1] end end - function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) - if tf.in_eager_mode() - _array_to_list_eager(input_; name=name, N=N, out_types=out_types) - else - _array_to_list_graph(input_; name=name, N=N, out_types=out_types) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) + if tf.in_eager_mode() + _array_to_list_eager(input_; name=name, N=N, out_types=out_types) + else + _array_to_list_graph(input_; name=name, N=N, out_types=out_types) + end end - end end @@ -59002,20 +59002,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function expand_dims_graph(input_, dim_; name=nothing) - local desc - tf.with_op_name(name, "ExpandDims") do - desc = tf.NodeDescription("ExpandDims") - input_ = convert(Tensor{Any}, input_) - dim_ = convert(Tensor{Int32}, dim_) - dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) - (input_,) = tf.tf_promote(input_) - (dim_,) = tf.tf_promote(dim_) - tf.add_input(desc, input_) - tf.add_input(desc, dim_) - end - tf.Tensor(tf.Operation(desc)) + function expand_dims_graph(input_, dim_; name=nothing) + local desc + tf.with_op_name(name, "ExpandDims") do + desc = tf.NodeDescription("ExpandDims") + input_ = convert(Tensor{Any}, input_) + dim_ = convert(Tensor{Int32}, dim_) + dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) + (input_,) = tf.tf_promote(input_) + (dim_,) = tf.tf_promote(dim_) + tf.add_input(desc, input_) + tf.add_input(desc, dim_) end + tf.Tensor(tf.Operation(desc)) + end function expand_dims_eager(input_, dim_; name=nothing) desc = tf.EagerOp("ExpandDims") input_ = convert(tf.EagerTensor, input_) @@ -59031,13 +59031,13 @@ begin return res[1] end end - function expand_dims(input_, dim_; name=nothing) - if tf.in_eager_mode() - expand_dims_eager(input_, dim_; name=name) - else - expand_dims_graph(input_, dim_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
tf.@op function expand_dims(input_, dim_; name=nothing) + if tf.in_eager_mode() + expand_dims_eager(input_, dim_; name=name) + else + expand_dims_graph(input_, dim_; name=name) + end end - end end @@ -59047,18 +59047,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function inv_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "InvGrad") do - desc = tf.NodeDescription("InvGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - end - tf.Tensor(tf.Operation(desc)) + function inv_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "InvGrad") do + desc = tf.NodeDescription("InvGrad") + y_ = convert(Tensor{Any}, y_) + dy_ = convert(Tensor{Any}, dy_) + (y_, dy_) = tf.tf_promote(y_, dy_) + tf.add_input(desc, y_) + tf.add_input(desc, dy_) end + tf.Tensor(tf.Operation(desc)) + end function inv_grad_eager(y_, dy_; name=nothing) desc = tf.EagerOp("InvGrad") y_ = convert(tf.EagerTensor, y_) @@ -59074,13 +59074,13 @@ begin return res[1] end end - function inv_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - inv_grad_eager(y_, dy_; name=name) - else - inv_grad_graph(y_, dy_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + inv_grad_eager(y_, dy_; name=name) + else + inv_grad_graph(y_, dy_; name=name) + end end - end end @@ -59090,22 +59090,22 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppression") do - desc = tf.NodeDescription("NonMaxSuppression") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - if iou_threshold !== nothing - desc["iou_threshold"] = Base.identity(iou_threshold) - end + function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppression") do + desc = tf.NodeDescription("NonMaxSuppression") + boxes_ = convert(Tensor{Float32}, boxes_) + scores_ = convert(Tensor{Float32}, scores_) + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + tf.add_input(desc, boxes_) + tf.add_input(desc, scores_) + tf.add_input(desc, max_output_size_) + if iou_threshold !== nothing + desc["iou_threshold"] = Base.identity(iou_threshold) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) desc = tf.EagerOp("NonMaxSuppression") boxes_ = convert(tf.EagerTensor, boxes_) @@ -59124,13 +59124,13 @@ begin return res[1] end end - function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - if tf.in_eager_mode() - non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) - else - non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression(boxes_, 
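# ---------------------------------------------------------------------------
# Editorial aside: expand_dims above silently translates Julia's 1-based axis
# numbering to TensorFlow's 0-based convention by subtracting a constant 1
# tensor (`dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1)`); the
# resource_scatter_* ops below do the same for their indices.  So, from Julia:
julia_axis = 1             # first axis, Julia convention
tf_axis = julia_axis - 1   # 0 -- the leading axis in TF's numbering
# i.e. expand_dims(x, 1) inserts a new leading dimension.
# ---------------------------------------------------------------------------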
scores_, max_output_size_; name=nothing, iou_threshold=nothing) + if tf.in_eager_mode() + non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + else + non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + end end - end end @@ -59140,16 +59140,16 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function l2loss_graph(t_; name=nothing) - local desc - tf.with_op_name(name, "L2Loss") do - desc = tf.NodeDescription("L2Loss") - t_ = convert(Tensor{Any}, t_) - (t_,) = tf.tf_promote(t_) - tf.add_input(desc, t_) - end - tf.Tensor(tf.Operation(desc)) + function l2loss_graph(t_; name=nothing) + local desc + tf.with_op_name(name, "L2Loss") do + desc = tf.NodeDescription("L2Loss") + t_ = convert(Tensor{Any}, t_) + (t_,) = tf.tf_promote(t_) + tf.add_input(desc, t_) end + tf.Tensor(tf.Operation(desc)) + end function l2loss_eager(t_; name=nothing) desc = tf.EagerOp("L2Loss") t_ = convert(tf.EagerTensor, t_) @@ -59162,13 +59162,13 @@ begin return res[1] end end - function l2loss(t_; name=nothing) - if tf.in_eager_mode() - l2loss_eager(t_; name=name) - else - l2loss_graph(t_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function l2loss(t_; name=nothing) + if tf.in_eager_mode() + l2loss_eager(t_; name=name) + else + l2loss_graph(t_; name=name) + end end - end end @@ -59178,21 +59178,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeArea") do - desc = tf.NodeDescription("ResizeArea") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end + function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeArea") do + desc = tf.NodeDescription("ResizeArea") + images_ = convert(Tensor{Any}, images_) + size_ = convert(Tensor{Int32}, size_) + (images_,) = tf.tf_promote(images_) + tf.add_input(desc, images_) + tf.add_input(desc, size_) + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) desc = tf.EagerOp("ResizeArea") images_ = convert(tf.EagerTensor, images_) @@ -59210,13 +59210,13 @@ begin return res[1] end end - function resize_area(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_area_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_area_graph(images_, size_; name=name, align_corners=align_corners) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_area_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_area_graph(images_, size_; name=name, align_corners=align_corners) + end end - end end @@ -59226,50 +59226,50 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; 
name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - local desc - tf.with_op_name(name, "SparseCross") do - desc = tf.NodeDescription("SparseCross") - indices_ = [convert(Tensor{Int64}, x) for x = indices_] - values_ = [convert(Tensor{Any}, x) for x = values_] - shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] - dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_] - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - tf.add_input(desc, dense_inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if hashed_output !== nothing - desc["hashed_output"] = Base.Bool(hashed_output) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if hash_key !== nothing - desc["hash_key"] = Base.Int(hash_key) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if dense_types !== nothing - desc["dense_types"] = map(Base.identity, dense_types) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if internal_type !== nothing - desc["internal_type"] = Base.identity(internal_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + local desc + tf.with_op_name(name, "SparseCross") do + desc = tf.NodeDescription("SparseCross") + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + values_ = [convert(Tensor{Any}, x) for x = values_] + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_] + tf.add_input(desc, indices_) + tf.add_input(desc, values_) + tf.add_input(desc, shapes_) + tf.add_input(desc, dense_inputs_) + if N !== nothing + desc["N"] = Base.Int(N) + end + if hashed_output !== nothing + desc["hashed_output"] = Base.Bool(hashed_output) + end + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + if hash_key !== nothing + desc["hash_key"] = Base.Int(hash_key) + end + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + if dense_types !== nothing + desc["dense_types"] = map(Base.identity, dense_types) + end + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + if internal_type !== nothing + desc["internal_type"] = Base.identity(internal_type) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) desc = tf.EagerOp("SparseCross") indices_ = convert(tf.EagerTensor, indices_) @@ -59311,13 +59311,13 @@ begin return res end end - function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - if tf.in_eager_mode() - sparse_cross_eager(indices_, values_, shapes_, 
dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) - else - sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + if tf.in_eager_mode() + sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + else + sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + end end - end end @@ -59327,15 +59327,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function batch_fft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT3D") do - desc = tf.NodeDescription("BatchFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) + function batch_fft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT3D") do + desc = tf.NodeDescription("BatchFFT3D") + input_ = convert(Tensor{Complex{Float32}}, input_) + tf.add_input(desc, input_) end + tf.Tensor(tf.Operation(desc)) + end function batch_fft3d_eager(input_; name=nothing) desc = tf.EagerOp("BatchFFT3D") input_ = convert(tf.EagerTensor, input_) @@ -59347,13 +59347,13 @@ begin return res[1] end end - function batch_fft3d(input_; name=nothing) - if tf.in_eager_mode() - batch_fft3d_eager(input_; name=name) - else - batch_fft3d_graph(input_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft3d(input_; name=nothing) + if tf.in_eager_mode() + batch_fft3d_eager(input_; name=name) + else + batch_fft3d_graph(input_; name=name) + end end - end end @@ -59363,25 +59363,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomStandardNormal") do - desc = tf.NodeDescription("RandomStandardNormal") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomStandardNormal") do + desc = tf.NodeDescription("RandomStandardNormal") + shape_ = convert(Tensor{Any}, shape_) + (shape_,) = tf.tf_promote(shape_) + tf.add_input(desc, shape_) + if seed !== 
nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) desc = tf.EagerOp("RandomStandardNormal") shape_ = convert(tf.EagerTensor, shape_) @@ -59403,13 +59403,13 @@ begin return res[1] end end - function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - end end @@ -59419,25 +59419,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMul") do - desc = tf.NodeDescription("ResourceScatterMul") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end + function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMul") do + desc = tf.NodeDescription("ResourceScatterMul") + resource_ = convert(Tensor{Any}, resource_) + indices_ = convert(Tensor{Any}, indices_) + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + updates_ = convert(Tensor{Any}, updates_) + (updates_,) = tf.tf_promote(updates_) + (indices_,) = tf.tf_promote(indices_) + tf.add_input(desc, resource_) + tf.add_input(desc, indices_) + tf.add_input(desc, updates_) + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) desc = tf.EagerOp("ResourceScatterMul") resource_ = convert(tf.EagerTensor, resource_) @@ -59458,13 +59458,13 @@ begin return res[1] end end - function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_mul_graph(resource_, indices_, 
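# ---------------------------------------------------------------------------
# Editorial aside: optional attrs throughout this file default to `nothing`,
# and the `if attr !== nothing` guards mean an omitted keyword is never set on
# the node, so the default registered in the TF op registry applies.  Hedged
# usage for RandomStandardNormal above (assumes the generated wrapper is in
# scope and that dtype attrs accept Julia types, as elsewhere in TensorFlow.jl):
shape = [2, 3]
x1 = random_standard_normal(shape)                          # registry defaults
x2 = random_standard_normal(shape; seed=42, dtype=Float64)  # explicit attrs
# ---------------------------------------------------------------------------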
updates_; name=name, dtype=dtype) + end end - end end @@ -59474,65 +59474,65 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - local desc - tf.with_op_name(name, "SdcaOptimizer") do - desc = tf.NodeDescription("SdcaOptimizer") - sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] - sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] - sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] - dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - example_labels_ = convert(Tensor{Float32}, example_labels_) - sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] - sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] - dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] - example_state_data_ = convert(Tensor{Float32}, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) - end - if adaptative !== nothing - desc["adaptative"] = Base.Bool(adaptative) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizer") do + desc = tf.NodeDescription("SdcaOptimizer") + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + sparse_feature_indices_ = 
[convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + example_weights_ = convert(Tensor{Float32}, example_weights_) + example_labels_ = convert(Tensor{Float32}, example_labels_) + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + tf.add_input(desc, sparse_example_indices_) + tf.add_input(desc, sparse_feature_indices_) + tf.add_input(desc, sparse_feature_values_) + tf.add_input(desc, dense_features_) + tf.add_input(desc, example_weights_) + tf.add_input(desc, example_labels_) + tf.add_input(desc, sparse_indices_) + tf.add_input(desc, sparse_weights_) + tf.add_input(desc, dense_weights_) + tf.add_input(desc, example_state_data_) + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + if adaptative !== nothing + desc["adaptative"] = Base.Bool(adaptative) + end + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) desc = tf.EagerOp("SdcaOptimizer") sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) @@ -59589,13 +59589,13 @@ begin return res end end - function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.in_eager_mode() - sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, 
num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - else - sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.in_eager_mode() + sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end end - end end @@ -59605,18 +59605,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function zeta_graph(x_, q_; name=nothing) - local desc - tf.with_op_name(name, "Zeta") do - desc = tf.NodeDescription("Zeta") - x_ = convert(Tensor{Any}, x_) - q_ = convert(Tensor{Any}, q_) - (x_, q_) = tf.tf_promote(x_, q_) - tf.add_input(desc, x_) - tf.add_input(desc, q_) - end - tf.Tensor(tf.Operation(desc)) + function zeta_graph(x_, q_; name=nothing) + local desc + tf.with_op_name(name, "Zeta") do + desc = tf.NodeDescription("Zeta") + x_ = convert(Tensor{Any}, x_) + q_ = convert(Tensor{Any}, q_) + (x_, q_) = tf.tf_promote(x_, q_) + tf.add_input(desc, x_) + tf.add_input(desc, q_) end + tf.Tensor(tf.Operation(desc)) + end function zeta_eager(x_, q_; name=nothing) desc = tf.EagerOp("Zeta") x_ = convert(tf.EagerTensor, x_) @@ -59632,13 +59632,13 @@ begin return res[1] end end - function zeta(x_, q_; name=nothing) - if tf.in_eager_mode() - zeta_eager(x_, q_; name=name) - else - zeta_graph(x_, q_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeta(x_, q_; name=nothing) + if tf.in_eager_mode() + zeta_eager(x_, q_; name=name) + else + zeta_graph(x_, q_; name=name) + end end - end end @@ -59648,44 +59648,44 @@ end """ begin - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - local desc - tf.with_op_name(name, "SampleDistortedBoundingBox") do - desc = tf.NodeDescription("SampleDistortedBoundingBox") - image_size_ = convert(Tensor{Any}, image_size_) - bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) - (image_size_,) = tf.tf_promote(image_size_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if min_object_covered !== nothing - desc["min_object_covered"] = Base.identity(min_object_covered) - end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) - end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBox") do + desc = tf.NodeDescription("SampleDistortedBoundingBox") + image_size_ = convert(Tensor{Any}, image_size_) + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + (image_size_,) = tf.tf_promote(image_size_) + tf.add_input(desc, image_size_) + tf.add_input(desc, bounding_boxes_) + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + if min_object_covered !== nothing + desc["min_object_covered"] = Base.identity(min_object_covered) + end + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) desc = tf.EagerOp("SampleDistortedBoundingBox") image_size_ = convert(tf.EagerTensor, image_size_) @@ -59721,13 +59721,13 @@ begin return res end end - function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - if tf.in_eager_mode() - 
sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) - else - sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.in_eager_mode() + sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end end - end end @@ -59737,18 +59737,18 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function igamma_grad_a_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "IgammaGradA") do - desc = tf.NodeDescription("IgammaGradA") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) + function igamma_grad_a_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "IgammaGradA") do + desc = tf.NodeDescription("IgammaGradA") + a_ = convert(Tensor{Any}, a_) + x_ = convert(Tensor{Any}, x_) + (a_, x_) = tf.tf_promote(a_, x_) + tf.add_input(desc, a_) + tf.add_input(desc, x_) end + tf.Tensor(tf.Operation(desc)) + end function igamma_grad_a_eager(a_, x_; name=nothing) desc = tf.EagerOp("IgammaGradA") a_ = convert(tf.EagerTensor, a_) @@ -59764,13 +59764,13 @@ begin return res[1] end end - function igamma_grad_a(a_, x_; name=nothing) - if tf.in_eager_mode() - igamma_grad_a_eager(a_, x_; name=name) - else - igamma_grad_a_graph(a_, x_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma_grad_a(a_, x_; name=nothing) + if tf.in_eager_mode() + igamma_grad_a_eager(a_, x_; name=name) + else + igamma_grad_a_graph(a_, x_; name=name) + end end - end end @@ -59780,20 +59780,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function segment_max_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMax") do - desc = tf.NodeDescription("SegmentMax") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - 
tf.Tensor(tf.Operation(desc)) + function segment_max_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMax") do + desc = tf.NodeDescription("SegmentMax") + data_ = convert(Tensor{Any}, data_) + segment_ids_ = convert(Tensor{Any}, segment_ids_) + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + (data_,) = tf.tf_promote(data_) + (segment_ids_,) = tf.tf_promote(segment_ids_) + tf.add_input(desc, data_) + tf.add_input(desc, segment_ids_) end + tf.Tensor(tf.Operation(desc)) + end function segment_max_eager(data_, segment_ids_; name=nothing) desc = tf.EagerOp("SegmentMax") data_ = convert(tf.EagerTensor, data_) @@ -59809,13 +59809,13 @@ begin return res[1] end end - function segment_max(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_max_eager(data_, segment_ids_; name=name) - else - segment_max_graph(data_, segment_ids_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_max(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_max_eager(data_, segment_ids_; name=name) + else + segment_max_graph(data_, segment_ids_; name=name) + end end - end end @@ -59825,20 +59825,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function range_graph(start_, limit_, delta_; name=nothing) - local desc - tf.with_op_name(name, "Range") do - desc = tf.NodeDescription("Range") - start_ = convert(Tensor{Int32}, start_) - limit_ = convert(Tensor{Int32}, limit_) - delta_ = convert(Tensor{Int32}, delta_) - (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - end - tf.Tensor(tf.Operation(desc)) + function range_graph(start_, limit_, delta_; name=nothing) + local desc + tf.with_op_name(name, "Range") do + desc = tf.NodeDescription("Range") + start_ = convert(Tensor{Int32}, start_) + limit_ = convert(Tensor{Int32}, limit_) + delta_ = convert(Tensor{Int32}, delta_) + (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) + tf.add_input(desc, start_) + tf.add_input(desc, limit_) + tf.add_input(desc, delta_) end + tf.Tensor(tf.Operation(desc)) + end function range_eager(start_, limit_, delta_; name=nothing) desc = tf.EagerOp("Range") start_ = convert(tf.EagerTensor, start_) @@ -59857,13 +59857,13 @@ begin return res[1] end end - function range(start_, limit_, delta_; name=nothing) - if tf.in_eager_mode() - range_eager(start_, limit_, delta_; name=name) - else - range_graph(start_, limit_, delta_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range(start_, limit_, delta_; name=nothing) + if tf.in_eager_mode() + range_eager(start_, limit_, delta_; name=name) + else + range_graph(start_, limit_, delta_; name=name) + end end - end end @@ -59873,30 +59873,30 @@ end Retrieve embedding parameters for a single table. 
""" begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) end + out + end function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") if table_id !== nothing @@ -59918,13 +59918,13 @@ begin return res end end - function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end end @@ -59934,15 +59934,15 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function flush_summary_writer_graph(writer_; name=nothing) - local desc - tf.with_op_name(name, "FlushSummaryWriter") do - desc = tf.NodeDescription("FlushSummaryWriter") - writer_ = convert(Tensor{Any}, writer_) - tf.add_input(desc, writer_) - end - tf.Tensor(tf.Operation(desc)) + function flush_summary_writer_graph(writer_; 
name=nothing) + local desc + tf.with_op_name(name, "FlushSummaryWriter") do + desc = tf.NodeDescription("FlushSummaryWriter") + writer_ = convert(Tensor{Any}, writer_) + tf.add_input(desc, writer_) end + tf.Tensor(tf.Operation(desc)) + end function flush_summary_writer_eager(writer_; name=nothing) desc = tf.EagerOp("FlushSummaryWriter") writer_ = convert(tf.EagerTensor, writer_) @@ -59954,39 +59954,39 @@ begin return res[1] end end - function flush_summary_writer(writer_; name=nothing) - if tf.in_eager_mode() - flush_summary_writer_eager(writer_; name=name) - else - flush_summary_writer_graph(writer_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flush_summary_writer(writer_; name=nothing) + if tf.in_eager_mode() + flush_summary_writer_eager(writer_; name=name) + else + flush_summary_writer_graph(writer_; name=name) + end end - end end """ - dequantize(input, min_range, max_range; mode=MIN_COMBINED) + dequantize(input, min_range, max_range; mode=) """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "Dequantize") do - desc = tf.NodeDescription("Dequantize") - input_ = convert(Tensor{Any}, input_) - min_range_ = convert(Tensor{Float32}, min_range_) - max_range_ = convert(Tensor{Float32}, max_range_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end + function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "Dequantize") do + desc = tf.NodeDescription("Dequantize") + input_ = convert(Tensor{Any}, input_) + min_range_ = convert(Tensor{Float32}, min_range_) + max_range_ = convert(Tensor{Float32}, max_range_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + tf.add_input(desc, min_range_) + tf.add_input(desc, max_range_) + if mode !== nothing + desc["mode"] = Base.String(mode) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) desc = tf.EagerOp("Dequantize") input_ = convert(tf.EagerTensor, input_) @@ -60006,13 +60006,13 @@ begin return res[1] end end - function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) - if tf.in_eager_mode() - dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) - else - dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) + if tf.in_eager_mode() + dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) + else + dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) + end end - end end @@ -60022,23 +60022,23 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) - local desc - tf.with_op_name(name, "SparseFillEmptyRowsGrad") do - desc = tf.NodeDescription("SparseFillEmptyRowsGrad") - reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_) - grad_values_ = convert(Tensor{Any}, grad_values_) - (grad_values_,) = tf.tf_promote(grad_values_) - 
tf.add_input(desc, reverse_index_map_) - tf.add_input(desc, grad_values_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRowsGrad") do + desc = tf.NodeDescription("SparseFillEmptyRowsGrad") + reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_) + grad_values_ = convert(Tensor{Any}, grad_values_) + (grad_values_,) = tf.tf_promote(grad_values_) + tf.add_input(desc, reverse_index_map_) + tf.add_input(desc, grad_values_) + end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) end + out + end function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) desc = tf.EagerOp("SparseFillEmptyRowsGrad") reverse_index_map_ = convert(tf.EagerTensor, reverse_index_map_) @@ -60053,13 +60053,13 @@ begin return res end end - function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) - if tf.in_eager_mode() - sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) - else - sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) + if tf.in_eager_mode() + sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) + else + sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) + end end - end end @@ -60069,21 +60069,21 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNext") do - desc = tf.NodeDescription("IteratorGetNext") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end + function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNext") do + desc = tf.NodeDescription("IteratorGetNext") + iterator_ = convert(Tensor{Any}, iterator_) + tf.add_input(desc, iterator_) + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) desc = tf.EagerOp("IteratorGetNext") iterator_ = convert(tf.EagerTensor, iterator_) @@ -60101,13 +60101,13 @@ begin return res[1] end end - function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next(iterator_; name=nothing, 
output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end end @@ -60117,19 +60117,19 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function prevent_gradient_graph(input_; name=nothing, message=nothing) - local desc - tf.with_op_name(name, "PreventGradient") do - desc = tf.NodeDescription("PreventGradient") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if message !== nothing - desc["message"] = Base.String(message) - end + function prevent_gradient_graph(input_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "PreventGradient") do + desc = tf.NodeDescription("PreventGradient") + input_ = convert(Tensor{Any}, input_) + (input_,) = tf.tf_promote(input_) + tf.add_input(desc, input_) + if message !== nothing + desc["message"] = Base.String(message) end - tf.Tensor(tf.Operation(desc)) end + tf.Tensor(tf.Operation(desc)) + end function prevent_gradient_eager(input_; name=nothing, message=nothing) desc = tf.EagerOp("PreventGradient") input_ = convert(tf.EagerTensor, input_) @@ -60145,13 +60145,13 @@ begin return res[1] end end - function prevent_gradient(input_; name=nothing, message=nothing) - if tf.in_eager_mode() - prevent_gradient_eager(input_; name=name, message=message) - else - prevent_gradient_graph(input_; name=name, message=message) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing) + if tf.in_eager_mode() + prevent_gradient_eager(input_; name=name, message=message) + else + prevent_gradient_graph(input_; name=name, message=message) + end end - end end @@ -60161,25 +60161,25 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) - local desc - tf.with_op_name(name, "SparseTensorDenseAdd") do - desc = tf.NodeDescription("SparseTensorDenseAdd") - a_indices_ = convert(Tensor{Any}, a_indices_) - a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Any}, a_shape_) - a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1) - b_ = convert(Tensor{Any}, b_) - (a_values_, b_) = tf.tf_promote(a_values_, b_) - (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_) - end - tf.Tensor(tf.Operation(desc)) + function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorDenseAdd") do + desc = tf.NodeDescription("SparseTensorDenseAdd") + a_indices_ = convert(Tensor{Any}, a_indices_) + a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) + a_values_ = convert(Tensor{Any}, a_values_) + a_shape_ = convert(Tensor{Any}, a_shape_) + a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1) + b_ = convert(Tensor{Any}, b_) + (a_values_, b_) = tf.tf_promote(a_values_, b_) + (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_) + tf.add_input(desc, a_indices_) + tf.add_input(desc, a_values_) + 
tf.add_input(desc, a_shape_) + tf.add_input(desc, b_) end + tf.Tensor(tf.Operation(desc)) + end function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) desc = tf.EagerOp("SparseTensorDenseAdd") a_indices_ = convert(tf.EagerTensor, a_indices_) @@ -60201,13 +60201,13 @@ begin return res[1] end end - function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) - if tf.in_eager_mode() - sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) - else - sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) + if tf.in_eager_mode() + sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) + else + sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) + end end - end end @@ -60217,20 +60217,20 @@ end """ begin - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:221 =# tf.@op function lookup_table_export_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableExport") do - desc = tf.NodeDescription("LookupTableExport") - table_handle_ = convert(Tensor{String}, table_handle_) - tf.add_input(desc, table_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out + function lookup_table_export_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExport") do + desc = tf.NodeDescription("LookupTableExport") + table_handle_ = convert(Tensor{String}, table_handle_) + tf.add_input(desc, table_handle_) end + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end function lookup_table_export_eager(table_handle_; name=nothing) desc = tf.EagerOp("LookupTableExport") table_handle_ = convert(tf.EagerTensor, table_handle_) @@ -60242,13 +60242,13 @@ begin return res end end - function lookup_table_export(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_export_eager(table_handle_; name=name) - else - lookup_table_export_graph(table_handle_; name=name) + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_export_eager(table_handle_; name=name) + else + lookup_table_export_graph(table_handle_; name=name) + end end - end end diff --git a/test/runtests.jl b/test/runtests.jl index c505e3f6..6a3ad45f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -31,9 +31,9 @@ tests = [ "train.jl", "training.jl", "transformations.jl", +"summary_eager.jl" ] - tf_versioninfo() # Dump out all the info at start of the test, for easy debugging from logs. (also check `tf_versioninfo()` itself works) for filename in tests @@ -42,3 +42,7 @@ for filename in tests include(filename) end end + +# TODO configure tests so they automatically set the appropriate graph or eager mode for themselves. For now, +# all the eager tests run at the end. 
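A minimal sketch of what that TODO could look like in practice, assuming a hypothetical 'EAGER_TESTS' registry and a 'set_mode!' switch (illustrative names only; at this point in the series the package exposes 'tf.in_eager_mode()' for querying the mode, not a public setter):

# Hypothetical sketch, not part of this patch: let the test runner pick the
# mode per file instead of relying on all eager tests running last.
const EAGER_TESTS = Set(["summary_eager.jl"])  # assumption: the files that need eager mode

function run_test(filename, set_mode!)
    # Flip eager mode on or off before the file runs; 'set_mode!' stands in
    # for whatever toggle the package eventually makes public.
    set_mode!(filename in EAGER_TESTS)
    include(filename)  # run the file, as the loop in runtests.jl already does
end

Keying the mode off the filename would make the ordering of the 'tests' list irrelevant, which is what the comment above asks for.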
+include(joinpath(dirname(@__FILE__), "..", "examples", "keras.jl")) From d4a8d5f3c5b803cbb4f78eaf040998d594ee1b29 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 20:39:09 -0400 Subject: [PATCH 47/49] Tests working --- src/generate_ops.jl | 6 +- src/ops/imported_ops.jl | 144069 +++++++++++++++++++++++++------------ 2 files changed, 97009 insertions(+), 47066 deletions(-) diff --git a/src/generate_ops.jl b/src/generate_ops.jl index a893d63a..6c7856a5 100644 --- a/src/generate_ops.jl +++ b/src/generate_ops.jl @@ -292,8 +292,6 @@ function to_function(op::tensorflow.OpDef) sig = "$jl_name($(posargs_str)$(kwargs_str))" doc_str = string(" ", sig, "\n\n", escape_string(op.summary)) #TODO Workout how to get descriptions for docstrings - expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) - eager_expr = unblock(MacroTools.flatten(MacroTools.striplines(eager_expr))) OpFunc(expr, eager_expr, dispatch_expr, doc_str, jl_name) end @@ -311,7 +309,9 @@ function stringify_func(opfunc::OpFunc) $(opfunc.eager_expr) $(opfunc.dispatch_expr) end - expr = unblock(MacroTools.flatten(MacroTools.striplines(expr))) + # MacroTools.flatten seems to have a bug that's causing an invalid expression for 'NoOp' + # expr = (MacroTools.flatten(MacroTools.striplines(expr))) + expr = MacroTools.striplines(expr) s = string(expr) docstring = replace(opfunc.docstring, "\$" => "") diff --git a/src/ops/imported_ops.jl b/src/ops/imported_ops.jl index 9161cd63..93fe206b 100644 --- a/src/ops/imported_ops.jl +++ b/src/ops/imported_ops.jl @@ -1,4 +1,4 @@ -# Autogenerated on 2019-03-15T19:52:15.908 +# Autogenerated on 2019-03-15T20:34:54.858 module Ops import TensorFlow @@ -10,49 +10,89 @@ import TensorFlow: Tensor """ begin - function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - local desc - tf.with_op_name(name, "ReduceJoin") do - desc = tf.NodeDescription("ReduceJoin") - inputs_ = convert(Tensor{String}, inputs_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - tf.add_input(desc, inputs_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function reduce_join_graph(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + local desc + tf.with_op_name(name, "ReduceJoin") do + desc = tf.NodeDescription("ReduceJoin") + begin + begin + inputs_ = convert(Tensor{String}, inputs_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + begin + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + desc = tf.EagerOp("ReduceJoin") + inputs_ = convert(tf.EagerTensor, inputs_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + begin + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + end + res = tf.execute(desc) + node = 
tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) + if tf.in_eager_mode() + reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + else + reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) + end end - if separator !== nothing - desc["separator"] = Base.String(separator) - end - end - tf.Tensor(tf.Operation(desc)) - end - function reduce_join_eager(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - desc = tf.EagerOp("ReduceJoin") - inputs_ = convert(tf.EagerTensor, inputs_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, inputs_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - if separator !== nothing - desc["separator"] = Base.String(separator) - end - res = tf.execute(desc) - node = tf.TapeNode(reduce_join, [inputs_, reduction_indices_], name=nothing, keep_dims=nothing, separator=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_join(inputs_, reduction_indices_; name=nothing, keep_dims=nothing, separator=nothing) - if tf.in_eager_mode() - reduce_join_eager(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) - else - reduce_join_graph(inputs_, reduction_indices_; name=name, keep_dims=keep_dims, separator=separator) - end - end end @@ -62,77 +102,141 @@ end """ begin - function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - local desc - tf.with_op_name(name, "ReduceDataset") do - desc = tf.NodeDescription("ReduceDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) + begin + function reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + local desc + tf.with_op_name(name, "ReduceDataset") do + desc = tf.NodeDescription("ReduceDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, initial_state_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, 
Tstate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + desc = tf.EagerOp("ReduceDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + initial_state_ = convert(tf.EagerTensor, initial_state_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, initial_state_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) + if tf.in_eager_mode() + reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + else + reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) + end end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - end - tf.Tensor(tf.Operation(desc)) - end - function reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, 
f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - desc = tf.EagerOp("ReduceDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - initial_state_ = convert(tf.EagerTensor, initial_state_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - res = tf.execute(desc) - node = tf.TapeNode(reduce_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reduce_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing) - if tf.in_eager_mode() - reduce_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) - else - reduce_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism) - end - end end @@ -142,53 +246,101 @@ end """ begin - function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListFromTensor") do - desc = tf.NodeDescription("TensorListFromTensor") - tensor_ = convert(Tensor{Any}, tensor_) - element_shape_ = convert(Tensor{Any}, element_shape_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_from_tensor_graph(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListFromTensor") do + desc = tf.NodeDescription("TensorListFromTensor") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + element_shape_ = convert(Tensor{Any}, element_shape_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (element_shape_,) = tf.tf_promote(element_shape_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, element_shape_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + 
desc["shape_type"] = Base.identity(shape_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListFromTensor") + tensor_ = convert(tf.EagerTensor, tensor_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, element_shape_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + begin + desc["element_dtype"] = tf.data_type(tensor_) + end + begin + desc["shape_type"] = tf.data_type(element_shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_from_tensor_eager(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListFromTensor") - tensor_ = convert(tf.EagerTensor, tensor_) - element_shape_ = convert(tf.EagerTensor, element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["element_dtype"] = tf.data_type(tensor_) - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_from_tensor, [tensor_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_from_tensor(tensor_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_from_tensor_eager(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_from_tensor_graph(tensor_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end end @@ -198,39 +350,67 @@ end """ begin - function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ExtractJpegShape") do - desc = tf.NodeDescription("ExtractJpegShape") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) + begin + function extract_jpeg_shape_graph(contents_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, 
"ExtractJpegShape") do + desc = tf.NodeDescription("ExtractJpegShape") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) - desc = tf.EagerOp("ExtractJpegShape") - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, contents_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end - res = tf.execute(desc) - node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function extract_jpeg_shape_eager(contents_; name=nothing, output_type=nothing) + desc = tf.EagerOp("ExtractJpegShape") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(extract_jpeg_shape, [contents_], name=nothing, output_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) - else - extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_jpeg_shape(contents_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + extract_jpeg_shape_eager(contents_; name=name, output_type=output_type) + else + extract_jpeg_shape_graph(contents_; name=name, output_type=output_type) + end end - end + end end @@ -240,52 +420,90 @@ end """ begin - function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "Svd") do - desc = tf.NodeDescription("Svd") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) + begin + function svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "Svd") do + desc = tf.NodeDescription("Svd") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + end + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + desc = tf.EagerOp("Svd") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + 
end + end + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + if tf.in_eager_mode() + svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + else + svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + end end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - desc = tf.EagerOp("Svd") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.in_eager_mode() - svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - else - svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - end - end end @@ -295,45 +513,77 @@ end """ begin - function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNextSync") do - desc = tf.NodeDescription("IteratorGetNextSync") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function iterator_get_next_sync_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextSync") do + desc = tf.NodeDescription("IteratorGetNextSync") + begin + begin + iterator_ = convert(Tensor{Any}, iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNextSync") + iterator_ = convert(tf.EagerTensor, iterator_) + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = 
map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_get_next_sync_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorGetNextSync") - iterator_ = convert(tf.EagerTensor, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next_sync, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_sync(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_sync_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_sync_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -343,53 +593,93 @@ end """ begin - function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - local desc - tf.with_op_name(name, "RefEnter") do - desc = tf.NodeDescription("RefEnter") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) - end - if is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) + begin + function ref_enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + local desc + tf.with_op_name(name, "RefEnter") do + desc = tf.NodeDescription("RefEnter") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + begin + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + end + begin + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + end + begin + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + desc = tf.EagerOp("RefEnter") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + begin + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + end + begin + if is_constant !== nothing + 
desc["is_constant"] = Base.Bool(is_constant) + end + end + begin + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + if tf.in_eager_mode() + ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + else + ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + end end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function ref_enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - desc = tf.EagerOp("RefEnter") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) - end - if is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) - end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(ref_enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - if tf.in_eager_mode() - ref_enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) - else - ref_enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) - end - end end @@ -399,35 +689,63 @@ end """ begin - function erf_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Erf") do - desc = tf.NodeDescription("Erf") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function erf_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Erf") do + desc = tf.NodeDescription("Erf") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function erf_eager(x_; name=nothing) - desc = tf.EagerOp("Erf") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(erf, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function erf_eager(x_; name=nothing) + desc = tf.EagerOp("Erf") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(erf, [x_], 
name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erf(x_; name=nothing) - if tf.in_eager_mode() - erf_eager(x_; name=name) - else - erf_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erf(x_; name=nothing) + if tf.in_eager_mode() + erf_eager(x_; name=name) + else + erf_graph(x_; name=name) + end end - end + end end @@ -437,38 +755,64 @@ end """ begin - function lookup_table_export_v2_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableExportV2") do - desc = tf.NodeDescription("LookupTableExportV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - tf.add_input(desc, table_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function lookup_table_export_v2_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExportV2") do + desc = tf.NodeDescription("LookupTableExportV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function lookup_table_export_v2_eager(table_handle_; name=nothing) - desc = tf.EagerOp("LookupTableExportV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - tf.add_input(desc, table_handle_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function lookup_table_export_v2_eager(table_handle_; name=nothing) + desc = tf.EagerOp("LookupTableExportV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_export_v2, [table_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_export_v2_eager(table_handle_; name=name) - else - lookup_table_export_v2_graph(table_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export_v2(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_export_v2_eager(table_handle_; name=name) + else + lookup_table_export_v2_graph(table_handle_; name=name) + end end - end + end end @@ -478,35 +822,63 @@ end """ begin - function round_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Round") do - desc = tf.NodeDescription("Round") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function round_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Round") do + desc = tf.NodeDescription("Round") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function round_eager(x_; 
name=nothing) - desc = tf.EagerOp("Round") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(round, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function round_eager(x_; name=nothing) + desc = tf.EagerOp("Round") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(round, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function round(x_; name=nothing) - if tf.in_eager_mode() - round_eager(x_; name=name) - else - round_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function round(x_; name=nothing) + if tf.in_eager_mode() + round_eager(x_; name=name) + else + round_graph(x_; name=name) + end end - end + end end @@ -516,47 +888,75 @@ end Retrieves a single tensor from the computation outfeed. This operation will """ begin - function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "OutfeedDequeue") do - desc = tf.NodeDescription("OutfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function outfeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeue") do + desc = tf.NodeDescription("OutfeedDequeue") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function outfeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + desc = tf.EagerOp("OutfeedDequeue") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + else + outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + end end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) - end - function outfeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - desc = tf.EagerOp("OutfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if 
shape !== nothing - desc["shape"] = Base.identity(shape) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(outfeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue(; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - outfeed_dequeue_eager(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - else - outfeed_dequeue_graph(; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - end - end end @@ -566,33 +966,57 @@ end """ begin - function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do - desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) + begin + function tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeIsInitializedOp") do + desc = tf.NodeDescription("TensorForestTreeIsInitializedOp") + begin + begin + tree_handle_ = convert(Tensor{Any}, tree_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) - desc = tf.EagerOp("TensorForestTreeIsInitializedOp") - tree_handle_ = convert(tf.EagerTensor, tree_handle_) - tf.add_input(desc, tree_handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=nothing) + desc = tf.EagerOp("TensorForestTreeIsInitializedOp") + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_is_initialized_op, [tree_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) - else - tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_is_initialized_op(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_is_initialized_op_eager(tree_handle_; name=name) + else + tensor_forest_tree_is_initialized_op_graph(tree_handle_; name=name) + end end - end + end end @@ -602,46 +1026,80 @@ end """ begin - function merge_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "Merge") do - desc = tf.NodeDescription("Merge") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function 
merge_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Merge") do + desc = tf.NodeDescription("Merge") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function merge_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("Merge") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function merge_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("Merge") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(merge, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - merge_eager(inputs_; name=name, N=N) - else - merge_graph(inputs_; name=name, N=N) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + merge_eager(inputs_; name=name, N=N) + else + merge_graph(inputs_; name=name, N=N) + end end - end + end end @@ -651,50 +1109,100 @@ end """ begin - function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "HistogramFixedWidth") do - desc = tf.NodeDescription("HistogramFixedWidth") - values_ = convert(Tensor{Any}, values_) - value_range_ = convert(Tensor{Any}, value_range_) - nbins_ = convert(Tensor{Int32}, nbins_) - (values_, value_range_) = tf.tf_promote(values_, value_range_) - tf.add_input(desc, values_) - tf.add_input(desc, value_range_) - tf.add_input(desc, nbins_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function histogram_fixed_width_graph(values_, value_range_, nbins_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HistogramFixedWidth") do + desc = tf.NodeDescription("HistogramFixedWidth") + begin + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + value_range_ = convert(Tensor{Any}, value_range_) + begin + end + end + begin + nbins_ = convert(Tensor{Int32}, nbins_) + begin + end + end + begin + (values_, value_range_) = tf.tf_promote(values_, value_range_) + end + end + begin + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, value_range_) + end + begin + tf.add_input(desc, nbins_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function 
histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) + desc = tf.EagerOp("HistogramFixedWidth") + values_ = convert(tf.EagerTensor, values_) + value_range_ = convert(tf.EagerTensor, value_range_) + nbins_ = convert(tf.EagerTensor, nbins_) + begin + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, value_range_) + end + begin + tf.add_input(desc, nbins_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + begin + desc["T"] = tf.data_type(value_range_) + end + res = tf.execute(desc) + node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) + else + histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function histogram_fixed_width_eager(values_, value_range_, nbins_; name=nothing, dtype=nothing) - desc = tf.EagerOp("HistogramFixedWidth") - values_ = convert(tf.EagerTensor, values_) - value_range_ = convert(tf.EagerTensor, value_range_) - nbins_ = convert(tf.EagerTensor, nbins_) - tf.add_input(desc, values_) - tf.add_input(desc, value_range_) - tf.add_input(desc, nbins_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(values_) - desc["T"] = tf.data_type(value_range_) - res = tf.execute(desc) - node = tf.TapeNode(histogram_fixed_width, [values_, value_range_, nbins_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_fixed_width(values_, value_range_, nbins_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - histogram_fixed_width_eager(values_, value_range_, nbins_; name=name, dtype=dtype) - else - histogram_fixed_width_graph(values_, value_range_, nbins_; name=name, dtype=dtype) - end - end end @@ -704,35 +1212,63 @@ end """ begin - function asin_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Asin") do - desc = tf.NodeDescription("Asin") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function asin_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Asin") do + desc = tf.NodeDescription("Asin") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function asin_eager(x_; name=nothing) - desc = tf.EagerOp("Asin") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(asin, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function asin_eager(x_; name=nothing) + desc = tf.EagerOp("Asin") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = 
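# Dtype plumbing: the graph builders lean on `tf.tf_promote` plus graph-time
# type inference, but an eager op has no inference pass, so each `*_eager`
# function sets the "T" attribute explicitly from the runtime handle via
# `tf.data_type`, as on the next line. When several inputs share one
# attribute (as in `histogram_fixed_width_eager` above, which assigns
# desc["T"] twice), the last write wins, which is only safe when callers pass
# matching element types; the graph path enforces that with `tf.tf_promote`.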
tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(asin, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asin(x_; name=nothing) - if tf.in_eager_mode() - asin_eager(x_; name=name) - else - asin_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asin(x_; name=nothing) + if tf.in_eager_mode() + asin_eager(x_; name=name) + else + asin_graph(x_; name=name) + end end - end + end end @@ -742,46 +1278,86 @@ end """ begin - function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Any") do - desc = tf.NodeDescription("Any") - input_ = convert(Tensor{Bool}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function any_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Any") do + desc = tf.NodeDescription("Any") + begin + begin + input_ = convert(Tensor{Bool}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Any") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function any_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("Any") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = 
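# Index convention: `any_graph` above shifts `reduction_indices_` down by one
# (`reduction_indices_ - 1`) to translate Julia's 1-based dimension numbering
# into TensorFlow's 0-based convention before wiring the tensor in. The eager
# variant shown here converts the indices straight to an `EagerTensor` with
# no such shift, so the two paths do not treat dimension numbers identically.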
tf.TapeNode(any, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function any(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - any_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - any_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -791,40 +1367,78 @@ end """ begin - function rsqrt_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "RsqrtGrad") do - desc = tf.NodeDescription("RsqrtGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function rsqrt_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "RsqrtGrad") do + desc = tf.NodeDescription("RsqrtGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rsqrt_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("RsqrtGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rsqrt_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("RsqrtGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(rsqrt_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - rsqrt_grad_eager(y_, dy_; name=name) - else - rsqrt_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + rsqrt_grad_eager(y_, dy_; name=name) + else + rsqrt_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -834,47 +1448,99 @@ end """ begin - function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatter") do - desc = tf.NodeDescription("TensorArrayScatter") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) - desc = 
tf.EagerOp("TensorArrayScatter") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) - else - tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) + begin + function tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatter") do + desc = tf.NodeDescription("TensorArrayScatter") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayScatter") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter, [handle_, indices_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter(handle_, indices_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_scatter_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_graph(handle_, indices_, value_, flow_in_; name=name) + end end - end + end end @@ -884,50 +1550,92 @@ end """ begin - function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) - local desc - tf.with_op_name(name, "DynamicPartition") do - desc = tf.NodeDescription("DynamicPartition") - data_ = convert(Tensor{Any}, data_) - partitions_ = convert(Tensor{Int32}, partitions_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, partitions_) - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) + begin + function dynamic_partition_graph(data_, partitions_; name=nothing, num_partitions=nothing) + local desc + tf.with_op_name(name, 
"DynamicPartition") do + desc = tf.NodeDescription("DynamicPartition") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + partitions_ = convert(Tensor{Int32}, partitions_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, partitions_) + end + end + begin + begin + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_partitions + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) + desc = tf.EagerOp("DynamicPartition") + data_ = convert(tf.EagerTensor, data_) + partitions_ = convert(tf.EagerTensor, partitions_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, partitions_) + end + end + begin + begin + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + end + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) + if tf.in_eager_mode() + dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) + else + dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_partitions - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function dynamic_partition_eager(data_, partitions_; name=nothing, num_partitions=nothing) - desc = tf.EagerOp("DynamicPartition") - data_ = convert(tf.EagerTensor, data_) - partitions_ = convert(tf.EagerTensor, partitions_) - tf.add_input(desc, data_) - tf.add_input(desc, partitions_) - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(dynamic_partition, [data_, partitions_], name=nothing, num_partitions=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_partition(data_, partitions_; name=nothing, num_partitions=nothing) - if tf.in_eager_mode() - dynamic_partition_eager(data_, partitions_; name=name, num_partitions=num_partitions) - else - dynamic_partition_graph(data_, partitions_; name=name, num_partitions=num_partitions) - end - end end @@ -937,49 +1645,89 @@ end """ begin - function experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do - desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_threads_ = convert(Tensor{Int64}, num_threads_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_threads_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function 
experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalPrivateThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalPrivateThreadPoolDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + num_threads_ = convert(Tensor{Int64}, num_threads_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, num_threads_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_threads_ = convert(tf.EagerTensor, num_threads_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, num_threads_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalPrivateThreadPoolDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - num_threads_ = convert(tf.EagerTensor, num_threads_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_threads_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_private_thread_pool_dataset, [input_dataset_, num_threads_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_private_thread_pool_dataset(input_dataset_, num_threads_; name=nothing, output_types=nothing, 
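# Dataset ops such as `ExperimentalPrivateThreadPoolDataset` above carry no
# "T" dtype attribute; the element structure travels in the `output_types`
# and `output_shapes` list attributes instead, copied through unchanged with
# `map(Base.identity, ...)` on both the graph and eager paths.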
output_shapes=nothing) - if tf.in_eager_mode() - experimental_private_thread_pool_dataset_eager(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_private_thread_pool_dataset_graph(input_dataset_, num_threads_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -989,33 +1737,57 @@ end """ begin - function reader_serialize_state_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderSerializeState") do - desc = tf.NodeDescription("ReaderSerializeState") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_serialize_state_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeState") do + desc = tf.NodeDescription("ReaderSerializeState") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_serialize_state_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderSerializeState") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_serialize_state_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderSerializeState") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_serialize_state, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_serialize_state_eager(reader_handle_; name=name) - else - reader_serialize_state_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_serialize_state_eager(reader_handle_; name=name) + else + reader_serialize_state_graph(reader_handle_; name=name) + end end - end + end end @@ -1025,40 +1797,78 @@ end """ begin - function right_shift_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "RightShift") do - desc = tf.NodeDescription("RightShift") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function right_shift_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "RightShift") do + desc = tf.NodeDescription("RightShift") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function right_shift_eager(x_, y_; name=nothing) - desc = tf.EagerOp("RightShift") - x_ = convert(tf.EagerTensor, x_) - 
y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(right_shift, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function right_shift_eager(x_, y_; name=nothing) + desc = tf.EagerOp("RightShift") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(right_shift, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function right_shift(x_, y_; name=nothing) - if tf.in_eager_mode() - right_shift_eager(x_, y_; name=name) - else - right_shift_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function right_shift(x_, y_; name=nothing) + if tf.in_eager_mode() + right_shift_eager(x_, y_; name=name) + else + right_shift_graph(x_, y_; name=name) + end end - end + end end @@ -1068,59 +1878,103 @@ end """ begin - function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool3D") do - desc = tf.NodeDescription("AvgPool3D") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function avg_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3D") do + desc = tf.NodeDescription("AvgPool3D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, 
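# Attribute coercion: scalar attributes are normalized with `Base.Bool`,
# `Base.Int`, or `Base.String`, while list-valued attributes such as `ksize`
# and `strides` pass through `map(Base.identity, ...)`. A call sketch for the
# pooling op above (illustrative window and stride values, NDHWC layout):
#
#     y = avg_pool3d(x; ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
#                    padding="VALID", data_format="NDHWC")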
padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function avg_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("AvgPool3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(avg_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -1130,41 +1984,73 @@ end """ begin - function encode_png_graph(image_; name=nothing, compression=nothing) - local desc - tf.with_op_name(name, "EncodePng") do - desc = tf.NodeDescription("EncodePng") - image_ = convert(Tensor{UInt8}, image_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - if compression !== nothing - desc["compression"] = Base.Int(compression) + begin + function encode_png_graph(image_; name=nothing, compression=nothing) + local desc + tf.with_op_name(name, "EncodePng") do + desc = tf.NodeDescription("EncodePng") + begin + begin + image_ = convert(Tensor{UInt8}, image_) + begin + end + end + begin + (image_,) = tf.tf_promote(image_) + end + end + begin + begin + tf.add_input(desc, image_) + end + end + begin + begin + if compression !== nothing + desc["compression"] = Base.Int(compression) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function encode_png_eager(image_; name=nothing, compression=nothing) - desc = tf.EagerOp("EncodePng") - image_ = convert(tf.EagerTensor, image_) - tf.add_input(desc, image_) - if compression !== nothing - desc["compression"] = Base.Int(compression) - end - desc["T"] = tf.data_type(image_) - res = tf.execute(desc) - node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function encode_png_eager(image_; name=nothing, compression=nothing) + desc = tf.EagerOp("EncodePng") + image_ = convert(tf.EagerTensor, image_) + begin + begin + 
tf.add_input(desc, image_) + end + end + begin + begin + if compression !== nothing + desc["compression"] = Base.Int(compression) + end + end + end + begin + desc["T"] = tf.data_type(image_) + end + res = tf.execute(desc) + node = tf.TapeNode(encode_png, [image_], name=nothing, compression=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_png(image_; name=nothing, compression=nothing) - if tf.in_eager_mode() - encode_png_eager(image_; name=name, compression=compression) - else - encode_png_graph(image_; name=name, compression=compression) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_png(image_; name=nothing, compression=nothing) + if tf.in_eager_mode() + encode_png_eager(image_; name=name, compression=compression) + else + encode_png_graph(image_; name=name, compression=compression) + end end - end + end end @@ -1174,59 +2060,103 @@ end Debug Identity Op. """ begin - function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - local desc - tf.with_op_name(name, "DebugIdentity") do - desc = tf.NodeDescription("DebugIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) + begin + function debug_identity_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugIdentity") do + desc = tf.NodeDescription("DebugIdentity") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + end + begin + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + desc = tf.EagerOp("DebugIdentity") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + end + begin + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + if tf.in_eager_mode() + debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + else + debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function debug_identity_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - desc = tf.EagerOp("DebugIdentity") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(debug_identity, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_identity(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.in_eager_mode() - debug_identity_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - else - debug_identity_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - end - end end @@ -1236,35 +2166,63 @@ end """ begin - function imag_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Imag") do - desc = tf.NodeDescription("Imag") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function imag_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Imag") do + desc = tf.NodeDescription("Imag") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function imag_eager(input_; name=nothing) - desc = tf.EagerOp("Imag") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(imag, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function imag_eager(input_; name=nothing) + desc = tf.EagerOp("Imag") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(imag, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function imag(input_; name=nothing) - if tf.in_eager_mode() - imag_eager(input_; name=name) - else - imag_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function imag(input_; name=nothing) + if tf.in_eager_mode() + imag_eager(input_; name=name) + else + imag_graph(input_; name=name) + end end - end + end end @@ -1274,85 +2232,203 @@ end """ begin - function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do - desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyFtrlV2") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(l2_shrinkage_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, 
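# Sparse-apply ops split their dtype attributes: "T" is read from the
# promoted numeric operands (grad, lr, l1, l2, l2_shrinkage, lr_power) while
# "Tindices" is read separately from the integer `indices_` handle, so Int32
# and Int64 index tensors can both drive the same kernel.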
lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrlV2") do + desc = tf.NodeDescription("ResourceSparseApplyFtrlV2") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyFtrlV2") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + 
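#= A note on the run of `desc["T"] = tf.data_type(...)` assignments that
   follows: each write overwrites the previous one, so the single "T" attr
   ends up holding the dtype of the *last* "T"-typed input. The graph path
   makes this safe by funnelling those inputs through `tf.tf_promote` first;
   the eager path skips promotion, so it presumably relies on the caller
   passing matching dtypes. "Tindices" is recorded separately from `indices_`. =#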
desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(l2_shrinkage_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + resource_sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -1362,59 +2438,95 @@ end """ begin - function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StageClear") do - desc = tf.NodeDescription("StageClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function stage_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageClear") do + desc = tf.NodeDescription("StageClear") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stage_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StageClear") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, 
container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stage_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("StageClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(stage_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -1424,35 +2536,63 @@ end """ begin - function sign_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sign") do - desc = tf.NodeDescription("Sign") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function sign_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sign") do + desc = tf.NodeDescription("Sign") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sign_eager(x_; name=nothing) - desc = tf.EagerOp("Sign") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(sign, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sign_eager(x_; name=nothing) + desc = tf.EagerOp("Sign") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(sign, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sign(x_; name=nothing) - if tf.in_eager_mode() - sign_eager(x_; name=name) - else - sign_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sign(x_; name=nothing) + if tf.in_eager_mode() + sign_eager(x_; name=name) + else + sign_graph(x_; name=name) + end end - end + end end @@ -1462,35 +2602,63 @@ end """ begin - function population_count_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "PopulationCount") do - desc = tf.NodeDescription("PopulationCount") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function population_count_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "PopulationCount") do + desc = tf.NodeDescription("PopulationCount") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function population_count_eager(x_; name=nothing) - desc = tf.EagerOp("PopulationCount") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(population_count, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function population_count_eager(x_; name=nothing) + desc = tf.EagerOp("PopulationCount") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(population_count, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function population_count(x_; name=nothing) - if tf.in_eager_mode() - population_count_eager(x_; name=name) - else - population_count_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function population_count(x_; name=nothing) + if tf.in_eager_mode() + population_count_eager(x_; name=name) + else + population_count_graph(x_; name=name) + end end - end + end end @@ -1500,35 +2668,63 @@ end """ begin - function neg_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Neg") do - desc = tf.NodeDescription("Neg") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function neg_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Neg") do + desc = tf.NodeDescription("Neg") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function neg_eager(x_; name=nothing) - desc = tf.EagerOp("Neg") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(neg, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function neg_eager(x_; name=nothing) + desc = tf.EagerOp("Neg") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(neg, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg(x_; name=nothing) - if tf.in_eager_mode() - 
neg_eager(x_; name=name) - else - neg_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg(x_; name=nothing) + if tf.in_eager_mode() + neg_eager(x_; name=name) + else + neg_graph(x_; name=name) + end end - end + end end @@ -1538,41 +2734,65 @@ end """ begin - function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "AnonymousIterator") do - desc = tf.NodeDescription("AnonymousIterator") - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function anonymous_iterator_graph(; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "AnonymousIterator") do + desc = tf.NodeDescription("AnonymousIterator") + begin + end + begin + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function anonymous_iterator_eager(; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("AnonymousIterator") - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function anonymous_iterator_eager(; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("AnonymousIterator") + begin + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(anonymous_iterator, [], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes) - else - anonymous_iterator_graph(; name=name, output_types=output_types, output_shapes=output_shapes) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function anonymous_iterator(; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + anonymous_iterator_eager(; name=name, output_types=output_types, output_shapes=output_shapes) + else + anonymous_iterator_graph(; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end + end end @@ -1582,53 +2802,109 @@ end """ begin - function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceSum") do - desc = tf.NodeDescription("SparseReduceSum") - input_indices_ = convert(Tensor{Int64}, 
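#= The convert-then-promote idiom used throughout: inputs with a fixed TF
   dtype are pinned by `convert` (here the indices and the dense shape become
   Tensor{Int64}, the reduction axes Tensor{Int32}), while the dtype-generic
   values input goes through `tf.tf_promote` so plain Julia numbers and arrays
   are lifted to a common element type. The resulting call surface (argument
   names illustrative):

       sparse_reduce_sum(indices, values, dense_shape, axes; keep_dims=false)
=#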
input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSum") do + desc = tf.NodeDescription("SparseReduceSum") + begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_values_ = convert(Tensor{Any}, input_values_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + begin + end + end + begin + (input_values_,) = tf.tf_promote(input_values_) + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceSum") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("SparseReduceSum") - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_values_ = convert(tf.EagerTensor, input_values_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, 
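#= In the eager wrappers, `tf.execute(desc)` runs the op and returns a vector
   of result tensors. The generated code then builds a `tf.TapeNode` holding
   the primal inputs and attaches it to the first output with `tf.add_node`,
   so a later gradient-tape replay can reconstruct how the value was produced.
   Single-output ops return `res[1]`; multi-output ops (e.g. the
   CudnnRNNParamsToCanonical wrapper below) return `res` whole. =#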
input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_sum, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_sum_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_sum_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - end - end end @@ -1638,39 +2914,67 @@ end """ begin - function string_length_graph(input_; name=nothing, unit=nothing) - local desc - tf.with_op_name(name, "StringLength") do - desc = tf.NodeDescription("StringLength") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if unit !== nothing - desc["unit"] = Base.String(unit) + begin + function string_length_graph(input_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "StringLength") do + desc = tf.NodeDescription("StringLength") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function string_length_eager(input_; name=nothing, unit=nothing) - desc = tf.EagerOp("StringLength") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end - res = tf.execute(desc) - node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function string_length_eager(input_; name=nothing, unit=nothing) + desc = tf.EagerOp("StringLength") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_length, [input_], name=nothing, unit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_length(input_; name=nothing, unit=nothing) - if tf.in_eager_mode() - string_length_eager(input_; name=name, unit=unit) - else - string_length_graph(input_; name=name, unit=unit) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_length(input_; name=nothing, unit=nothing) + if tf.in_eager_mode() + string_length_eager(input_; name=name, unit=unit) + else + string_length_graph(input_; name=name, unit=unit) + end end - end + end end @@ -1680,61 +2984,109 @@ end """ begin - function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "FilterDataset") do - desc = tf.NodeDescription("FilterDataset") - input_dataset_ = convert(Tensor{Any}, 
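#= `FilterDataset` is the first op in this hunk with a list input: in graph
   mode `other_arguments_` is converted element-wise
   (`[convert(Tensor{Any}, x) for x = other_arguments_]`) and the whole list
   is appended with one `tf.add_input`. The function-valued `predicate` attr
   is passed through `Base.identity` rather than coerced to a concrete type. =#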
input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if predicate !== nothing - desc["predicate"] = Base.identity(predicate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function filter_dataset_graph(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterDataset") do + desc = tf.NodeDescription("FilterDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FilterDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if predicate !== nothing + desc["predicate"] = Base.identity(predicate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function filter_dataset_eager(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, 
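#= One asymmetry worth flagging in the eager path that follows: the list input
   is converted with a single `convert(tf.EagerTensor, other_arguments_)`
   rather than element-wise. A mirror of the graph path would read

       other_arguments_ = [convert(tf.EagerTensor, x) for x in other_arguments_]

   (a sketch of what element-wise conversion would look like, not what the
   generator currently emits). =#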
output_shapes=nothing) - desc = tf.EagerOp("FilterDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if predicate !== nothing - desc["predicate"] = Base.identity(predicate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(filter_dataset, [input_dataset_, other_arguments_], name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_dataset(input_dataset_, other_arguments_; name=nothing, predicate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - filter_dataset_eager(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - filter_dataset_graph(input_dataset_, other_arguments_; name=name, predicate=predicate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -1744,64 +3096,118 @@ end """ begin - function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3D") do - desc = tf.NodeDescription("Conv3D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) + begin + function conv3d_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3D") do + desc = tf.NodeDescription("Conv3D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3D") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, 
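#= Conv3D carries its hyperparameters as attrs rather than inputs. A call
   sketch (shapes and values illustrative; `x` is a 5-D NDHWC tensor, `w` a
   5-D filter):

       y = conv3d(x, w; strides=[1, 1, 1, 1, 1], padding="SAME",
                  data_format="NDHWC")

   Both inputs share the "T" attr, so `x` and `w` must have, or be promoted
   to, the same element type. =#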
input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function conv3d_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv3D") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(conv3d, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -1811,58 +3217,92 @@ end Retrieve embedding parameters for a single table. 
""" begin - function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res 
= tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -1872,33 +3312,57 @@ end """ begin - function optional_has_value_graph(optional_; name=nothing) - local desc - tf.with_op_name(name, "OptionalHasValue") do - desc = tf.NodeDescription("OptionalHasValue") - optional_ = convert(Tensor{Any}, optional_) - tf.add_input(desc, optional_) + begin + function optional_has_value_graph(optional_; name=nothing) + local desc + tf.with_op_name(name, "OptionalHasValue") do + desc = tf.NodeDescription("OptionalHasValue") + begin + begin + optional_ = convert(Tensor{Any}, optional_) + begin + end + end + end + begin + begin + tf.add_input(desc, optional_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function optional_has_value_eager(optional_; name=nothing) - desc = tf.EagerOp("OptionalHasValue") - optional_ = convert(tf.EagerTensor, optional_) - tf.add_input(desc, optional_) - res = tf.execute(desc) - node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function optional_has_value_eager(optional_; name=nothing) + desc = tf.EagerOp("OptionalHasValue") + optional_ = convert(tf.EagerTensor, optional_) + begin + begin + tf.add_input(desc, optional_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(optional_has_value, [optional_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_has_value(optional_; name=nothing) - if tf.in_eager_mode() - optional_has_value_eager(optional_; name=name) - else - optional_has_value_graph(optional_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_has_value(optional_; name=nothing) + if tf.in_eager_mode() + optional_has_value_eager(optional_; name=name) + else + optional_has_value_graph(optional_; name=name) + end end - end + end end @@ -1908,92 +3372,218 @@ end """ begin - function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ApplyAdam") do - desc = tf.NodeDescription("ApplyAdam") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, v_, 
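#= ApplyAdam threads all ten inputs through a single promotion so that one
   "T" attr fits them all. For reference, the update this op applies is the
   standard Adam rule, as documented for TensorFlow's ApplyAdam kernel:

       lr_t = lr * sqrt(1 - beta2_power) / (1 - beta1_power)
       m    = beta1 .* m .+ (1 - beta1) .* grad
       v    = beta2 .* v .+ (1 - beta2) .* grad .^ 2
       var -= lr_t .* m ./ (sqrt.(v) .+ epsilon)
=#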
beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ApplyAdam") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - beta1_power_ = convert(tf.EagerTensor, beta1_power_) - beta2_power_ = convert(tf.EagerTensor, beta2_power_) - lr_ = convert(tf.EagerTensor, lr_) - beta1_ = convert(tf.EagerTensor, beta1_) - beta2_ = convert(tf.EagerTensor, beta2_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(v_) - desc["T"] = tf.data_type(beta1_power_) - desc["T"] = tf.data_type(beta2_power_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(beta1_) - desc["T"] = tf.data_type(beta2_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + begin + function apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ApplyAdam") do + desc = tf.NodeDescription("ApplyAdam") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + beta1_power_ = convert(Tensor{Any}, beta1_power_) + begin + end + end + begin + beta2_power_ = convert(Tensor{Any}, 
beta2_power_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + beta1_ = convert(Tensor{Any}, beta1_) + begin + end + end + begin + beta2_ = convert(Tensor{Any}, beta2_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ApplyAdam") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(v_) + end + begin + desc["T"] = tf.data_type(beta1_power_) + end + begin + desc["T"] = tf.data_type(beta2_power_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(beta1_) + end + begin + desc["T"] = tf.data_type(beta2_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, 
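#= Call-site sketch for the dispatcher defined here (values illustrative; the
   state arguments `var`, `m`, `v` would normally be TF variables updated in
   place):

       apply_adam(var, m, v, beta1_power, beta2_power,
                  0.001, 0.9, 0.999, 1e-8, grad;
                  use_locking=false, use_nesterov=false)
=#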
use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end + end end @@ -2003,94 +3593,176 @@ end """ begin - function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNParamsToCanonical") do - desc = tf.NodeDescription("CudnnRNNParamsToCanonical") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - params_ = convert(Tensor{Any}, params_) - (params_,) = tf.tf_promote(params_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, params_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsToCanonical") do + desc = tf.NodeDescription("CudnnRNNParamsToCanonical") + begin + begin + num_layers_ = convert(Tensor{Int32}, num_layers_) + begin + end + end + begin + num_units_ = convert(Tensor{Int32}, num_units_) + begin + end + end + begin + input_size_ = convert(Tensor{Int32}, input_size_) + begin + end + end + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + (params_,) = tf.tf_promote(params_) + end + end + begin + begin + tf.add_input(desc, num_layers_) + end + begin + tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, 
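#= CudnnRNNParamsToCanonical is a multi-output op: the graph builder above
   collects outputs by index (`for out_idx = 1:2 push!(out, tf.Tensor(op,
   out_idx))`) into a `tf.Tensor[]` and returns the vector, and the eager
   wrapper accordingly returns `res` rather than `res[1]`. The two slots
   correspond, per the TF op registry, to the canonical weight and bias
   outputs. =#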
input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNParamsToCanonical") + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) + params_ = convert(tf.EagerTensor, params_) + begin + begin + tf.add_input(desc, num_layers_) + end + begin + tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(params_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNParamsToCanonical") - num_layers_ = convert(tf.EagerTensor, num_layers_) - num_units_ = convert(tf.EagerTensor, num_units_) - input_size_ = convert(tf.EagerTensor, input_size_) - params_ = convert(tf.EagerTensor, params_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, params_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 
!== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(params_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_params_to_canonical, [num_layers_, num_units_, input_size_, params_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_to_canonical(num_layers_, num_units_, input_size_, params_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_params_to_canonical_eager(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_params_to_canonical_graph(num_layers_, num_units_, input_size_, params_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - end - end end @@ -2100,37 +3772,69 @@ end """ begin - function irfft3d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT3D") do - desc = tf.NodeDescription("IRFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function irfft3d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT3D") do + desc = tf.NodeDescription("IRFFT3D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function irfft3d_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("IRFFT3D") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function irfft3d_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("IRFFT3D") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(irfft3d, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft3d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft3d_eager(input_, fft_length_; name=name) - else - irfft3d_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft3d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft3d_eager(input_, fft_length_; name=name) + else + irfft3d_graph(input_, fft_length_; name=name) + end end - 
end + end end @@ -2140,35 +3844,63 @@ end """ begin - function angle_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Angle") do - desc = tf.NodeDescription("Angle") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function angle_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Angle") do + desc = tf.NodeDescription("Angle") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function angle_eager(input_; name=nothing) - desc = tf.EagerOp("Angle") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(angle, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function angle_eager(input_; name=nothing) + desc = tf.EagerOp("Angle") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(angle, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function angle(input_; name=nothing) - if tf.in_eager_mode() - angle_eager(input_; name=name) - else - angle_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function angle(input_; name=nothing) + if tf.in_eager_mode() + angle_eager(input_; name=name) + else + angle_graph(input_; name=name) + end end - end + end end @@ -2178,41 +3910,65 @@ end """ begin - function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do - desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function tensor_forest_tree_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeResourceHandleOp") do + desc = tf.NodeDescription("TensorForestTreeResourceHandleOp") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("TensorForestTreeResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_forest_tree_resource_handle_op_eager(; 
name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TensorForestTreeResourceHandleOp") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + tensor_forest_tree_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -2222,74 +3978,124 @@ end """ begin - function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "LearnedUnigramCandidateSampler") do - desc = tf.NodeDescription("LearnedUnigramCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) + begin + function learned_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LearnedUnigramCandidateSampler") do + desc = tf.NodeDescription("LearnedUnigramCandidateSampler") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("LearnedUnigramCandidateSampler") + true_classes_ = convert(tf.EagerTensor, 
true_classes_) + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function learned_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("LearnedUnigramCandidateSampler") - true_classes_ = convert(tf.EagerTensor, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(learned_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function learned_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - learned_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - learned_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - end - end 
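The LearnedUnigramCandidateSampler wrappers above illustrate how the generator treats multi-output ops: the graph builder collects one tf.Tensor per output index, and the eager path returns the whole vector produced by tf.execute ("return res") instead of unwrapping it to res[1] the way single-output ops such as irfft3d do. A minimal sketch of that convention, using only the tf.Tensor / tf.Operation API already present in this diff (collect_outputs and num_outputs are illustrative names, not part of the generated file; the per-op constant is 3 for this sampler):

    # Illustrative helper only: the generator inlines this logic into each op.
    function collect_outputs(op, num_outputs)
        out = tf.Tensor[]
        for out_idx = 1:num_outputs
            push!(out, tf.Tensor(op, out_idx))  # wrap each output slot of the node
        end
        return num_outputs == 1 ? out[1] : out  # single-output ops unwrap the vector
    end
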
end @@ -2299,35 +4105,55 @@ end A graph node which represents an argument to a function. """ begin - function _arg_graph(; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_Arg") do - desc = tf.NodeDescription("_Arg") - if index !== nothing - desc["index"] = Base.Int(index) + begin + function _arg_graph(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Arg") do + desc = tf.NodeDescription("_Arg") + begin + end + begin + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _arg_eager(; name=nothing, index=nothing) - desc = tf.EagerOp("_Arg") - if index !== nothing - desc["index"] = Base.Int(index) - end - res = tf.execute(desc) - node = tf.TapeNode(_arg, [], name=nothing, index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _arg_eager(; name=nothing, index=nothing) + desc = tf.EagerOp("_Arg") + begin + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_arg, [], name=nothing, index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _arg(; name=nothing, index=nothing) - if tf.in_eager_mode() - _arg_eager(; name=name, index=index) - else - _arg_graph(; name=name, index=index) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _arg(; name=nothing, index=nothing) + if tf.in_eager_mode() + _arg_eager(; name=name, index=index) + else + _arg_graph(; name=name, index=index) + end end - end + end end @@ -2337,35 +4163,63 @@ end """ begin - function matrix_square_root_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixSquareRoot") do - desc = tf.NodeDescription("MatrixSquareRoot") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function matrix_square_root_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSquareRoot") do + desc = tf.NodeDescription("MatrixSquareRoot") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_square_root_eager(input_; name=nothing) - desc = tf.EagerOp("MatrixSquareRoot") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_square_root, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_square_root_eager(input_; name=nothing) + desc = tf.EagerOp("MatrixSquareRoot") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_square_root, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_square_root(input_; name=nothing) - if tf.in_eager_mode() - 
matrix_square_root_eager(input_; name=name) - else - matrix_square_root_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_square_root(input_; name=nothing) + if tf.in_eager_mode() + matrix_square_root_eager(input_; name=name) + else + matrix_square_root_graph(input_; name=name) + end end - end + end end @@ -2375,48 +4229,102 @@ end """ begin - function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseMul") do - desc = tf.NodeDescription("SparseDenseCwiseMul") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - desc = tf.EagerOp("SparseDenseCwiseMul") - sp_indices_ = convert(tf.EagerTensor, sp_indices_) - sp_values_ = convert(tf.EagerTensor, sp_values_) - sp_shape_ = convert(tf.EagerTensor, sp_shape_) - dense_ = convert(tf.EagerTensor, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - desc["T"] = tf.data_type(sp_values_) - desc["T"] = tf.data_type(dense_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + begin + function sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseMul") do + desc = tf.NodeDescription("SparseDenseCwiseMul") + begin + begin + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + begin + end + end + begin + sp_values_ = convert(Tensor{Any}, sp_values_) + begin + end + end + begin + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + begin + end + end + begin + dense_ = convert(Tensor{Any}, dense_) + begin + end + end + begin + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + end + end + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + tf.add_input(desc, dense_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseMul") + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + 
tf.add_input(desc, dense_) + end + end + begin + end + begin + desc["T"] = tf.data_type(sp_values_) + end + begin + desc["T"] = tf.data_type(dense_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_mul, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_mul(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_mul_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_mul_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end + end end @@ -2426,54 +4334,96 @@ end """ begin - function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcatV3") do - desc = tf.NodeDescription("TensorArrayConcatV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_concat_v3_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV3") do + desc = tf.NodeDescription("TensorArrayConcatV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcatV3") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = 
Base.identity(element_shape_except0) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tensor_array_concat_v3_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - desc = tf.EagerOp("TensorArrayConcatV3") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_concat_v3, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v3(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_v3_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_v3_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - end - end end @@ -2483,33 +4433,57 @@ end """ begin - function unicode_script_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "UnicodeScript") do - desc = tf.NodeDescription("UnicodeScript") - input_ = convert(Tensor{Int32}, input_) - tf.add_input(desc, input_) + begin + function unicode_script_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "UnicodeScript") do + desc = tf.NodeDescription("UnicodeScript") + begin + begin + input_ = convert(Tensor{Int32}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function unicode_script_eager(input_; name=nothing) - desc = tf.EagerOp("UnicodeScript") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(unicode_script, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function unicode_script_eager(input_; name=nothing) + desc = tf.EagerOp("UnicodeScript") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(unicode_script, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_script(input_; name=nothing) - if tf.in_eager_mode() - unicode_script_eager(input_; name=name) - else - unicode_script_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_script(input_; name=nothing) + if tf.in_eager_mode() + unicode_script_eager(input_; name=name) + else + unicode_script_graph(input_; name=name) + end end - end + end end @@ -2519,40 +4493,78 @@ end """ begin - function batch_cholesky_grad_graph(l_, grad_; name=nothing) - local desc - tf.with_op_name(name, "BatchCholeskyGrad") do - desc = tf.NodeDescription("BatchCholeskyGrad") - l_ = convert(Tensor{Any}, l_) - grad_ = convert(Tensor{Any}, grad_) - (l_, grad_) = 
tf.tf_promote(l_, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) + begin + function batch_cholesky_grad_graph(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholeskyGrad") do + desc = tf.NodeDescription("BatchCholeskyGrad") + begin + begin + l_ = convert(Tensor{Any}, l_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (l_, grad_) = tf.tf_promote(l_, grad_) + end + end + begin + begin + tf.add_input(desc, l_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_cholesky_grad_eager(l_, grad_; name=nothing) - desc = tf.EagerOp("BatchCholeskyGrad") - l_ = convert(tf.EagerTensor, l_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) - desc["T"] = tf.data_type(l_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_cholesky_grad_eager(l_, grad_; name=nothing) + desc = tf.EagerOp("BatchCholeskyGrad") + l_ = convert(tf.EagerTensor, l_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, l_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + end + begin + desc["T"] = tf.data_type(l_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_cholesky_grad, [l_, grad_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing) - if tf.in_eager_mode() - batch_cholesky_grad_eager(l_, grad_; name=name) - else - batch_cholesky_grad_graph(l_, grad_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky_grad(l_, grad_; name=nothing) + if tf.in_eager_mode() + batch_cholesky_grad_eager(l_, grad_; name=name) + else + batch_cholesky_grad_graph(l_, grad_; name=name) + end end - end + end end @@ -2562,48 +4574,92 @@ end """ begin - function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Mean") do - desc = tf.NodeDescription("Mean") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function mean_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Mean") do + desc = tf.NodeDescription("Mean") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + 
end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Mean") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function mean_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("Mean") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(mean, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mean(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - mean_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - mean_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -2613,33 +4669,57 @@ end """ begin - function batch_fft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT") do - desc = tf.NodeDescription("BatchFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_fft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT") do + desc = tf.NodeDescription("BatchFFT") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_fft_eager(input_; name=nothing) - desc = tf.EagerOp("BatchFFT") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_fft, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_fft_eager(input_; name=nothing) + desc = tf.EagerOp("BatchFFT") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_fft, [input_], name=nothing, 
res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft(input_; name=nothing) - if tf.in_eager_mode() - batch_fft_eager(input_; name=name) - else - batch_fft_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft(input_; name=nothing) + if tf.in_eager_mode() + batch_fft_eager(input_; name=name) + else + batch_fft_graph(input_; name=name) + end end - end + end end @@ -2649,35 +4729,63 @@ end """ begin - function sin_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sin") do - desc = tf.NodeDescription("Sin") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function sin_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sin") do + desc = tf.NodeDescription("Sin") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sin_eager(x_; name=nothing) - desc = tf.EagerOp("Sin") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(sin, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sin_eager(x_; name=nothing) + desc = tf.EagerOp("Sin") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(sin, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sin(x_; name=nothing) - if tf.in_eager_mode() - sin_eager(x_; name=name) - else - sin_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sin(x_; name=nothing) + if tf.in_eager_mode() + sin_eager(x_; name=name) + else + sin_graph(x_; name=name) + end end - end + end end @@ -2687,41 +4795,65 @@ end """ begin - function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do - desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function boosted_trees_ensemble_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesEnsembleResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesEnsembleResourceHandleOp") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function boosted_trees_ensemble_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp") - if container !== nothing - 
desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function boosted_trees_ensemble_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("BoostedTreesEnsembleResourceHandleOp") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_ensemble_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_ensemble_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + boosted_trees_ensemble_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_ensemble_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -2731,66 +4863,124 @@ end """ begin - function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "QuantizedMaxPool") do - desc = tf.NodeDescription("QuantizedMaxPool") - input_ = convert(Tensor{Any}, input_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function quantized_max_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "QuantizedMaxPool") do + desc = tf.NodeDescription("QuantizedMaxPool") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + min_input_ = convert(Tensor{Float32}, min_input_) + begin + end + end + begin + max_input_ = convert(Tensor{Float32}, max_input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx 
= 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("QuantizedMaxPool") + input_ = convert(tf.EagerTensor, input_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_max_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("QuantizedMaxPool") - input_ = convert(tf.EagerTensor, input_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - tf.add_input(desc, input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_max_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_max_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - quantized_max_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - quantized_max_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - end - end end @@ -2800,77 +4990,141 @@ end """ begin - function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, 
"OrderedMapStage") do - desc = tf.NodeDescription("OrderedMapStage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function ordered_map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapStage") do + desc = tf.NodeDescription("OrderedMapStage") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapStage") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + else + ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function ordered_map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapStage") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - else - ordered_map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - end - end end @@ -2880,69 +5134,117 @@ end """ begin - function partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - local desc - tf.with_op_name(name, "PartitionedCall") do - desc = tf.NodeDescription("PartitionedCall") - args_ = [convert(Tensor{Any}, x) for x = args_] - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) - end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) + begin + function partitioned_call_graph(args_; name=nothing, Tin=nothing, 
Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "PartitionedCall") do + desc = tf.NodeDescription("PartitionedCall") + begin + begin + args_ = [convert(Tensor{Any}, x) for x = args_] + begin + end + end + end + begin + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + begin + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + end + begin + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + desc = tf.EagerOp("PartitionedCall") + args_ = convert(tf.EagerTensor, args_) + begin + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + begin + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + end + begin + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + if tf.in_eager_mode() + partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + else + partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - desc = tf.EagerOp("PartitionedCall") - args_ = convert(tf.EagerTensor, args_) - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) - end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) - end - res = tf.execute(desc) - node = tf.TapeNode(partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - if tf.in_eager_mode() - partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - else - partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - end - end end @@ -2952,69 +5254,147 @@ end """ begin - function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "SparseApplyAdagrad") do - desc = tf.NodeDescription("SparseApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) + begin + function sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdagrad") do + desc = tf.NodeDescription("SparseApplyAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("SparseApplyAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end 
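Every op in this file follows the same generated three-part shape that SparseApplyAdagrad shows here: a _graph builder that fills in a tf.NodeDescription under tf.with_op_name, an _eager variant that drives tf.EagerOp / tf.execute and records a tf.TapeNode so the gradient tape can replay the call, and a tf.@op dispatcher that selects between them with tf.in_eager_mode(). As a compact reading aid, a hand-written sketch of that shape for a hypothetical one-input op (the op name "Neg" and the neg_* function names are illustrative only; every tf.* helper used is one this diff already defines or calls):

    function neg_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "Neg") do
            desc = tf.NodeDescription("Neg")
            x_ = convert(Tensor{Any}, x_)
            (x_,) = tf.tf_promote(x_)   # graph mode infers the "T" attribute from inputs
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))
    end

    function neg_eager(x_; name=nothing)
        desc = tf.EagerOp("Neg")
        x_ = convert(tf.EagerTensor, x_)
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)    # eager mode must set dtype attributes explicitly
        res = tf.execute(desc)          # always a vector of output handles
        node = tf.TapeNode(neg, [x_], name=nothing, res)  # record inputs/outputs for backprop
        if length(res) >= 1
            tf.add_node(res[1], node)
            return res[1]
        end
    end

    tf.@op function neg(x_; name=nothing)
        if tf.in_eager_mode()
            neg_eager(x_; name=name)
        else
            neg_graph(x_; name=name)
        end
    end

Keeping the two paths as separate generated functions means graph construction pays no per-call branch inside the builder, and the tape machinery only ever hooks the eager path.
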
+ begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - desc = tf.EagerOp("SparseApplyAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - else - sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - end - end end @@ -3024,74 +5404,124 @@ end """ begin - function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) - local desc - tf.with_op_name(name, "DecodeProtoV2") do - desc = tf.NodeDescription("DecodeProtoV2") - bytes_ = convert(Tensor{String}, bytes_) - tf.add_input(desc, bytes_) - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if 
message_format !== nothing - desc["message_format"] = Base.String(message_format) - end - if sanitize !== nothing - desc["sanitize"] = Base.Bool(sanitize) + begin + function decode_proto_v2_graph(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + local desc + tf.with_op_name(name, "DecodeProtoV2") do + desc = tf.NodeDescription("DecodeProtoV2") + begin + begin + bytes_ = convert(Tensor{String}, bytes_) + begin + end + end + end + begin + begin + tf.add_input(desc, bytes_) + end + end + begin + begin + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + end + begin + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + end + begin + if message_format !== nothing + desc["message_format"] = Base.String(message_format) + end + end + begin + if sanitize !== nothing + desc["sanitize"] = Base.Bool(sanitize) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + desc = tf.EagerOp("DecodeProtoV2") + bytes_ = convert(tf.EagerTensor, bytes_) + begin + begin + tf.add_input(desc, bytes_) + end + end + begin + begin + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + end + begin + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + end + begin + if message_format !== nothing + desc["message_format"] = Base.String(message_format) + end + end + begin + if sanitize !== nothing + desc["sanitize"] = Base.Bool(sanitize) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) + if tf.in_eager_mode() + decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + else + decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function 
decode_proto_v2_eager(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) - desc = tf.EagerOp("DecodeProtoV2") - bytes_ = convert(tf.EagerTensor, bytes_) - tf.add_input(desc, bytes_) - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if message_format !== nothing - desc["message_format"] = Base.String(message_format) - end - if sanitize !== nothing - desc["sanitize"] = Base.Bool(sanitize) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_proto_v2, [bytes_], name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_proto_v2(bytes_; name=nothing, message_type=nothing, field_names=nothing, output_types=nothing, descriptor_source=nothing, message_format=nothing, sanitize=nothing) - if tf.in_eager_mode() - decode_proto_v2_eager(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) - else - decode_proto_v2_graph(bytes_; name=name, message_type=message_type, field_names=field_names, output_types=output_types, descriptor_source=descriptor_source, message_format=message_format, sanitize=sanitize) - end - end end @@ -3101,45 +5531,93 @@ end """ begin - function betainc_graph(a_, b_, x_; name=nothing) - local desc - tf.with_op_name(name, "Betainc") do - desc = tf.NodeDescription("Betainc") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - x_ = convert(Tensor{Any}, x_) - (a_, b_, x_) = tf.tf_promote(a_, b_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) - end - function betainc_eager(a_, b_, x_; name=nothing) - desc = tf.EagerOp("Betainc") - a_ = convert(tf.EagerTensor, a_) - b_ = convert(tf.EagerTensor, b_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(b_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function betainc(a_, b_, x_; name=nothing) - if tf.in_eager_mode() - betainc_eager(a_, b_, x_; name=name) - else - betainc_graph(a_, b_, x_; name=name) + begin + function betainc_graph(a_, b_, x_; name=nothing) + local desc + tf.with_op_name(name, "Betainc") do + desc = tf.NodeDescription("Betainc") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + b_ = convert(Tensor{Any}, b_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (a_, b_, x_) = tf.tf_promote(a_, b_, x_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + begin + 
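+                     # Inputs are attached to the NodeDescription in the exact order the "Betainc" op schema declares them (a, b, x).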
tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function betainc_eager(a_, b_, x_; name=nothing) + desc = tf.EagerOp("Betainc") + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(b_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(betainc, [a_, b_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function betainc(a_, b_, x_; name=nothing) + if tf.in_eager_mode() + betainc_eager(a_, b_, x_; name=name) + else + betainc_graph(a_, b_, x_; name=name) + end end - end + end end @@ -3149,35 +5627,63 @@ end """ begin - function guarantee_const_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "GuaranteeConst") do - desc = tf.NodeDescription("GuaranteeConst") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function guarantee_const_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "GuaranteeConst") do + desc = tf.NodeDescription("GuaranteeConst") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function guarantee_const_eager(input_; name=nothing) - desc = tf.EagerOp("GuaranteeConst") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(guarantee_const, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function guarantee_const_eager(input_; name=nothing) + desc = tf.EagerOp("GuaranteeConst") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(guarantee_const, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function guarantee_const(input_; name=nothing) - if tf.in_eager_mode() - guarantee_const_eager(input_; name=name) - else - guarantee_const_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function guarantee_const(input_; name=nothing) + if tf.in_eager_mode() + guarantee_const_eager(input_; name=name) + else + guarantee_const_graph(input_; name=name) + end end - end + end end @@ -3187,39 +5693,67 @@ end """ begin - function decode_bmp_graph(contents_; name=nothing, channels=nothing) - local desc - tf.with_op_name(name, "DecodeBmp") do - desc = tf.NodeDescription("DecodeBmp") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) + begin + function decode_bmp_graph(contents_; name=nothing, channels=nothing) + local desc + tf.with_op_name(name, 
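+                 # with_op_name scopes the new node under the caller-supplied name, or a generated default when name=nothing.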
"DecodeBmp") do + desc = tf.NodeDescription("DecodeBmp") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function decode_bmp_eager(contents_; name=nothing, channels=nothing) - desc = tf.EagerOp("DecodeBmp") - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function decode_bmp_eager(contents_; name=nothing, channels=nothing) + desc = tf.EagerOp("DecodeBmp") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_bmp, [contents_], name=nothing, channels=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing) - if tf.in_eager_mode() - decode_bmp_eager(contents_; name=name, channels=channels) - else - decode_bmp_graph(contents_; name=name, channels=channels) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_bmp(contents_; name=nothing, channels=nothing) + if tf.in_eager_mode() + decode_bmp_eager(contents_; name=name, channels=channels) + else + decode_bmp_graph(contents_; name=name, channels=channels) + end end - end + end end @@ -3229,48 +5763,86 @@ end """ begin - function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesBucketize") do - desc = tf.NodeDescription("BoostedTreesBucketize") - float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] - bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] - tf.add_input(desc, float_values_) - tf.add_input(desc, bucket_boundaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesBucketize") do + desc = tf.NodeDescription("BoostedTreesBucketize") + begin + begin + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + begin + end + end + begin + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + begin + end + end + end + begin + begin + tf.add_input(desc, float_values_) + end + begin + tf.add_input(desc, bucket_boundaries_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesBucketize") + float_values_ = 
convert(tf.EagerTensor, float_values_) + bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_) + begin + begin + tf.add_input(desc, float_values_) + end + begin + tf.add_input(desc, bucket_boundaries_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features) + else + boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesBucketize") - float_values_ = convert(tf.EagerTensor, float_values_) - bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_) - tf.add_input(desc, float_values_) - tf.add_input(desc, bucket_boundaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_bucketize, [float_values_, bucket_boundaries_], name=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_bucketize(float_values_, bucket_boundaries_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_bucketize_eager(float_values_, bucket_boundaries_; name=name, num_features=num_features) - else - boosted_trees_bucketize_graph(float_values_, bucket_boundaries_; name=name, num_features=num_features) - end - end end @@ -3280,30 +5852,45 @@ end An op that shuts down a running distributed TPU system. 
The Op returns """ begin - function shutdown_distributed_tpu_graph(; name=nothing) - local desc - tf.with_op_name(name, "ShutdownDistributedTPU") do - desc - tf.NodeDescription("ShutdownDistributedTPU") + begin + function shutdown_distributed_tpu_graph(; name=nothing) + local desc + tf.with_op_name(name, "ShutdownDistributedTPU") do + desc = tf.NodeDescription("ShutdownDistributedTPU") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function shutdown_distributed_tpu_eager(; name=nothing) - desc = tf.EagerOp("ShutdownDistributedTPU") - res = tf.execute(desc) - node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function shutdown_distributed_tpu_eager(; name=nothing) + desc = tf.EagerOp("ShutdownDistributedTPU") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(shutdown_distributed_tpu, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shutdown_distributed_tpu(; name=nothing) - if tf.in_eager_mode() - shutdown_distributed_tpu_eager(; name=name) - else - shutdown_distributed_tpu_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shutdown_distributed_tpu(; name=nothing) + if tf.in_eager_mode() + shutdown_distributed_tpu_eager(; name=name) + else + shutdown_distributed_tpu_graph(; name=name) + end end - end + end end @@ -3313,33 +5900,57 @@ end """ begin - function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do - desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) + begin + function experimental_stats_aggregator_summary_graph(iterator_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorSummary") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorSummary") + begin + begin + iterator_ = convert(Tensor{Any}, iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) - desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") - iterator_ = convert(tf.EagerTensor, iterator_) - tf.add_input(desc, iterator_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_stats_aggregator_summary_eager(iterator_; name=nothing) + desc = tf.EagerOp("ExperimentalStatsAggregatorSummary") + iterator_ = convert(tf.EagerTensor, iterator_) + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_stats_aggregator_summary, [iterator_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing) - if tf.in_eager_mode() - experimental_stats_aggregator_summary_eager(iterator_; name=name) - 
else - experimental_stats_aggregator_summary_graph(iterator_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_summary(iterator_; name=nothing) + if tf.in_eager_mode() + experimental_stats_aggregator_summary_eager(iterator_; name=name) + else + experimental_stats_aggregator_summary_graph(iterator_; name=name) + end end - end + end end @@ -3349,30 +5960,45 @@ end """ begin - function timestamp_graph(; name=nothing) - local desc - tf.with_op_name(name, "Timestamp") do - desc - tf.NodeDescription("Timestamp") + begin + function timestamp_graph(; name=nothing) + local desc + tf.with_op_name(name, "Timestamp") do + desc = tf.NodeDescription("Timestamp") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function timestamp_eager(; name=nothing) - desc = tf.EagerOp("Timestamp") - res = tf.execute(desc) - node = tf.TapeNode(timestamp, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function timestamp_eager(; name=nothing) + desc = tf.EagerOp("Timestamp") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(timestamp, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function timestamp(; name=nothing) - if tf.in_eager_mode() - timestamp_eager(; name=name) - else - timestamp_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function timestamp(; name=nothing) + if tf.in_eager_mode() + timestamp_eager(; name=name) + else + timestamp_graph(; name=name) + end end - end + end end @@ -3382,35 +6008,63 @@ end """ begin - function matrix_exponential_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixExponential") do - desc = tf.NodeDescription("MatrixExponential") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function matrix_exponential_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixExponential") do + desc = tf.NodeDescription("MatrixExponential") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_exponential_eager(input_; name=nothing) - desc = tf.EagerOp("MatrixExponential") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_exponential, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_exponential_eager(input_; name=nothing) + desc = tf.EagerOp("MatrixExponential") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_exponential, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_exponential(input_; name=nothing) - if tf.in_eager_mode() - 
matrix_exponential_eager(input_; name=name) - else - matrix_exponential_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_exponential(input_; name=nothing) + if tf.in_eager_mode() + matrix_exponential_eager(input_; name=name) + else + matrix_exponential_graph(input_; name=name) + end end - end + end end @@ -3420,41 +6074,73 @@ end """ begin - function size_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Size") do - desc = tf.NodeDescription("Size") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function size_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Size") do + desc = tf.NodeDescription("Size") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function size_eager(input_; name=nothing, out_type=nothing) - desc = tf.EagerOp("Size") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function size_eager(input_; name=nothing, out_type=nothing) + desc = tf.EagerOp("Size") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(size, [input_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function size(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - size_eager(input_; name=name, out_type=out_type) - else - size_graph(input_; name=name, out_type=out_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function size(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + size_eager(input_; name=name, out_type=out_type) + else + size_graph(input_; name=name, out_type=out_type) + end end - end + end end @@ -3464,41 +6150,73 @@ end """ begin - function add_n_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "AddN") do - desc = tf.NodeDescription("AddN") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function add_n_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "AddN") do + desc = tf.NodeDescription("AddN") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] 
= Base.Int(N) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function add_n_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("AddN") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function add_n_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("AddN") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(add_n, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_n(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - add_n_eager(inputs_; name=name, N=N) - else - add_n_graph(inputs_; name=name, N=N) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_n(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + add_n_eager(inputs_; name=name, N=N) + else + add_n_graph(inputs_; name=name, N=N) + end end - end + end end @@ -3508,46 +6226,94 @@ end """ begin - function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSum") do - desc = tf.NodeDescription("SparseSegmentSum") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) - desc = tf.EagerOp("SparseSegmentSum") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name) + begin + function sparse_segment_sum_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSum") do + desc = tf.NodeDescription("SparseSegmentSum") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = 
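+                         # indices_ was shifted by one above: Julia indices are 1-based, TensorFlow's are 0-based.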
convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_sum_eager(data_, indices_, segment_ids_; name=nothing) + desc = tf.EagerOp("SparseSegmentSum") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sum, [data_, indices_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sum_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sum_graph(data_, indices_, segment_ids_; name=name) + end end - end + end end @@ -3557,49 +6323,89 @@ end """ begin - function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "BatchDataset") do - desc = tf.NodeDescription("BatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function batch_dataset_graph(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDataset") do + desc = tf.NodeDescription("BatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("BatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = 
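+                 # Record the op and its inputs on the tape so gradients can later be propagated through it.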
tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_dataset_eager(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("BatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(batch_dataset, [input_dataset_, batch_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset(input_dataset_, batch_size_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - batch_dataset_eager(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) - else - batch_dataset_graph(input_dataset_, batch_size_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -3609,71 +6415,115 @@ end """ begin - function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "RecordInput") do - desc = tf.NodeDescription("RecordInput") - if file_pattern !== nothing - desc["file_pattern"] = Base.String(file_pattern) - end - if file_random_seed !== nothing - desc["file_random_seed"] = Base.Int(file_random_seed) - end - if file_shuffle_shift_ratio !== nothing - desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) - end - if file_buffer_size !== nothing - desc["file_buffer_size"] = Base.Int(file_buffer_size) - end - if file_parallelism !== nothing - desc["file_parallelism"] = Base.Int(file_parallelism) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) + begin + function record_input_graph(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "RecordInput") do + desc = tf.NodeDescription("RecordInput") + begin + end + begin + end + begin + begin + if file_pattern !== nothing + desc["file_pattern"] = Base.String(file_pattern) + end + end + begin + if file_random_seed !== nothing + desc["file_random_seed"] = Base.Int(file_random_seed) + end + end + begin + if file_shuffle_shift_ratio !== nothing + 
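+                         # Attributes are only set when the caller supplies them; otherwise the kernel's registered default applies.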
desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) + end + end + begin + if file_buffer_size !== nothing + desc["file_buffer_size"] = Base.Int(file_buffer_size) + end + end + begin + if file_parallelism !== nothing + desc["file_parallelism"] = Base.Int(file_parallelism) + end + end + begin + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function record_input_eager(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + desc = tf.EagerOp("RecordInput") + begin + end + begin + begin + if file_pattern !== nothing + desc["file_pattern"] = Base.String(file_pattern) + end + end + begin + if file_random_seed !== nothing + desc["file_random_seed"] = Base.Int(file_random_seed) + end + end + begin + if file_shuffle_shift_ratio !== nothing + desc["file_shuffle_shift_ratio"] = Base.identity(file_shuffle_shift_ratio) + end + end + begin + if file_buffer_size !== nothing + desc["file_buffer_size"] = Base.Int(file_buffer_size) + end + end + begin + if file_parallelism !== nothing + desc["file_parallelism"] = Base.Int(file_parallelism) + end + end + begin + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) + if tf.in_eager_mode() + record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + else + record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) + end end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function record_input_eager(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) - desc = tf.EagerOp("RecordInput") - if file_pattern !== nothing - desc["file_pattern"] = Base.String(file_pattern) - end - if file_random_seed !== nothing - desc["file_random_seed"] = Base.Int(file_random_seed) - end - if file_shuffle_shift_ratio !== nothing - desc["file_shuffle_shift_ratio"] = 
Base.identity(file_shuffle_shift_ratio) - end - if file_buffer_size !== nothing - desc["file_buffer_size"] = Base.Int(file_buffer_size) - end - if file_parallelism !== nothing - desc["file_parallelism"] = Base.Int(file_parallelism) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - res = tf.execute(desc) - node = tf.TapeNode(record_input, [], name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function record_input(; name=nothing, file_pattern=nothing, file_random_seed=nothing, file_shuffle_shift_ratio=nothing, file_buffer_size=nothing, file_parallelism=nothing, batch_size=nothing, compression_type=nothing) - if tf.in_eager_mode() - record_input_eager(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) - else - record_input_graph(; name=name, file_pattern=file_pattern, file_random_seed=file_random_seed, file_shuffle_shift_ratio=file_shuffle_shift_ratio, file_buffer_size=file_buffer_size, file_parallelism=file_parallelism, batch_size=batch_size, compression_type=compression_type) - end - end end @@ -3683,49 +6533,89 @@ end """ begin - function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueUpToV2") do - desc = tf.NodeDescription("QueueDequeueUpToV2") - handle_ = convert(Tensor{Any}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) + begin + function queue_dequeue_up_to_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpToV2") do + desc = tf.NodeDescription("QueueDequeueUpToV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + n_ = convert(Tensor{Int32}, n_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueUpToV2") + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + 
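+                 # tf.execute returns a vector of output handles; res[1] is handed back to the caller below.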
node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_dequeue_up_to_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeueUpToV2") - handle_ = convert(tf.EagerTensor, handle_) - n_ = convert(tf.EagerTensor, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_up_to_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_up_to_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_up_to_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -3735,58 +6625,92 @@ end Retrieve embedding parameters for a single table. 
""" begin - function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = 
tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -3796,69 +6720,133 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + ms_ = convert(Tensor{Float32}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Float32}, mom_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + 
if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters_grad_accum_debug, [parameters_, ms_, mom_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, 
shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters_grad_accum_debug(parameters_, ms_, mom_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(parameters_, ms_, mom_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -3868,35 +6856,63 @@ end """ begin - function serialize_tensor_graph(tensor_; name=nothing) - local desc - tf.with_op_name(name, "SerializeTensor") do - desc = tf.NodeDescription("SerializeTensor") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) + begin + function serialize_tensor_graph(tensor_; name=nothing) + local desc + tf.with_op_name(name, "SerializeTensor") do + desc = tf.NodeDescription("SerializeTensor") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function serialize_tensor_eager(tensor_; name=nothing) - desc = tf.EagerOp("SerializeTensor") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function serialize_tensor_eager(tensor_; name=nothing) + desc = tf.EagerOp("SerializeTensor") + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(serialize_tensor, [tensor_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_tensor(tensor_; name=nothing) - if tf.in_eager_mode() - serialize_tensor_eager(tensor_; name=name) - else - serialize_tensor_graph(tensor_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_tensor(tensor_; name=nothing) + if tf.in_eager_mode() + serialize_tensor_eager(tensor_; name=name) + else + serialize_tensor_graph(tensor_; name=name) + end end - end + end end @@ -3906,40 +6922,78 @@ end """ begin - function mul_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Mul") do - desc = tf.NodeDescription("Mul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function mul_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mul") do + desc = tf.NodeDescription("Mul") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = 
tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function mul_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Mul") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(mul, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function mul_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Mul") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(mul, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mul(x_, y_; name=nothing) - if tf.in_eager_mode() - mul_eager(x_, y_; name=name) - else - mul_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mul(x_, y_; name=nothing) + if tf.in_eager_mode() + mul_eager(x_, y_; name=name) + else + mul_graph(x_, y_; name=name) + end end - end + end end @@ -3949,45 +7003,85 @@ end """ begin - function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) - local desc - tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do - desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") - features_ = convert(Tensor{Any}, features_) - labels_ = convert(Tensor{Any}, labels_) - (features_, labels_) = tf.tf_promote(features_, labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + local desc + tf.with_op_name(name, "SoftmaxCrossEntropyWithLogits") do + desc = tf.NodeDescription("SoftmaxCrossEntropyWithLogits") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + labels_ = convert(Tensor{Any}, labels_) + begin + end + end + begin + (features_, labels_) = tf.tf_promote(features_, labels_) + end + end + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, labels_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) - desc = tf.EagerOp("SoftmaxCrossEntropyWithLogits") - features_ = convert(tf.EagerTensor, features_) - labels_ = convert(tf.EagerTensor, labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - desc["T"] = tf.data_type(features_) - desc["T"] = tf.data_type(labels_) - res = tf.execute(desc) - node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) + desc = 
tf.EagerOp("SoftmaxCrossEntropyWithLogits") + features_ = convert(tf.EagerTensor, features_) + labels_ = convert(tf.EagerTensor, labels_) + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, labels_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + begin + desc["T"] = tf.data_type(labels_) + end + res = tf.execute(desc) + node = tf.TapeNode(softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - if tf.in_eager_mode() - softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) - else - softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.in_eager_mode() + softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end end - end + end end @@ -3997,52 +7091,104 @@ end """ begin - function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterDiv") do - desc = tf.NodeDescription("ResourceScatterDiv") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_div_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterDiv") do + desc = tf.NodeDescription("ResourceScatterDiv") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterDiv") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = 
tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function resource_scatter_div_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterDiv") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_div, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_div(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_div_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_div_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -4052,53 +7198,117 @@ end """ begin - function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordDatasetV2") do - desc = tf.NodeDescription("FixedLengthRecordDatasetV2") - filenames_ = convert(Tensor{String}, filenames_) - header_bytes_ = convert(Tensor{Int64}, header_bytes_) - record_bytes_ = convert(Tensor{Int64}, record_bytes_) - footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - compression_type_ = convert(Tensor{String}, compression_type_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, compression_type_) - end - tf.Tensor(tf.Operation(desc)) - end - function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) - desc = tf.EagerOp("FixedLengthRecordDatasetV2") - filenames_ = convert(tf.EagerTensor, filenames_) - header_bytes_ = convert(tf.EagerTensor, header_bytes_) - record_bytes_ = convert(tf.EagerTensor, record_bytes_) - footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - compression_type_ = convert(tf.EagerTensor, compression_type_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, compression_type_) - res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, 
buffer_size_, compression_type_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) - if tf.in_eager_mode() - fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) - else - fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + begin + function fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDatasetV2") do + desc = tf.NodeDescription("FixedLengthRecordDatasetV2") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + begin + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + begin + end + end + begin + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + begin + end + end + begin + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + begin + compression_type_ = convert(Tensor{String}, compression_type_) + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, header_bytes_) + end + begin + tf.add_input(desc, record_bytes_) + end + begin + tf.add_input(desc, footer_bytes_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, compression_type_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + desc = tf.EagerOp("FixedLengthRecordDatasetV2") + filenames_ = convert(tf.EagerTensor, filenames_) + header_bytes_ = convert(tf.EagerTensor, header_bytes_) + record_bytes_ = convert(tf.EagerTensor, record_bytes_) + footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, header_bytes_) + end + begin + tf.add_input(desc, record_bytes_) + end + begin + tf.add_input(desc, footer_bytes_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, compression_type_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_dataset_v2, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset_v2(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=nothing) + if tf.in_eager_mode() + fixed_length_record_dataset_v2_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + else + fixed_length_record_dataset_v2_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_, compression_type_; name=name) + end 
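# A minimal usage sketch (editorial, not part of the patch). The tf.@op
# wrapper generated above dispatches on tf.in_eager_mode(), so the same
# call works in graph and eager mode. The module alias, the Ops path, and
# the file name below are assumptions for illustration only.
using TensorFlow
const tf = TensorFlow
# One fixed-size record per 8 bytes, no header/footer, no compression.
dataset = tf.Ops.fixed_length_record_dataset_v2(
    ["records.bin"],  # filenames (hypothetical file)
    0,                # header_bytes
    8,                # record_bytes
    0,                # footer_bytes
    1024,             # buffer_size
    "")               # compression_type: "" means uncompressed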
end - end + end end @@ -4108,49 +7318,89 @@ end """ begin - function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "SkipDataset") do - desc = tf.NodeDescription("SkipDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function skip_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "SkipDataset") do + desc = tf.NodeDescription("SkipDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + count_ = convert(Tensor{Int64}, count_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("SkipDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + count_ = convert(tf.EagerTensor, count_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function skip_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("SkipDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - count_ = convert(tf.EagerTensor, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(skip_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
skip_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - skip_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - skip_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -4160,35 +7410,63 @@ end """ begin - function cosh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Cosh") do - desc = tf.NodeDescription("Cosh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function cosh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Cosh") do + desc = tf.NodeDescription("Cosh") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function cosh_eager(x_; name=nothing) - desc = tf.EagerOp("Cosh") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(cosh, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function cosh_eager(x_; name=nothing) + desc = tf.EagerOp("Cosh") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(cosh, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cosh(x_; name=nothing) - if tf.in_eager_mode() - cosh_eager(x_; name=name) - else - cosh_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cosh(x_; name=nothing) + if tf.in_eager_mode() + cosh_eager(x_; name=name) + else + cosh_graph(x_; name=name) + end end - end + end end @@ -4198,85 +7476,173 @@ end """ begin - function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNormV2") do - desc = tf.NodeDescription("FusedBatchNormV2") - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - offset_ = convert(Tensor{Any}, offset_) - mean_ = convert(Tensor{Any}, mean_) - variance_ = convert(Tensor{Any}, variance_) - (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if U !== nothing - desc["U"] = Base.identity(U) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormV2") do + desc = tf.NodeDescription("FusedBatchNormV2") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + scale_ = convert(Tensor{Any}, scale_) + begin + end + end + begin + offset_ = convert(Tensor{Any}, 
offset_) + begin + end + end + begin + mean_ = convert(Tensor{Any}, mean_) + begin + end + end + begin + variance_ = convert(Tensor{Any}, variance_) + begin + end + end + begin + (scale_, offset_, mean_, variance_) = tf.tf_promote(scale_, offset_, mean_, variance_) + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, offset_) + end + begin + tf.add_input(desc, mean_) + end + begin + tf.add_input(desc, variance_) + end + end + begin + begin + if U !== nothing + desc["U"] = Base.identity(U) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormV2") + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + offset_ = convert(tf.EagerTensor, offset_) + mean_ = convert(tf.EagerTensor, mean_) + variance_ = convert(tf.EagerTensor, variance_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, offset_) + end + begin + tf.add_input(desc, mean_) + end + begin + tf.add_input(desc, variance_) + end + end + begin + begin + if U !== nothing + desc["U"] = Base.identity(U) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["U"] = tf.data_type(scale_) + end + begin + desc["U"] = tf.data_type(offset_) + end + begin + desc["U"] = tf.data_type(mean_) + end + begin + desc["U"] = tf.data_type(variance_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - desc 
= tf.EagerOp("FusedBatchNormV2") - x_ = convert(tf.EagerTensor, x_) - scale_ = convert(tf.EagerTensor, scale_) - offset_ = convert(tf.EagerTensor, offset_) - mean_ = convert(tf.EagerTensor, mean_) - variance_ = convert(tf.EagerTensor, variance_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if U !== nothing - desc["U"] = Base.identity(U) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(x_) - desc["U"] = tf.data_type(scale_) - desc["U"] = tf.data_type(offset_) - desc["U"] = tf.data_type(mean_) - desc["U"] = tf.data_type(variance_) - res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_v2, [x_, scale_, offset_, mean_, variance_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_v2(x_, scale_, offset_, mean_, variance_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_v2_eager(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_v2_graph(x_, scale_, offset_, mean_, variance_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) - end - end end @@ -4286,47 +7652,99 @@ end """ begin - function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplit") do - desc = tf.NodeDescription("TensorArraySplit") - handle_ = convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySplit") - handle_ = convert(tf.EagerTensor, handle_) - value_ = convert(tf.EagerTensor, value_) - lengths_ = convert(tf.EagerTensor, lengths_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name) + begin + function tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplit") do + desc = tf.NodeDescription("TensorArraySplit") + begin + begin + 
handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + lengths_ = convert(Tensor{Int64}, lengths_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySplit") + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split, [handle_, value_, lengths_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end + end end @@ -4336,68 +7754,130 @@ end """ begin - function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) - local desc - tf.with_op_name(name, "CTCLoss") do - desc = tf.NodeDescription("CTCLoss") - inputs_ = convert(Tensor{Float32}, inputs_) - labels_indices_ = convert(Tensor{Int64}, labels_indices_) - labels_values_ = convert(Tensor{Int32}, labels_values_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, labels_indices_) - tf.add_input(desc, labels_values_) - tf.add_input(desc, sequence_length_) - if preprocess_collapse_repeated !== nothing - desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) - end - if ctc_merge_repeated !== nothing - desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + begin + function ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + local desc + tf.with_op_name(name, "CTCLoss") do + desc = tf.NodeDescription("CTCLoss") + begin + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + labels_indices_ = convert(Tensor{Int64}, labels_indices_) + begin + end + end + begin + labels_values_ = convert(Tensor{Int32}, labels_values_) + begin + end + end + begin + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, labels_indices_) + end + begin + tf.add_input(desc, labels_values_) + end + begin + tf.add_input(desc, sequence_length_) + end + 
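# Editorial note (comment only): in graph mode the runtime infers attrs
# such as "T" from the graph itself, so the *_graph functions only
# convert and promote their inputs. TFE_Execute has no graph to consult,
# which is why every *_eager function (e.g. tensor_array_split_eager
# above) sets attrs like desc["T"] = tf.data_type(value_) explicitly
# before calling tf.execute.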
end + begin + begin + if preprocess_collapse_repeated !== nothing + desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) + end + end + begin + if ctc_merge_repeated !== nothing + desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + end + end + begin + if ignore_longer_outputs_than_inputs !== nothing + desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + desc = tf.EagerOp("CTCLoss") + inputs_ = convert(tf.EagerTensor, inputs_) + labels_indices_ = convert(tf.EagerTensor, labels_indices_) + labels_values_ = convert(tf.EagerTensor, labels_values_) + sequence_length_ = convert(tf.EagerTensor, sequence_length_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, labels_indices_) + end + begin + tf.add_input(desc, labels_values_) + end + begin + tf.add_input(desc, sequence_length_) + end + end + begin + begin + if preprocess_collapse_repeated !== nothing + desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) + end + end + begin + if ctc_merge_repeated !== nothing + desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) + end + end + begin + if ignore_longer_outputs_than_inputs !== nothing + desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) + if tf.in_eager_mode() + ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) + else + ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) + end end - if ignore_longer_outputs_than_inputs !== nothing - desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) - desc = tf.EagerOp("CTCLoss") - inputs_ = convert(tf.EagerTensor, inputs_) - labels_indices_ = convert(tf.EagerTensor, labels_indices_) - labels_values_ = convert(tf.EagerTensor, 
labels_values_) - sequence_length_ = convert(tf.EagerTensor, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, labels_indices_) - tf.add_input(desc, labels_values_) - tf.add_input(desc, sequence_length_) - if preprocess_collapse_repeated !== nothing - desc["preprocess_collapse_repeated"] = Base.Bool(preprocess_collapse_repeated) - end - if ctc_merge_repeated !== nothing - desc["ctc_merge_repeated"] = Base.Bool(ctc_merge_repeated) - end - if ignore_longer_outputs_than_inputs !== nothing - desc["ignore_longer_outputs_than_inputs"] = Base.Bool(ignore_longer_outputs_than_inputs) - end - res = tf.execute(desc) - node = tf.TapeNode(ctc_loss, [inputs_, labels_indices_, labels_values_, sequence_length_], name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_loss(inputs_, labels_indices_, labels_values_, sequence_length_; name=nothing, preprocess_collapse_repeated=nothing, ctc_merge_repeated=nothing, ignore_longer_outputs_than_inputs=nothing) - if tf.in_eager_mode() - ctc_loss_eager(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) - else - ctc_loss_graph(inputs_, labels_indices_, labels_values_, sequence_length_; name=name, preprocess_collapse_repeated=preprocess_collapse_repeated, ctc_merge_repeated=ctc_merge_repeated, ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs) - end - end end @@ -4407,54 +7887,112 @@ end """ begin - function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedReshape") do - desc = tf.NodeDescription("QuantizedReshape") - tensor_ = convert(Tensor{Any}, tensor_) - shape_ = convert(Tensor{Int32}, shape_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (tensor_,) = tf.tf_promote(tensor_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing) - desc = tf.EagerOp("QuantizedReshape") - tensor_ = convert(tf.EagerTensor, tensor_) - shape_ = convert(tf.EagerTensor, shape_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - desc["T"] = tf.data_type(tensor_) - desc["Tshape"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) - if tf.in_eager_mode() - quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name) - else - quantized_reshape_graph(tensor_, shape_, 
input_min_, input_max_; name=name) + begin + function quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedReshape") do + desc = tf.NodeDescription("QuantizedReshape") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + input_min_ = convert(Tensor{Float32}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Float32}, input_max_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=nothing) + desc = tf.EagerOp("QuantizedReshape") + tensor_ = convert(tf.EagerTensor, tensor_) + shape_ = convert(tf.EagerTensor, shape_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tshape"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_reshape, [tensor_, shape_, input_min_, input_max_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_reshape(tensor_, shape_, input_min_, input_max_; name=nothing) + if tf.in_eager_mode() + quantized_reshape_eager(tensor_, shape_, input_min_, input_max_; name=name) + else + quantized_reshape_graph(tensor_, shape_, input_min_, input_max_; name=name) + end end - end + end end @@ -4464,40 +8002,78 @@ end """ begin - function floor_div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "FloorDiv") do - desc = tf.NodeDescription("FloorDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function floor_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "FloorDiv") do + desc = tf.NodeDescription("FloorDiv") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function floor_div_eager(x_, y_; name=nothing) - desc = tf.EagerOp("FloorDiv") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(floor_div, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function floor_div_eager(x_, y_; 
name=nothing) + desc = tf.EagerOp("FloorDiv") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(floor_div, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_div(x_, y_; name=nothing) - if tf.in_eager_mode() - floor_div_eager(x_, y_; name=name) - else - floor_div_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_div(x_, y_; name=nothing) + if tf.in_eager_mode() + floor_div_eager(x_, y_; name=name) + else + floor_div_graph(x_, y_; name=name) + end end - end + end end @@ -4507,63 +8083,107 @@ end """ begin - function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) - local desc - tf.with_op_name(name, "TensorArrayV2") do - desc = tf.NodeDescription("TensorArrayV2") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) + begin + function tensor_array_v2_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV2") do + desc = tf.NodeDescription("TensorArrayV2") + begin + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + end + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + desc = tf.EagerOp("TensorArrayV2") + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, res) + if length(res) >= 1 + 
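# Editorial note (comment only): every eager wrapper records a
# tf.TapeNode carrying the op, its input tensors, and its keyword attrs,
# then attaches it to the first output via tf.add_node. That is what lets
# the tape replay the call when gradients are requested later.
# Single-output ops return res[1]; multi-output ops (e.g. CTCLoss above)
# return the whole res vector.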
tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) + if tf.in_eager_mode() + tensor_array_v2_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) + else + tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) + end end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_v2_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) - desc = tf.EagerOp("TensorArrayV2") - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_v2, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v2(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing) - if tf.in_eager_mode() - tensor_array_v2_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) - else - tensor_array_v2_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name) - end - end end @@ -4573,39 +8193,67 @@ end """ begin - function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "BarrierClose") do - desc = tf.NodeDescription("BarrierClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + begin + function barrier_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "BarrierClose") do + desc = tf.NodeDescription("BarrierClose") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = 
Base.Bool(cancel_pending_enqueues) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) - desc = tf.EagerOp("BarrierClose") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end - res = tf.execute(desc) - node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function barrier_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("BarrierClose") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(barrier_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + barrier_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + barrier_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end + end end @@ -4615,39 +8263,67 @@ end """ begin - function read_variable_op_graph(resource_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ReadVariableOp") do - desc = tf.NodeDescription("ReadVariableOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function read_variable_op_graph(resource_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ReadVariableOp") do + desc = tf.NodeDescription("ReadVariableOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function read_variable_op_eager(resource_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ReadVariableOp") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function read_variable_op_eager(resource_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ReadVariableOp") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if dtype 
!== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(read_variable_op, [resource_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - read_variable_op_eager(resource_; name=name, dtype=dtype) - else - read_variable_op_graph(resource_; name=name, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_variable_op(resource_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + read_variable_op_eager(resource_; name=name, dtype=dtype) + else + read_variable_op_graph(resource_; name=name, dtype=dtype) + end end - end + end end @@ -4657,62 +8333,136 @@ end """ begin - function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedMul") do - desc = tf.NodeDescription("QuantizedMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - min_x_ = convert(Tensor{Float32}, min_x_) - max_x_ = convert(Tensor{Float32}, max_x_) - min_y_ = convert(Tensor{Float32}, min_y_) - max_y_ = convert(Tensor{Float32}, max_y_) - (x_,) = tf.tf_promote(x_) - (y_,) = tf.tf_promote(y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - desc = tf.EagerOp("QuantizedMul") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - min_x_ = convert(tf.EagerTensor, min_x_) - max_x_ = convert(tf.EagerTensor, max_x_) - min_y_ = convert(tf.EagerTensor, min_y_) - max_y_ = convert(tf.EagerTensor, max_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - desc["T1"] = tf.data_type(x_) - desc["T2"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - if tf.in_eager_mode() - quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) - else - quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + begin + function quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedMul") do + desc = tf.NodeDescription("QuantizedMul") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + min_x_ = convert(Tensor{Float32}, min_x_) + begin + end + end + begin + max_x_ = convert(Tensor{Float32}, max_x_) + begin + end + end + begin + min_y_ = convert(Tensor{Float32}, min_y_) + begin + end + end + begin + max_y_ = convert(Tensor{Float32}, max_y_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (y_,) = tf.tf_promote(y_) + end + end + begin + begin + 
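# Editorial note (comment only): QuantizedMul threads the value tensors
# and their calibration ranges through as six inputs (x, y, min_x, max_x,
# min_y, max_y) and yields three outputs (the product plus its min/max
# range), which is why the graph builder loops out_idx = 1:3. The two
# factor dtypes are independent, hence the separate "T1" and "T2" attrs
# set from tf.data_type(x_) and tf.data_type(y_).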
tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, min_x_) + end + begin + tf.add_input(desc, max_x_) + end + begin + tf.add_input(desc, min_y_) + end + begin + tf.add_input(desc, max_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + desc = tf.EagerOp("QuantizedMul") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + min_x_ = convert(tf.EagerTensor, min_x_) + max_x_ = convert(tf.EagerTensor, max_x_) + min_y_ = convert(tf.EagerTensor, min_y_) + max_y_ = convert(tf.EagerTensor, max_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, min_x_) + end + begin + tf.add_input(desc, max_x_) + end + begin + tf.add_input(desc, min_y_) + end + begin + tf.add_input(desc, max_y_) + end + end + begin + end + begin + desc["T1"] = tf.data_type(x_) + end + begin + desc["T2"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_mul, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mul(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + if tf.in_eager_mode() + quantized_mul_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + else + quantized_mul_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + end end - end + end end @@ -4722,35 +8472,63 @@ end """ begin - function selu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Selu") do - desc = tf.NodeDescription("Selu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function selu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Selu") do + desc = tf.NodeDescription("Selu") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function selu_eager(features_; name=nothing) - desc = tf.EagerOp("Selu") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(selu, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function selu_eager(features_; name=nothing) + desc = tf.EagerOp("Selu") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(selu, [features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu(features_; name=nothing) - if tf.in_eager_mode() - selu_eager(features_; name=name) - else - selu_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu(features_; 
name=nothing) + if tf.in_eager_mode() + selu_eager(features_; name=name) + else + selu_graph(features_; name=name) + end end - end + end end @@ -4760,45 +8538,93 @@ end """ begin - function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableInsert") do - desc = tf.NodeDescription("LookupTableInsert") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing) - desc = tf.EagerOp("LookupTableInsert") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_insert_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_insert_graph(table_handle_, keys_, values_; name=name) + begin + function lookup_table_insert_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsert") do + desc = tf.NodeDescription("LookupTableInsert") + begin + begin + table_handle_ = convert(Tensor{String}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_insert_eager(table_handle_, keys_, values_; name=nothing) + desc = tf.EagerOp("LookupTableInsert") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_insert, [table_handle_, keys_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_insert_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_insert_graph(table_handle_, keys_, values_; name=name) + end end - end + end end @@ -4808,35 +8634,63 @@ end 
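The Selu wrapper generated above binds the scaled exponential linear unit kernel. For reference, the function it computes elementwise is selu(x) = λx for x > 0 and λα(exp(x) − 1) otherwise, with the fixed constants α ≈ 1.6732632 and λ ≈ 1.0507010 from Klambauer et al. A plain-Julia reference sketch of that formula (not part of the generated file, useful only for sanity-checking the kernel's output):

    # Scalar SELU reference; the generated selu op computes this elementwise.
    function selu_reference(x::Real)
        α = 1.6732632423543772
        λ = 1.0507009873554805
        return x > 0 ? λ * x : λ * α * (exp(x) - 1)
    end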
""" begin - function complex_abs_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "ComplexAbs") do - desc = tf.NodeDescription("ComplexAbs") - x_ = convert(Tensor{Complex{Float32}}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function complex_abs_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "ComplexAbs") do + desc = tf.NodeDescription("ComplexAbs") + begin + begin + x_ = convert(Tensor{Complex{Float32}}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function complex_abs_eager(x_; name=nothing) - desc = tf.EagerOp("ComplexAbs") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(complex_abs, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function complex_abs_eager(x_; name=nothing) + desc = tf.EagerOp("ComplexAbs") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(complex_abs, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex_abs(x_; name=nothing) - if tf.in_eager_mode() - complex_abs_eager(x_; name=name) - else - complex_abs_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex_abs(x_; name=nothing) + if tf.in_eager_mode() + complex_abs_eager(x_; name=name) + else + complex_abs_graph(x_; name=name) + end end - end + end end @@ -4846,35 +8700,63 @@ end """ begin - function abs_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Abs") do - desc = tf.NodeDescription("Abs") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function abs_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Abs") do + desc = tf.NodeDescription("Abs") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function abs_eager(x_; name=nothing) - desc = tf.EagerOp("Abs") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(abs, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function abs_eager(x_; name=nothing) + desc = tf.EagerOp("Abs") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(abs, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abs(x_; name=nothing) - if tf.in_eager_mode() - abs_eager(x_; name=name) - else - abs_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abs(x_; name=nothing) + if tf.in_eager_mode() + abs_eager(x_; name=name) + else 
+ abs_graph(x_; name=name) + end end - end + end end @@ -4884,45 +8766,93 @@ end """ begin - function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableImport") do - desc = tf.NodeDescription("LookupTableImport") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing) - desc = tf.EagerOp("LookupTableImport") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_import_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_import_graph(table_handle_, keys_, values_; name=name) + begin + function lookup_table_import_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImport") do + desc = tf.NodeDescription("LookupTableImport") + begin + begin + table_handle_ = convert(Tensor{String}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_import_eager(table_handle_, keys_, values_; name=nothing) + desc = tf.EagerOp("LookupTableImport") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_import, [table_handle_, keys_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_import_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_import_graph(table_handle_, keys_, values_; name=name) + end end - end + end end @@ -4932,89 +8862,209 @@ end """ begin - function resource_apply_adam_graph(var_, m_, v_, beta1_power_, 
beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdam") do - desc = tf.NodeDescription("ResourceApplyAdam") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ResourceApplyAdam") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - beta1_power_ = convert(tf.EagerTensor, beta1_power_) - beta2_power_ = convert(tf.EagerTensor, beta2_power_) - lr_ = convert(tf.EagerTensor, lr_) - beta1_ = convert(tf.EagerTensor, beta1_) - beta2_ = convert(tf.EagerTensor, beta2_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(beta1_power_) - desc["T"] = tf.data_type(beta2_power_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(beta1_) - desc["T"] = tf.data_type(beta2_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + begin + function 
resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdam") do + desc = tf.NodeDescription("ResourceApplyAdam") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + beta1_power_ = convert(Tensor{Any}, beta1_power_) + begin + end + end + begin + beta2_power_ = convert(Tensor{Any}, beta2_power_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + beta1_ = convert(Tensor{Any}, beta1_) + begin + end + end + begin + beta2_ = convert(Tensor{Any}, beta2_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyAdam") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(beta1_power_) + end + begin + desc["T"] = tf.data_type(beta2_power_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(beta1_) + end + begin + desc["T"] = tf.data_type(beta2_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = 
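# ResourceApplyAdam applies one fused Adam step in-place on the resource
# variables. In update-rule form (t is the step count):
#     lr_t = lr * sqrt(1 - beta2^t) / (1 - beta1^t)
#     m   := beta1 * m + (1 - beta1) * grad
#     v   := beta2 * v + (1 - beta2) * grad^2
#     var := var - lr_t * m / (sqrt(v) + epsilon)
# with use_nesterov=true replacing m in the last line by
# beta1 * m + (1 - beta1) * grad. beta1_power and beta2_power carry beta1^t
# and beta2^t as tensors, which is why they are promoted together with lr,
# beta1, beta2, epsilon, and grad.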
tf.TapeNode(resource_apply_adam, [var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_adam_eager(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_adam_graph(var_, m_, v_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end + end end @@ -5024,47 +9074,99 @@ end """ begin - function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) - local desc - tf.with_op_name(name, "WriteHistogramSummary") do - desc = tf.NodeDescription("WriteHistogramSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - values_ = convert(Tensor{Float32}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing) - desc = tf.EagerOp("WriteHistogramSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tag_ = convert(tf.EagerTensor, tag_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) - if tf.in_eager_mode() - write_histogram_summary_eager(writer_, step_, tag_, values_; name=name) - else - write_histogram_summary_graph(writer_, step_, tag_, values_; name=name) + begin + function write_histogram_summary_graph(writer_, step_, tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "WriteHistogramSummary") do + desc = tf.NodeDescription("WriteHistogramSummary") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + values_ = convert(Tensor{Float32}, values_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_histogram_summary_eager(writer_, step_, tag_, values_; name=nothing) + desc = tf.EagerOp("WriteHistogramSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = 
convert(tf.EagerTensor, tag_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(write_histogram_summary, [writer_, step_, tag_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_histogram_summary(writer_, step_, tag_, values_; name=nothing) + if tf.in_eager_mode() + write_histogram_summary_eager(writer_, step_, tag_, values_; name=name) + else + write_histogram_summary_graph(writer_, step_, tag_, values_; name=name) + end end - end + end end @@ -5074,65 +9176,113 @@ end Sends the named tensor from send_device to recv_device. """ begin - function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_HostSend") do - desc = tf.NodeDescription("_HostSend") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) + begin + function _host_send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostSend") do + desc = tf.NodeDescription("_HostSend") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_HostSend") + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + 
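# The _HostSend attributes here form the rendezvous key for the transfer:
# tensor_name names the value, send_device/recv_device name the endpoints,
# and send_device_incarnation pins the sender to one device lifetime so a
# restarted device cannot satisfy a stale Recv. client_terminated marks
# transfers whose peer is the calling client rather than another graph node.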
end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _host_send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - desc = tf.EagerOp("_HostSend") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(_host_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _host_send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _host_send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - end - end end @@ -5142,37 +9292,69 @@ end """ begin - function experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do - desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(Tensor{Any}, dataset_) - materialized_ = convert(Tensor{Any}, materialized_) - tf.add_input(desc, dataset_) - tf.add_input(desc, materialized_) + begin + function 
experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetMaterialize") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetMaterialize") + begin + begin + dataset_ = convert(Tensor{Any}, dataset_) + begin + end + end + begin + materialized_ = convert(Tensor{Any}, materialized_) + begin + end + end + end + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, materialized_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) - desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") - dataset_ = convert(tf.EagerTensor, dataset_) - materialized_ = convert(tf.EagerTensor, materialized_) - tf.add_input(desc, dataset_) - tf.add_input(desc, materialized_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=nothing) + desc = tf.EagerOp("ExperimentalIndexedDatasetMaterialize") + dataset_ = convert(tf.EagerTensor, dataset_) + materialized_ = convert(tf.EagerTensor, materialized_) + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, materialized_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_indexed_dataset_materialize, [dataset_, materialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) - if tf.in_eager_mode() - experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) - else - experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_materialize(dataset_, materialized_; name=nothing) + if tf.in_eager_mode() + experimental_indexed_dataset_materialize_eager(dataset_, materialized_; name=name) + else + experimental_indexed_dataset_materialize_graph(dataset_, materialized_; name=name) + end end - end + end end @@ -5182,40 +9364,78 @@ end """ begin - function greater_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Greater") do - desc = tf.NodeDescription("Greater") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function greater_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Greater") do + desc = tf.NodeDescription("Greater") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function greater_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Greater") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - 
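# Greater is an elementwise, broadcasting comparison: both inputs are promoted
# to a common dtype via tf.tf_promote and the result is a Bool tensor. A
# hedged usage sketch (eager mode assumed, arrays converted via EagerTensor):
#
#     greater([1, 5, 3], 2)   # elementwise, roughly Bool[false, true, true]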
desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(greater, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function greater_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Greater") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(greater, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater(x_, y_; name=nothing) - if tf.in_eager_mode() - greater_eager(x_, y_; name=name) - else - greater_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater(x_, y_; name=nothing) + if tf.in_eager_mode() + greater_eager(x_, y_; name=name) + else + greater_graph(x_, y_; name=name) + end end - end + end end @@ -5225,41 +9445,73 @@ end """ begin - function nccl_broadcast_graph(input_; name=nothing, shape=nothing) - local desc - tf.with_op_name(name, "NcclBroadcast") do - desc = tf.NodeDescription("NcclBroadcast") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function nccl_broadcast_graph(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "NcclBroadcast") do + desc = tf.NodeDescription("NcclBroadcast") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function nccl_broadcast_eager(input_; name=nothing, shape=nothing) - desc = tf.EagerOp("NcclBroadcast") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function nccl_broadcast_eager(input_; name=nothing, shape=nothing) + desc = tf.EagerOp("NcclBroadcast") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(nccl_broadcast, [input_], name=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing) - if tf.in_eager_mode() - nccl_broadcast_eager(input_; name=name, shape=shape) - else - nccl_broadcast_graph(input_; name=name, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_broadcast(input_; name=nothing, shape=nothing) + if 
tf.in_eager_mode() + nccl_broadcast_eager(input_; name=name, shape=shape) + else + nccl_broadcast_graph(input_; name=name, shape=shape) + end end - end + end end @@ -5269,45 +9521,85 @@ end """ begin - function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPushBackBatch") do - desc = tf.NodeDescription("TensorListPushBackBatch") - input_handles_ = convert(Tensor{Any}, input_handles_) - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, input_handles_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_push_back_batch_graph(input_handles_, tensor_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPushBackBatch") do + desc = tf.NodeDescription("TensorListPushBackBatch") + begin + begin + input_handles_ = convert(Tensor{Any}, input_handles_) + begin + end + end + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, input_handles_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPushBackBatch") + input_handles_ = convert(tf.EagerTensor, input_handles_) + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, input_handles_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + begin + desc["element_dtype"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype) + else + tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_push_back_batch_eager(input_handles_, tensor_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListPushBackBatch") - input_handles_ = convert(tf.EagerTensor, input_handles_) - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, input_handles_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - desc["element_dtype"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_push_back_batch, [input_handles_, tensor_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back_batch(input_handles_, tensor_; name=nothing, 
element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_push_back_batch_eager(input_handles_, tensor_; name=name, element_dtype=element_dtype) - else - tensor_list_push_back_batch_graph(input_handles_, tensor_; name=name, element_dtype=element_dtype) - end - end end @@ -5317,52 +9609,104 @@ end """ begin - function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMin") do - desc = tf.NodeDescription("ResourceScatterMin") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_min_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMin") do + desc = tf.NodeDescription("ResourceScatterMin") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterMin") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_min_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterMin") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if 
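# Note how the resource_scatter_min graph wrapper shifts the index input by
# one (indices_ - convert(tf.Tensor{eltype(indices_)}, 1)) so callers can pass
# Julia's 1-based indices while the ResourceScatterMin kernel sees the 0-based
# indices it expects; the Tindices and dtype attrs are then inferred from the
# converted inputs rather than passed by the caller.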
dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_min, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_min(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_min_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_min_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -5372,53 +9716,107 @@ end """ begin - function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) - local desc - tf.with_op_name(name, "Slice") do - desc = tf.NodeDescription("Slice") - input_ = convert(Tensor{Any}, input_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - size_ = convert(Tensor{Any}, size_) - (input_,) = tf.tf_promote(input_) - (begin_, size_) = tf.tf_promote(begin_, size_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, size_) - if Index !== nothing - desc["Index"] = Base.identity(Index) + begin + function slice_graph(input_, begin_, size_; name=nothing, Index=nothing) + local desc + tf.with_op_name(name, "Slice") do + desc = tf.NodeDescription("Slice") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + begin_ = convert(Tensor{Any}, begin_) + begin + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end + end + begin + size_ = convert(Tensor{Any}, size_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (begin_, size_) = tf.tf_promote(begin_, size_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function slice_eager(input_, begin_, size_; name=nothing, Index=nothing) + desc = tf.EagerOp("Slice") + input_ = convert(tf.EagerTensor, input_) + begin_ = convert(tf.EagerTensor, begin_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Index"] = tf.data_type(begin_) + end + begin + desc["Index"] = tf.data_type(size_) + end + res = tf.execute(desc) + node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing) + if tf.in_eager_mode() + slice_eager(input_, begin_, size_; name=name, Index=Index) + else + slice_graph(input_, begin_, size_; name=name, Index=Index) + end end - end - tf.Tensor(tf.Operation(desc)) end - function slice_eager(input_, begin_, size_; name=nothing, Index=nothing) - desc = tf.EagerOp("Slice") - input_ = convert(tf.EagerTensor, input_) - begin_ = 
convert(tf.EagerTensor, begin_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, size_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - desc["T"] = tf.data_type(input_) - desc["Index"] = tf.data_type(begin_) - desc["Index"] = tf.data_type(size_) - res = tf.execute(desc) - node = tf.TapeNode(slice, [input_, begin_, size_], name=nothing, Index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function slice(input_, begin_, size_; name=nothing, Index=nothing) - if tf.in_eager_mode() - slice_eager(input_, begin_, size_; name=name, Index=Index) - else - slice_graph(input_, begin_, size_; name=name, Index=Index) - end - end end @@ -5428,62 +9826,104 @@ end """ begin - function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - local desc - tf.with_op_name(name, "UnicodeDecode") do - desc = tf.NodeDescription("UnicodeDecode") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) + begin + function unicode_decode_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeDecode") do + desc = tf.NodeDescription("UnicodeDecode") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeDecode") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) + 
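# UnicodeDecode has two outputs (the row splits and the decoded code points),
# so its graph wrapper collects tf.Tensor(op, out_idx) for out_idx = 1:2 and
# its eager wrapper returns the whole `res` vector rather than res[1], unlike
# the single-output ops above.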
if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.in_eager_mode() + unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function unicode_decode_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - desc = tf.EagerOp("UnicodeDecode") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end - res = tf.execute(desc) - node = tf.TapeNode(unicode_decode, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.in_eager_mode() - unicode_decode_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - else - unicode_decode_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - end - end end @@ -5493,49 +9933,89 @@ end """ begin - function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TakeDataset") do - desc = tf.NodeDescription("TakeDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function take_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TakeDataset") do + desc = tf.NodeDescription("TakeDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + count_ = convert(Tensor{Int64}, count_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + 
desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TakeDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + count_ = convert(tf.EagerTensor, count_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function take_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("TakeDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - count_ = convert(tf.EagerTensor, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(take_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - take_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - take_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -5545,63 +10025,123 @@ end """ begin - function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do - desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") - node_ids_ = convert(Tensor{Int32}, node_ids_) - gradients_ = convert(Tensor{Float32}, gradients_) - hessians_ = convert(Tensor{Float32}, hessians_) - bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_] - tf.add_input(desc, node_ids_) - tf.add_input(desc, gradients_) - tf.add_input(desc, hessians_) - tf.add_input(desc, bucketized_features_list_) - if max_splits !== nothing - desc["max_splits"] = 
Base.Int(max_splits) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesMakeStatsSummary") do + desc = tf.NodeDescription("BoostedTreesMakeStatsSummary") + begin + begin + node_ids_ = convert(Tensor{Int32}, node_ids_) + begin + end + end + begin + gradients_ = convert(Tensor{Float32}, gradients_) + begin + end + end + begin + hessians_ = convert(Tensor{Float32}, hessians_) + begin + end + end + begin + bucketized_features_list_ = [convert(Tensor{Int32}, x) for x = bucketized_features_list_] + begin + end + end + end + begin + begin + tf.add_input(desc, node_ids_) + end + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, hessians_) + end + begin + tf.add_input(desc, bucketized_features_list_) + end + end + begin + begin + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + end + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesMakeStatsSummary") + node_ids_ = convert(tf.EagerTensor, node_ids_) + gradients_ = convert(tf.EagerTensor, gradients_) + hessians_ = convert(tf.EagerTensor, hessians_) + bucketized_features_list_ = convert(tf.EagerTensor, bucketized_features_list_) + begin + begin + tf.add_input(desc, node_ids_) + end + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, hessians_) + end + begin + tf.add_input(desc, bucketized_features_list_) + end + end + begin + begin + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + end + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) + else + boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_make_stats_summary_eager(node_ids_, gradients_, 
hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesMakeStatsSummary") - node_ids_ = convert(tf.EagerTensor, node_ids_) - gradients_ = convert(tf.EagerTensor, gradients_) - hessians_ = convert(tf.EagerTensor, hessians_) - bucketized_features_list_ = convert(tf.EagerTensor, bucketized_features_list_) - tf.add_input(desc, node_ids_) - tf.add_input(desc, gradients_) - tf.add_input(desc, hessians_) - tf.add_input(desc, bucketized_features_list_) - if max_splits !== nothing - desc["max_splits"] = Base.Int(max_splits) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_make_stats_summary, [node_ids_, gradients_, hessians_, bucketized_features_list_], name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_stats_summary(node_ids_, gradients_, hessians_, bucketized_features_list_; name=nothing, max_splits=nothing, num_buckets=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_make_stats_summary_eager(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) - else - boosted_trees_make_stats_summary_graph(node_ids_, gradients_, hessians_, bucketized_features_list_; name=name, max_splits=max_splits, num_buckets=num_buckets, num_features=num_features) - end - end end @@ -5611,68 +10151,114 @@ end """ begin - function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "AllCandidateSampler") do - desc = tf.NodeDescription("AllCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) + begin + function all_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "AllCandidateSampler") do + desc = tf.NodeDescription("AllCandidateSampler") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("AllCandidateSampler") + true_classes_ = convert(tf.EagerTensor, true_classes_) + begin + begin + tf.add_input(desc, true_classes_) 
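# Illustrative sketch, not part of the patch: the return convention differs by
# arity. Single-output wrappers return one tensor; multi-output ops such as
# AllCandidateSampler here (three outputs) return the whole collection.
# Graph mode, multi-output:
out = tf.Tensor[]
op = tf.Operation(desc)          # `desc` from the surrounding graph builder
for out_idx = 1:3
    push!(out, tf.Tensor(op, out_idx))
end
out                              # Vector of tf.Tensor; eager mode returns `res`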
+ end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) + else + all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) + end end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function all_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("AllCandidateSampler") - true_classes_ = convert(tf.EagerTensor, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(all_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - all_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) - else - all_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, seed=seed, seed2=seed2) - end - end end @@ -5682,74 +10268,140 @@ end """ begin - function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2DBackpropInput") do - desc = tf.NodeDescription("Conv2DBackpropInput") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = convert(Tensor{Any}, filter_) - 
out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + begin + function conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2DBackpropInput") do + desc = tf.NodeDescription("Conv2DBackpropInput") + begin + begin + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2DBackpropInput") + input_sizes_ = convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_backprop_input_eager(input_sizes_, 
filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv2DBackpropInput") - input_sizes_ = convert(tf.EagerTensor, input_sizes_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv2d_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -5759,45 +10411,77 @@ end """ begin - function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "DatasetToSingleElement") do - desc = tf.NodeDescription("DatasetToSingleElement") - dataset_ = convert(Tensor{Any}, dataset_) - tf.add_input(desc, dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function dataset_to_single_element_graph(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "DatasetToSingleElement") do + desc = tf.NodeDescription("DatasetToSingleElement") + begin + begin + dataset_ = convert(Tensor{Any}, dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, dataset_) + end + end + 
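# Illustrative sketch, not part of the patch: type-polymorphic ops such as
# Conv2DBackpropInput pin their "T" attribute differently per mode. Graph mode
# promotes the inputs to a common element type before wiring them in; eager
# mode reads each converted handle's runtime dtype, so the repeated writes to
# desc["T"] are harmless once the inputs already agree.
(filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_)  # graph mode
desc["T"] = tf.data_type(filter_)          # eager mode
desc["T"] = tf.data_type(out_backprop_)    # last write wins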
begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("DatasetToSingleElement") + dataset_ = convert(tf.EagerTensor, dataset_) + begin + begin + tf.add_input(desc, dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function dataset_to_single_element_eager(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("DatasetToSingleElement") - dataset_ = convert(tf.EagerTensor, dataset_) - tf.add_input(desc, dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(dataset_to_single_element, [dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_single_element(dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - dataset_to_single_element_eager(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - dataset_to_single_element_graph(dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -5807,49 +10491,89 @@ end """ begin - function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "CacheDataset") do - desc = tf.NodeDescription("CacheDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function cache_dataset_graph(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "CacheDataset") do + desc = tf.NodeDescription("CacheDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end 
+ begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, filename_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("CacheDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + filename_ = convert(tf.EagerTensor, filename_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, filename_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) + else + cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function cache_dataset_eager(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("CacheDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - filename_ = convert(tf.EagerTensor, filename_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(cache_dataset, [input_dataset_, filename_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cache_dataset(input_dataset_, filename_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - cache_dataset_eager(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) - else - cache_dataset_graph(input_dataset_, filename_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -5859,62 +10583,120 @@ end """ begin - function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, 
max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) + begin + function fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsGradient") + begin + begin + gradients_ = convert(Tensor{Float32}, gradients_) + begin + end + end + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + min_ = convert(Tensor{Float32}, min_) + begin + end + end + begin + max_ = convert(Tensor{Float32}, max_) + begin + end + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - desc = tf.EagerOp("FakeQuantWithMinMaxVarsGradient") - gradients_ = convert(tf.EagerTensor, gradients_) - inputs_ = convert(tf.EagerTensor, inputs_) - min_ = convert(tf.EagerTensor, min_) - max_ = convert(tf.EagerTensor, max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - 
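# Illustrative sketch, not part of the patch: after execution, every eager
# wrapper records a TapeNode pairing the Julia-level function with its inputs
# and outputs, then attaches it to the primary output so a later gradient pass
# can replay the call (shown with the surrounding op's arguments).
res = tf.execute(desc)
node = tf.TapeNode(fake_quant_with_min_max_vars_gradient,
                   [gradients_, inputs_, min_, max_],
                   name=nothing, num_bits=nothing, narrow_range=nothing, res)
if length(res) >= 1
    tf.add_node(res[1], node)
    return res
end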
tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - end - end end @@ -5924,72 +10706,142 @@ end """ begin - function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "FusedResizeAndPadConv2D") do - desc = tf.NodeDescription("FusedResizeAndPadConv2D") - input_ = convert(Tensor{Any}, input_) - size_ = convert(Tensor{Int32}, size_) - paddings_ = convert(Tensor{Int32}, paddings_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if resize_align_corners !== nothing - desc["resize_align_corners"] = Base.Bool(resize_align_corners) - end - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedResizeAndPadConv2D") do + desc = tf.NodeDescription("FusedResizeAndPadConv2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, paddings_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if resize_align_corners !== nothing + desc["resize_align_corners"] = Base.Bool(resize_align_corners) + end + end + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("FusedResizeAndPadConv2D") + input_ = convert(tf.EagerTensor, input_) + size_ = convert(tf.EagerTensor, size_) + paddings_ = convert(tf.EagerTensor, paddings_) + 
filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, paddings_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if resize_align_corners !== nothing + desc["resize_align_corners"] = Base.Bool(resize_align_corners) + end + end + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) + else + fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("FusedResizeAndPadConv2D") - input_ = convert(tf.EagerTensor, input_) - size_ = convert(tf.EagerTensor, size_) - paddings_ = convert(tf.EagerTensor, paddings_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if resize_align_corners !== nothing - desc["resize_align_corners"] = Base.Bool(resize_align_corners) - end - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(fused_resize_and_pad_conv2d, [input_, size_, paddings_, filter_], name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_resize_and_pad_conv2d(input_, size_, paddings_, filter_; name=nothing, resize_align_corners=nothing, mode=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - fused_resize_and_pad_conv2d_eager(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, strides=strides, padding=padding) - else - fused_resize_and_pad_conv2d_graph(input_, size_, paddings_, filter_; name=name, resize_align_corners=resize_align_corners, mode=mode, 
strides=strides, padding=padding) - end - end end @@ -5999,98 +10851,164 @@ end """ begin - function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) - local desc - tf.with_op_name(name, "Batch") do - desc = tf.NodeDescription("Batch") - in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] - tf.add_input(desc, in_tensors_) - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end - if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if grad_timeout_micros !== nothing - desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function batch_graph(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + local desc + tf.with_op_name(name, "Batch") do + desc = tf.NodeDescription("Batch") + begin + begin + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + begin + end + end + end + begin + begin + tf.add_input(desc, in_tensors_) + end + end + begin + begin + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + end + begin + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + end + begin + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + end + begin + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + end + begin + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + end + begin + if grad_timeout_micros !== nothing + desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + end + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + desc = tf.EagerOp("Batch") + in_tensors_ = convert(tf.EagerTensor, in_tensors_) + begin + begin + tf.add_input(desc, in_tensors_) + end + end + begin + begin + if num_batch_threads !== 
nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + end + begin + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + end + begin + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + end + begin + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + end + begin + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + end + begin + if grad_timeout_micros !== nothing + desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + end + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) + if tf.in_eager_mode() + batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) + else + batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if T !== nothing - desc["T"] = map(Base.identity, T) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function batch_eager(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) - desc = tf.EagerOp("Batch") - in_tensors_ = convert(tf.EagerTensor, in_tensors_) - tf.add_input(desc, in_tensors_) - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end 
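# Illustrative sketch, not part of the patch: optional keyword arguments (here
# from the Batch op) become attributes on the op description only when the
# caller supplies them; otherwise the kernel's registered defaults apply.
if num_batch_threads !== nothing
    desc["num_batch_threads"] = Base.Int(num_batch_threads)               # scalar attr
end
if allowed_batch_sizes !== nothing
    desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) # list attr
end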
- if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if grad_timeout_micros !== nothing - desc["grad_timeout_micros"] = Base.Int(grad_timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if T !== nothing - desc["T"] = map(Base.identity, T) - end - res = tf.execute(desc) - node = tf.TapeNode(batch, [in_tensors_], name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch(in_tensors_; name=nothing, num_batch_threads=nothing, max_batch_size=nothing, max_enqueued_batches=nothing, batch_timeout_micros=nothing, allowed_batch_sizes=nothing, grad_timeout_micros=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, T=nothing) - if tf.in_eager_mode() - batch_eager(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) - else - batch_graph(in_tensors_; name=name, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, max_enqueued_batches=max_enqueued_batches, batch_timeout_micros=batch_timeout_micros, allowed_batch_sizes=allowed_batch_sizes, grad_timeout_micros=grad_timeout_micros, container=container, shared_name=shared_name, batching_queue=batching_queue, T=T) - end - end end @@ -6100,53 +11018,85 @@ end """ begin - function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - local desc - tf.with_op_name(name, "CollectiveBcastRecv") do - desc = tf.NodeDescription("CollectiveBcastRecv") - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) + begin + function collective_bcast_recv_graph(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastRecv") do + desc = tf.NodeDescription("CollectiveBcastRecv") + begin + end + begin + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function collective_bcast_recv_eager(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, 
shape=nothing) + desc = tf.EagerOp("CollectiveBcastRecv") + begin + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.in_eager_mode() + collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function collective_bcast_recv_eager(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - desc = tf.EagerOp("CollectiveBcastRecv") - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(collective_bcast_recv, [], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_recv(; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - if tf.in_eager_mode() - collective_bcast_recv_eager(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - else - collective_bcast_recv_graph(; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - end - end end @@ -6156,47 +11106,99 @@ end """ begin - function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) - local desc - tf.with_op_name(name, "BatchToSpaceND") do - desc = tf.NodeDescription("BatchToSpaceND") - input_ = convert(Tensor{Any}, input_) - block_shape_ = convert(Tensor{Int32}, block_shape_) - crops_ = convert(Tensor{Int32}, crops_) - (crops_,) = tf.tf_promote(crops_) - (input_,) = tf.tf_promote(input_) - (block_shape_,) = tf.tf_promote(block_shape_) - tf.add_input(desc, input_) - tf.add_input(desc, block_shape_) - tf.add_input(desc, crops_) - end - tf.Tensor(tf.Operation(desc)) - end - function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing) - desc = tf.EagerOp("BatchToSpaceND") - input_ = convert(tf.EagerTensor, input_) - block_shape_ = convert(tf.EagerTensor, block_shape_) - crops_ = convert(tf.EagerTensor, crops_) - tf.add_input(desc, input_) - tf.add_input(desc, 
block_shape_) - tf.add_input(desc, crops_) - desc["T"] = tf.data_type(input_) - desc["Tblock_shape"] = tf.data_type(block_shape_) - desc["Tcrops"] = tf.data_type(crops_) - res = tf.execute(desc) - node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) - if tf.in_eager_mode() - batch_to_space_nd_eager(input_, block_shape_, crops_; name=name) - else - batch_to_space_nd_graph(input_, block_shape_, crops_; name=name) + begin + function batch_to_space_nd_graph(input_, block_shape_, crops_; name=nothing) + local desc + tf.with_op_name(name, "BatchToSpaceND") do + desc = tf.NodeDescription("BatchToSpaceND") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + block_shape_ = convert(Tensor{Int32}, block_shape_) + begin + end + end + begin + crops_ = convert(Tensor{Int32}, crops_) + begin + end + end + begin + (crops_,) = tf.tf_promote(crops_) + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (block_shape_,) = tf.tf_promote(block_shape_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, block_shape_) + end + begin + tf.add_input(desc, crops_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_to_space_nd_eager(input_, block_shape_, crops_; name=nothing) + desc = tf.EagerOp("BatchToSpaceND") + input_ = convert(tf.EagerTensor, input_) + block_shape_ = convert(tf.EagerTensor, block_shape_) + crops_ = convert(tf.EagerTensor, crops_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, block_shape_) + end + begin + tf.add_input(desc, crops_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tblock_shape"] = tf.data_type(block_shape_) + end + begin + desc["Tcrops"] = tf.data_type(crops_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_to_space_nd, [input_, block_shape_, crops_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space_nd(input_, block_shape_, crops_; name=nothing) + if tf.in_eager_mode() + batch_to_space_nd_eager(input_, block_shape_, crops_; name=name) + else + batch_to_space_nd_graph(input_, block_shape_, crops_; name=name) + end end - end + end end @@ -6206,33 +11208,57 @@ end """ begin - function loop_cond_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "LoopCond") do - desc = tf.NodeDescription("LoopCond") - input_ = convert(Tensor{Bool}, input_) - tf.add_input(desc, input_) + begin + function loop_cond_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "LoopCond") do + desc = tf.NodeDescription("LoopCond") + begin + begin + input_ = convert(Tensor{Bool}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function loop_cond_eager(input_; name=nothing) - desc = tf.EagerOp("LoopCond") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(loop_cond, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - 
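# Hypothetical usage sketch; the module path, `constant`, and `Session` calls
# are assumptions about the surrounding package, not taken from the patch.
using TensorFlow
x = TensorFlow.constant(reshape(Float32.(1:4), 4, 1, 1, 1))
y = TensorFlow.Ops.batch_to_space_nd(x, Int32[2, 2], Int32[0 0; 0 0])
# eager mode: `y` is an EagerTensor holding the 1×2×2×1 rearrangement
# graph mode: `y` is a symbolic Tensor, evaluated via run(Session(), y)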
return res[1] + begin + function loop_cond_eager(input_; name=nothing) + desc = tf.EagerOp("LoopCond") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(loop_cond, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function loop_cond(input_; name=nothing) - if tf.in_eager_mode() - loop_cond_eager(input_; name=name) - else - loop_cond_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function loop_cond(input_; name=nothing) + if tf.in_eager_mode() + loop_cond_eager(input_; name=name) + else + loop_cond_graph(input_; name=name) + end end - end + end end @@ -6242,47 +11268,83 @@ end """ begin - function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "DepthToSpace") do - desc = tf.NodeDescription("DepthToSpace") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function depth_to_space_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "DepthToSpace") do + desc = tf.NodeDescription("DepthToSpace") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing) + desc = tf.EagerOp("DepthToSpace") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) + if tf.in_eager_mode() + depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format) + else + depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function depth_to_space_eager(input_; name=nothing, block_size=nothing, data_format=nothing) - desc = tf.EagerOp("DepthToSpace") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = 
tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(depth_to_space, [input_], name=nothing, block_size=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depth_to_space(input_; name=nothing, block_size=nothing, data_format=nothing) - if tf.in_eager_mode() - depth_to_space_eager(input_; name=name, block_size=block_size, data_format=data_format) - else - depth_to_space_graph(input_; name=name, block_size=block_size, data_format=data_format) - end - end end @@ -6292,41 +11354,73 @@ end """ begin - function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) - local desc - tf.with_op_name(name, "DestroyTemporaryVariable") do - desc = tf.NodeDescription("DestroyTemporaryVariable") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if var_name !== nothing - desc["var_name"] = Base.String(var_name) + begin + function destroy_temporary_variable_graph(ref_; name=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "DestroyTemporaryVariable") do + desc = tf.NodeDescription("DestroyTemporaryVariable") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + (ref_,) = tf.tf_promote(ref_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing) - desc = tf.EagerOp("DestroyTemporaryVariable") - ref_ = convert(tf.EagerTensor, ref_) - tf.add_input(desc, ref_) - if var_name !== nothing - desc["var_name"] = Base.String(var_name) - end - desc["T"] = tf.data_type(ref_) - res = tf.execute(desc) - node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function destroy_temporary_variable_eager(ref_; name=nothing, var_name=nothing) + desc = tf.EagerOp("DestroyTemporaryVariable") + ref_ = convert(tf.EagerTensor, ref_) + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + res = tf.execute(desc) + node = tf.TapeNode(destroy_temporary_variable, [ref_], name=nothing, var_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) - if tf.in_eager_mode() - destroy_temporary_variable_eager(ref_; name=name, var_name=var_name) - else - destroy_temporary_variable_graph(ref_; name=name, var_name=var_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_temporary_variable(ref_; name=nothing, var_name=nothing) + if tf.in_eager_mode() + destroy_temporary_variable_eager(ref_; name=name, var_name=var_name) + else + destroy_temporary_variable_graph(ref_; name=name, var_name=var_name) + end end - end + end end @@ -6336,97 +11430,185 @@ end """ begin - function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, 
seed=nothing, seed2=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "CudnnRNN") do - desc = tf.NodeDescription("CudnnRNN") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) + begin + function cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNN") do + desc = tf.NodeDescription("CudnnRNN") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_h_ = convert(Tensor{Any}, input_h_) + begin + end + end + begin + input_c_ = convert(Tensor{Any}, input_c_) + begin + end + end + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + desc = tf.EagerOp("CudnnRNN") + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + 
desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_h_) + end + begin + desc["T"] = tf.data_type(input_c_) + end + begin + desc["T"] = tf.data_type(params_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.in_eager_mode() + cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - desc = tf.EagerOp("CudnnRNN") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if 
tf.in_eager_mode() - cudnn_rnn_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - else - cudnn_rnn_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - end - end end @@ -6436,35 +11618,63 @@ end """ begin - function ref_identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "RefIdentity") do - desc = tf.NodeDescription("RefIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function ref_identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "RefIdentity") do + desc = tf.NodeDescription("RefIdentity") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ref_identity_eager(input_; name=nothing) - desc = tf.EagerOp("RefIdentity") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(ref_identity, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ref_identity_eager(input_; name=nothing) + desc = tf.EagerOp("RefIdentity") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_identity, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_identity(input_; name=nothing) - if tf.in_eager_mode() - ref_identity_eager(input_; name=name) - else - ref_identity_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_identity(input_; name=nothing) + if tf.in_eager_mode() + ref_identity_eager(input_; name=name) + else + ref_identity_graph(input_; name=name) + end end - end + end end @@ -6474,70 +11684,136 @@ end """ begin - function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3DGrad") do - desc = tf.NodeDescription("MaxPool3DGrad") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - (grad_,) = tf.tf_promote(grad_) - (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGrad") do + desc = tf.NodeDescription("MaxPool3DGrad") + begin + begin + orig_input_ = convert(Tensor{Float32}, orig_input_) + begin + end + end + begin + orig_output_ = 
convert(Tensor{Float32}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Float32}, grad_) + begin + end + end + begin + (grad_,) = tf.tf_promote(grad_) + end + begin + (orig_input_, orig_output_) = tf.tf_promote(orig_input_, orig_output_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3DGrad") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["TInput"] = tf.data_type(orig_input_) + end + begin + desc["TInput"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) end - function max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPool3DGrad") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = 
map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["TInput"] = tf.data_type(orig_input_) - desc["TInput"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool3d_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool3d_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -6547,65 +11823,121 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + momenta_ = convert(Tensor{Float32}, momenta_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + 
momenta_ = convert(tf.EagerTensor, momenta_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingMomentumParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - momenta_ = convert(tf.EagerTensor, momenta_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_momentum_parameters_grad_accum_debug, [parameters_, momenta_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters_grad_accum_debug(parameters_, momenta_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - 
load_tpu_embedding_momentum_parameters_grad_accum_debug_eager(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_momentum_parameters_grad_accum_debug_graph(parameters_, momenta_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -6615,63 +11947,123 @@ end """ begin - function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropInput") do - desc = tf.NodeDescription("Conv3DBackpropInput") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInput") do + desc = tf.NodeDescription("Conv3DBackpropInput") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropInput") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, 
strides=nothing, padding=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + else + conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("Conv3DBackpropInput") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - else - conv3d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - end - end end @@ -6681,59 +12073,95 @@ end """ begin - function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PaddingFIFOQueueV2") do - desc = tf.NodeDescription("PaddingFIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function padding_fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueueV2") do + desc = tf.NodeDescription("PaddingFIFOQueueV2") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function padding_fifo_queue_v2_eager(; name=nothing, 
component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PaddingFIFOQueueV2") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function padding_fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("PaddingFIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(padding_fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - padding_fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - padding_fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -6743,35 +12171,63 @@ end """ begin - function ref_exit_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "RefExit") do - desc = tf.NodeDescription("RefExit") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) + begin + function ref_exit_graph(data_; name=nothing) + local desc + tf.with_op_name(name, 
"RefExit") do + desc = tf.NodeDescription("RefExit") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ref_exit_eager(data_; name=nothing) - desc = tf.EagerOp("RefExit") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(ref_exit, [data_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ref_exit_eager(data_; name=nothing) + desc = tf.EagerOp("RefExit") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_exit, [data_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_exit(data_; name=nothing) - if tf.in_eager_mode() - ref_exit_eager(data_; name=name) - else - ref_exit_graph(data_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_exit(data_; name=nothing) + if tf.in_eager_mode() + ref_exit_eager(data_; name=name) + else + ref_exit_graph(data_; name=name) + end end - end + end end @@ -6781,59 +12237,95 @@ end """ begin - function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapClear") do - desc = tf.NodeDescription("MapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) + begin + function map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapClear") do + desc = tf.NodeDescription("MapClear") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapClear") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, 
dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -6843,37 +12335,69 @@ end """ begin - function encode_wav_graph(audio_, sample_rate_; name=nothing) - local desc - tf.with_op_name(name, "EncodeWav") do - desc = tf.NodeDescription("EncodeWav") - audio_ = convert(Tensor{Float32}, audio_) - sample_rate_ = convert(Tensor{Int32}, sample_rate_) - tf.add_input(desc, audio_) - tf.add_input(desc, sample_rate_) + begin + function encode_wav_graph(audio_, sample_rate_; name=nothing) + local desc + tf.with_op_name(name, "EncodeWav") do + desc = tf.NodeDescription("EncodeWav") + begin + begin + audio_ = convert(Tensor{Float32}, audio_) + begin + end + end + begin + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + begin + end + end + end + begin + begin + tf.add_input(desc, audio_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function encode_wav_eager(audio_, sample_rate_; name=nothing) - desc = tf.EagerOp("EncodeWav") - audio_ = convert(tf.EagerTensor, audio_) - sample_rate_ = convert(tf.EagerTensor, sample_rate_) - tf.add_input(desc, audio_) - tf.add_input(desc, sample_rate_) - res = tf.execute(desc) - node = tf.TapeNode(encode_wav, [audio_, sample_rate_], 
name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function encode_wav_eager(audio_, sample_rate_; name=nothing) + desc = tf.EagerOp("EncodeWav") + audio_ = convert(tf.EagerTensor, audio_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) + begin + begin + tf.add_input(desc, audio_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(encode_wav, [audio_, sample_rate_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing) - if tf.in_eager_mode() - encode_wav_eager(audio_, sample_rate_; name=name) - else - encode_wav_graph(audio_, sample_rate_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_wav(audio_, sample_rate_; name=nothing) + if tf.in_eager_mode() + encode_wav_eager(audio_, sample_rate_; name=name) + else + encode_wav_graph(audio_, sample_rate_; name=name) + end end - end + end end @@ -6883,43 +12407,87 @@ end """ begin - function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) - local desc - tf.with_op_name(name, "TensorSummaryV2") do - desc = tf.NodeDescription("TensorSummaryV2") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Any}, tensor_) - serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, serialized_summary_metadata_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing) - desc = tf.EagerOp("TensorSummaryV2") - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - serialized_summary_metadata_ = convert(tf.EagerTensor, serialized_summary_metadata_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, serialized_summary_metadata_) - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) - if tf.in_eager_mode() - tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name) - else - tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=name) + begin + function tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=nothing) + local desc + tf.with_op_name(name, "TensorSummaryV2") do + desc = tf.NodeDescription("TensorSummaryV2") + begin + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + serialized_summary_metadata_ = convert(Tensor{String}, serialized_summary_metadata_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, serialized_summary_metadata_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function 
tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=nothing) + desc = tf.EagerOp("TensorSummaryV2") + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + serialized_summary_metadata_ = convert(tf.EagerTensor, serialized_summary_metadata_) + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, serialized_summary_metadata_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_summary_v2, [tag_, tensor_, serialized_summary_metadata_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary_v2(tag_, tensor_, serialized_summary_metadata_; name=nothing) + if tf.in_eager_mode() + tensor_summary_v2_eager(tag_, tensor_, serialized_summary_metadata_; name=name) + else + tensor_summary_v2_graph(tag_, tensor_, serialized_summary_metadata_; name=name) + end end - end + end end @@ -6929,49 +12497,89 @@ end """ begin - function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueUpTo") do - desc = tf.NodeDescription("QueueDequeueUpTo") - handle_ = convert(Tensor{String}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) + begin + function queue_dequeue_up_to_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueUpTo") do + desc = tf.NodeDescription("QueueDequeueUpTo") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + n_ = convert(Tensor{Int32}, n_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueUpTo") + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else 
+ queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_dequeue_up_to_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeueUpTo") - handle_ = convert(tf.EagerTensor, handle_) - n_ = convert(tf.EagerTensor, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_up_to, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_up_to(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_up_to_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_up_to_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -6981,46 +12589,96 @@ end """ begin - function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) - local desc - tf.with_op_name(name, "MatrixBandPart") do - desc = tf.NodeDescription("MatrixBandPart") - input_ = convert(Tensor{Any}, input_) - num_lower_ = convert(Tensor{Int64}, num_lower_) - num_upper_ = convert(Tensor{Int64}, num_upper_) - (input_,) = tf.tf_promote(input_) - (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - end - tf.Tensor(tf.Operation(desc)) - end - function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) - desc = tf.EagerOp("MatrixBandPart") - input_ = convert(tf.EagerTensor, input_) - num_lower_ = convert(tf.EagerTensor, num_lower_) - num_upper_ = convert(tf.EagerTensor, num_upper_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - desc["T"] = tf.data_type(input_) - desc["Tindex"] = tf.data_type(num_lower_) - desc["Tindex"] = tf.data_type(num_upper_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) - if tf.in_eager_mode() - matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) - else - matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + begin + function matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "MatrixBandPart") do + desc = tf.NodeDescription("MatrixBandPart") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + num_lower_ = convert(Tensor{Int64}, num_lower_) + begin + end + end + begin + num_upper_ = convert(Tensor{Int64}, num_upper_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (num_lower_, num_upper_) = tf.tf_promote(num_lower_, num_upper_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, num_lower_) + end + 
begin + tf.add_input(desc, num_upper_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) + desc = tf.EagerOp("MatrixBandPart") + input_ = convert(tf.EagerTensor, input_) + num_lower_ = convert(tf.EagerTensor, num_lower_) + num_upper_ = convert(tf.EagerTensor, num_upper_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, num_lower_) + end + begin + tf.add_input(desc, num_upper_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tindex"] = tf.data_type(num_lower_) + end + begin + desc["Tindex"] = tf.data_type(num_upper_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + if tf.in_eager_mode() + matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) + else + matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + end end - end + end end @@ -7030,47 +12688,83 @@ end Copy Op. """ begin - function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - local desc - tf.with_op_name(name, "Copy") do - desc = tf.NodeDescription("Copy") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + begin + function copy_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "Copy") do + desc = tf.NodeDescription("Copy") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + desc = tf.EagerOp("Copy") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + if tf.in_eager_mode() + copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + else + copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + end end - end - tf.Tensor(tf.Operation(desc)) 
- end - function copy_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - desc = tf.EagerOp("Copy") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(copy, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - if tf.in_eager_mode() - copy_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - else - copy_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - end - end end @@ -7080,52 +12774,90 @@ end """ begin - function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "ShapeN") do - desc = tf.NodeDescription("ShapeN") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function shape_n_graph(input_; name=nothing, N=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "ShapeN") do + desc = tf.NodeDescription("ShapeN") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing) + desc = tf.EagerOp("ShapeN") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing) + if tf.in_eager_mode() + shape_n_eager(input_; name=name, N=N, out_type=out_type) + else + shape_n_graph(input_; name=name, N=N, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function shape_n_eager(input_; name=nothing, N=nothing, out_type=nothing) - desc = tf.EagerOp("ShapeN") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(input_) - res = 
tf.execute(desc) - node = tf.TapeNode(shape_n, [input_], name=nothing, N=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape_n(input_; name=nothing, N=nothing, out_type=nothing) - if tf.in_eager_mode() - shape_n_eager(input_; name=name, N=N, out_type=out_type) - else - shape_n_graph(input_; name=name, N=N, out_type=out_type) - end - end end @@ -7135,89 +12867,161 @@ end """ begin - function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - local desc - tf.with_op_name(name, "ExperimentalParseExampleDataset") do - desc = tf.NodeDescription("ExperimentalParseExampleDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, dense_defaults_) - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) + begin + function experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ExperimentalParseExampleDataset") do + desc = tf.NodeDescription("ExperimentalParseExampleDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + begin + end + end + begin + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, dense_defaults_) + end + end + begin + begin + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + end + begin + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_parse_example_dataset_eager(input_dataset_, 
num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + desc = tf.EagerOp("ExperimentalParseExampleDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, dense_defaults_) + end + end + begin + begin + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + end + begin + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.in_eager_mode() + experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - desc = tf.EagerOp("ExperimentalParseExampleDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) 
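+        # Editor's note: every generated wrapper in this file repeats the same
+        # three-part pattern seen above and below — a `*_graph` builder that
+        # records a node description on the default graph, a `*_eager` variant
+        # that runs the kernel immediately through `tf.execute` and records a
+        # `tf.TapeNode` for gradients, and a `tf.@op` function that dispatches
+        # on `tf.in_eager_mode()`. A minimal usage sketch of that dispatch
+        # (hypothetical values; assumes an eager context has been switched on
+        # by the mechanism this patch series is building up):
+        #
+        #     a = TensorFlow.constant([1, 2])
+        #     b = TensorFlow.constant([3, 4])
+        #     Ops.add(a, b)  # eager mode: executes Add now, returns a value;
+        #                    # graph mode: returns a graph `Tensor` node instead
+        #
+        # Only the op name, inputs, and attribute plumbing differ from section
+        # to section, so this note is not repeated below.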
- num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) - dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, dense_defaults_) - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_parse_example_dataset, [input_dataset_, num_parallel_calls_, dense_defaults_], name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parse_example_dataset(input_dataset_, num_parallel_calls_, dense_defaults_; name=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - if tf.in_eager_mode() - experimental_parse_example_dataset_eager(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - else - experimental_parse_example_dataset_graph(input_dataset_, num_parallel_calls_, dense_defaults_; name=name, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - end - end end @@ -7227,45 +13031,85 @@ end """ begin - function concat_graph(concat_dim_, values_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "Concat") do - desc = tf.NodeDescription("Concat") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function concat_graph(concat_dim_, values_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "Concat") do + desc = tf.NodeDescription("Concat") + begin + begin + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function concat_eager(concat_dim_, values_; name=nothing, N=nothing) + desc = tf.EagerOp("Concat") + concat_dim_ = 
convert(tf.EagerTensor, concat_dim_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_eager(concat_dim_, values_; name=name, N=N) + else + concat_graph(concat_dim_, values_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function concat_eager(concat_dim_, values_; name=nothing, N=nothing) - desc = tf.EagerOp("Concat") - concat_dim_ = convert(tf.EagerTensor, concat_dim_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(concat, [concat_dim_, values_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat(concat_dim_, values_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_eager(concat_dim_, values_; name=name, N=N) - else - concat_graph(concat_dim_, values_; name=name, N=N) - end - end end @@ -7275,47 +13119,83 @@ end """ begin - function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) - local desc - tf.with_op_name(name, "DataFormatDimMap") do - desc = tf.NodeDescription("DataFormatDimMap") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) + begin + function data_format_dim_map_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatDimMap") do + desc = tf.NodeDescription("DataFormatDimMap") + begin + begin + x_ = convert(Tensor{Int32}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + end + begin + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) + desc = tf.EagerOp("DataFormatDimMap") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + end + begin + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_dim_map(x_; name=nothing, 
src_format=nothing, dst_format=nothing) + if tf.in_eager_mode() + data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end - end - tf.Tensor(tf.Operation(desc)) end - function data_format_dim_map_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) - desc = tf.EagerOp("DataFormatDimMap") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) - end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(data_format_dim_map, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_dim_map(x_; name=nothing, src_format=nothing, dst_format=nothing) - if tf.in_eager_mode() - data_format_dim_map_eager(x_; name=name, src_format=src_format, dst_format=dst_format) - else - data_format_dim_map_graph(x_; name=name, src_format=src_format, dst_format=dst_format) - end - end end @@ -7325,41 +13205,65 @@ end """ begin - function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "IdentityReader") do - desc = tf.NodeDescription("IdentityReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function identity_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReader") do + desc = tf.NodeDescription("IdentityReader") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function identity_reader_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("IdentityReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function identity_reader_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("IdentityReader") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(identity_reader, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - identity_reader_eager(; name=name, container=container, 
shared_name=shared_name) - else - identity_reader_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + identity_reader_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -7369,35 +13273,63 @@ end """ begin - function softplus_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Softplus") do - desc = tf.NodeDescription("Softplus") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function softplus_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Softplus") do + desc = tf.NodeDescription("Softplus") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function softplus_eager(features_; name=nothing) - desc = tf.EagerOp("Softplus") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(softplus, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function softplus_eager(features_; name=nothing) + desc = tf.EagerOp("Softplus") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(softplus, [features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus(features_; name=nothing) - if tf.in_eager_mode() - softplus_eager(features_; name=name) - else - softplus_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus(features_; name=nothing) + if tf.in_eager_mode() + softplus_eager(features_; name=name) + else + softplus_graph(features_; name=name) + end end - end + end end @@ -7407,71 +13339,161 @@ end """ begin - function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do - desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = 
Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyProximalAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyProximalAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = 
convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -7481,126 +13503,248 @@ end """ begin - function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSingleSequenceExample") do - desc = tf.NodeDescription("ParseSingleSequenceExample") - serialized_ = convert(Tensor{String}, serialized_) - feature_list_dense_missing_assumed_empty_ = convert(Tensor{String}, feature_list_dense_missing_assumed_empty_) - context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_] - context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_] - feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_] - feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_] - context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] - debug_name_ = convert(Tensor{String}, debug_name_) - tf.add_input(desc, serialized_) - tf.add_input(desc, feature_list_dense_missing_assumed_empty_) - tf.add_input(desc, context_sparse_keys_) - tf.add_input(desc, context_dense_keys_) - tf.add_input(desc, feature_list_sparse_keys_) - tf.add_input(desc, feature_list_dense_keys_) - tf.add_input(desc, context_dense_defaults_) - tf.add_input(desc, debug_name_) - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) - end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = 
Base.Int(Nfeature_list_sparse) - end - if Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + begin + function parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleSequenceExample") do + desc = tf.NodeDescription("ParseSingleSequenceExample") + begin + begin + serialized_ = convert(Tensor{String}, serialized_) + begin + end + end + begin + feature_list_dense_missing_assumed_empty_ = convert(Tensor{String}, feature_list_dense_missing_assumed_empty_) + begin + end + end + begin + context_sparse_keys_ = [convert(Tensor{String}, x) for x = context_sparse_keys_] + begin + end + end + begin + context_dense_keys_ = [convert(Tensor{String}, x) for x = context_dense_keys_] + begin + end + end + begin + feature_list_sparse_keys_ = [convert(Tensor{String}, x) for x = feature_list_sparse_keys_] + begin + end + end + begin + feature_list_dense_keys_ = [convert(Tensor{String}, x) for x = feature_list_dense_keys_] + begin + end + end + begin + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + begin + end + end + begin + debug_name_ = convert(Tensor{String}, debug_name_) + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, feature_list_dense_missing_assumed_empty_) + end + begin + tf.add_input(desc, context_sparse_keys_) + end + begin + tf.add_input(desc, context_dense_keys_) + end + begin + tf.add_input(desc, feature_list_sparse_keys_) + end + begin + tf.add_input(desc, feature_list_dense_keys_) + end + begin + tf.add_input(desc, context_dense_defaults_) + end + begin + tf.add_input(desc, debug_name_) + end + end + begin + begin + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + end + begin + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + end + begin + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + end + begin + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + end + begin + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + end + begin + if Tcontext_dense !== 
nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + end + begin + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + end + begin + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + end + begin + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + end + begin + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:8 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + desc = tf.EagerOp("ParseSingleSequenceExample") + serialized_ = convert(tf.EagerTensor, serialized_) + feature_list_dense_missing_assumed_empty_ = convert(tf.EagerTensor, feature_list_dense_missing_assumed_empty_) + context_sparse_keys_ = convert(tf.EagerTensor, context_sparse_keys_) + context_dense_keys_ = convert(tf.EagerTensor, context_dense_keys_) + feature_list_sparse_keys_ = convert(tf.EagerTensor, feature_list_sparse_keys_) + feature_list_dense_keys_ = convert(tf.EagerTensor, feature_list_dense_keys_) + context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) + debug_name_ = convert(tf.EagerTensor, debug_name_) + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, feature_list_dense_missing_assumed_empty_) + end + begin + tf.add_input(desc, context_sparse_keys_) + end + begin + tf.add_input(desc, context_dense_keys_) + end + begin + tf.add_input(desc, feature_list_sparse_keys_) + end + begin + tf.add_input(desc, feature_list_dense_keys_) + end + begin + tf.add_input(desc, context_dense_defaults_) + end + begin + tf.add_input(desc, debug_name_) + end + end + begin + begin + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + end + begin + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + end + begin + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + end + begin + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + end + begin + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + end + begin + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + end + begin + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + end + begin + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + end + begin + if feature_list_sparse_types !== nothing + 
desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + end + begin + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.in_eager_mode() + parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:8 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function parse_single_sequence_example_eager(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, 
feature_list_dense_shapes=nothing) - desc = tf.EagerOp("ParseSingleSequenceExample") - serialized_ = convert(tf.EagerTensor, serialized_) - feature_list_dense_missing_assumed_empty_ = convert(tf.EagerTensor, feature_list_dense_missing_assumed_empty_) - context_sparse_keys_ = convert(tf.EagerTensor, context_sparse_keys_) - context_dense_keys_ = convert(tf.EagerTensor, context_dense_keys_) - feature_list_sparse_keys_ = convert(tf.EagerTensor, feature_list_sparse_keys_) - feature_list_dense_keys_ = convert(tf.EagerTensor, feature_list_dense_keys_) - context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) - debug_name_ = convert(tf.EagerTensor, debug_name_) - tf.add_input(desc, serialized_) - tf.add_input(desc, feature_list_dense_missing_assumed_empty_) - tf.add_input(desc, context_sparse_keys_) - tf.add_input(desc, context_dense_keys_) - tf.add_input(desc, feature_list_sparse_keys_) - tf.add_input(desc, feature_list_dense_keys_) - tf.add_input(desc, context_dense_defaults_) - tf.add_input(desc, debug_name_) - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) - end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) - end - if Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(parse_single_sequence_example, [serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_], name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_sequence_example(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - if tf.in_eager_mode() - parse_single_sequence_example_eager(serialized_, 
feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - else - parse_single_sequence_example_graph(serialized_, feature_list_dense_missing_assumed_empty_, context_sparse_keys_, context_dense_keys_, feature_list_sparse_keys_, feature_list_dense_keys_, context_dense_defaults_, debug_name_; name=name, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - end - end end @@ -7610,35 +13754,63 @@ end """ begin - function matrix_diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDiag") do - desc = tf.NodeDescription("MatrixDiag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) + begin + function matrix_diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiag") do + desc = tf.NodeDescription("MatrixDiag") + begin + begin + diagonal_ = convert(Tensor{Any}, diagonal_) + begin + end + end + begin + (diagonal_,) = tf.tf_promote(diagonal_) + end + end + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_diag_eager(diagonal_; name=nothing) - desc = tf.EagerOp("MatrixDiag") - diagonal_ = convert(tf.EagerTensor, diagonal_) - tf.add_input(desc, diagonal_) - desc["T"] = tf.data_type(diagonal_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_diag_eager(diagonal_; name=nothing) + desc = tf.EagerOp("MatrixDiag") + diagonal_ = convert(tf.EagerTensor, diagonal_) + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + begin + desc["T"] = tf.data_type(diagonal_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_diag, [diagonal_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag(diagonal_; name=nothing) - if tf.in_eager_mode() - matrix_diag_eager(diagonal_; name=name) - else - matrix_diag_graph(diagonal_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag(diagonal_; name=nothing) + if tf.in_eager_mode() + matrix_diag_eager(diagonal_; name=name) + else + matrix_diag_graph(diagonal_; name=name) + end end - end + end end @@ -7648,30 +13820,45 @@ end """ begin - function fact_graph(; name=nothing) - local desc - tf.with_op_name(name, "Fact") do - desc - tf.NodeDescription("Fact") + begin + 
function fact_graph(; name=nothing) + local desc + tf.with_op_name(name, "Fact") do + desc = tf.NodeDescription("Fact") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fact_eager(; name=nothing) - desc = tf.EagerOp("Fact") - res = tf.execute(desc) - node = tf.TapeNode(fact, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fact_eager(; name=nothing) + desc = tf.EagerOp("Fact") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(fact, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fact(; name=nothing) - if tf.in_eager_mode() - fact_eager(; name=name) - else - fact_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fact(; name=nothing) + if tf.in_eager_mode() + fact_eager(; name=name) + else + fact_graph(; name=name) + end end - end + end end @@ -7681,69 +13868,133 @@ end """ begin - function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradGrad") do - desc = tf.NodeDescription("MaxPoolGradGrad") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - grad_ = convert(Tensor{Any}, grad_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGrad") do + desc = tf.NodeDescription("MaxPoolGradGrad") + begin + begin + orig_input_ = convert(Tensor{Any}, orig_input_) + begin + end + end + begin + orig_output_ = convert(Tensor{Any}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradGrad") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ 
= convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPoolGradGrad") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -7753,45 +14004,85 @@ end """ begin - function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBilinearGrad") do - desc = tf.NodeDescription("ResizeBilinearGrad") - grads_ = 
convert(Tensor{Float32}, grads_) - original_image_ = convert(Tensor{Any}, original_image_) - (original_image_,) = tf.tf_promote(original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_bilinear_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinearGrad") do + desc = tf.NodeDescription("ResizeBilinearGrad") + begin + begin + grads_ = convert(Tensor{Float32}, grads_) + begin + end + end + begin + original_image_ = convert(Tensor{Any}, original_image_) + begin + end + end + begin + (original_image_,) = tf.tf_promote(original_image_) + end + end + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, original_image_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBilinearGrad") + grads_ = convert(tf.EagerTensor, grads_) + original_image_ = convert(tf.EagerTensor, original_image_) + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, original_image_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(original_image_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_bilinear_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeBilinearGrad") - grads_ = convert(tf.EagerTensor, grads_) - original_image_ = convert(tf.EagerTensor, original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(original_image_) - res = tf.execute(desc) - node = tf.TapeNode(resize_bilinear_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear_grad(grads_, original_image_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bilinear_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) - else - resize_bilinear_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) - end - end end @@ -7801,48 +14092,92 @@ end """ begin - function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) - local desc - tf.with_op_name(name, "BatchToSpace") do - desc = tf.NodeDescription("BatchToSpace") - input_ = 
convert(Tensor{Any}, input_) - crops_ = convert(Tensor{Int32}, crops_) - crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1) - (input_,) = tf.tf_promote(input_) - (crops_,) = tf.tf_promote(crops_) - tf.add_input(desc, input_) - tf.add_input(desc, crops_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) + begin + function batch_to_space_graph(input_, crops_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "BatchToSpace") do + desc = tf.NodeDescription("BatchToSpace") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + crops_ = convert(Tensor{Int32}, crops_) + begin + crops_ = crops_ - convert(tf.Tensor{eltype(crops_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (crops_,) = tf.tf_promote(crops_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, crops_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) + desc = tf.EagerOp("BatchToSpace") + input_ = convert(tf.EagerTensor, input_) + crops_ = convert(tf.EagerTensor, crops_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, crops_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(crops_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing) + if tf.in_eager_mode() + batch_to_space_eager(input_, crops_; name=name, block_size=block_size) + else + batch_to_space_graph(input_, crops_; name=name, block_size=block_size) + end end - end - tf.Tensor(tf.Operation(desc)) end - function batch_to_space_eager(input_, crops_; name=nothing, block_size=nothing) - desc = tf.EagerOp("BatchToSpace") - input_ = convert(tf.EagerTensor, input_) - crops_ = convert(tf.EagerTensor, crops_) - tf.add_input(desc, input_) - tf.add_input(desc, crops_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(crops_) - res = tf.execute(desc) - node = tf.TapeNode(batch_to_space, [input_, crops_], name=nothing, block_size=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_to_space(input_, crops_; name=nothing, block_size=nothing) - if tf.in_eager_mode() - batch_to_space_eager(input_, crops_; name=name, block_size=block_size) - else - batch_to_space_graph(input_, crops_; name=name, block_size=block_size) - end - end end @@ -7852,39 +14187,67 @@ end """ begin - function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) - local desc - tf.with_op_name(name, "OptionalFromValue") do - desc = tf.NodeDescription("OptionalFromValue") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = 
map(Base.identity, Toutput_types) + begin + function optional_from_value_graph(components_; name=nothing, Toutput_types=nothing) + local desc + tf.with_op_name(name, "OptionalFromValue") do + desc = tf.NodeDescription("OptionalFromValue") + begin + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) - desc = tf.EagerOp("OptionalFromValue") - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - res = tf.execute(desc) - node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function optional_from_value_eager(components_; name=nothing, Toutput_types=nothing) + desc = tf.EagerOp("OptionalFromValue") + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(optional_from_value, [components_], name=nothing, Toutput_types=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing) - if tf.in_eager_mode() - optional_from_value_eager(components_; name=name, Toutput_types=Toutput_types) - else - optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_from_value(components_; name=nothing, Toutput_types=nothing) + if tf.in_eager_mode() + optional_from_value_eager(components_; name=name, Toutput_types=Toutput_types) + else + optional_from_value_graph(components_; name=name, Toutput_types=Toutput_types) + end end - end + end end @@ -7894,41 +14257,79 @@ end """ begin - function xlogy_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Xlogy") do - desc = tf.NodeDescription("Xlogy") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - end - tf.Tensor(tf.Operation(desc)) - end - function xlogy_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Xlogy") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(xlogy, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function xlogy_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xlogy") do + desc = tf.NodeDescription("Xlogy") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, 
x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xlogy(x_, y_; name=nothing) - if tf.in_eager_mode() - xlogy_eager(x_, y_; name=name) - else - xlogy_graph(x_, y_; name=name) + begin + function xlogy_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Xlogy") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end end - end -end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(xlogy, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xlogy(x_, y_; name=nothing) + if tf.in_eager_mode() + xlogy_eager(x_, y_; name=name) + else + xlogy_graph(x_, y_; name=name) + end + end + end +end """ @@ -7937,40 +14338,78 @@ end """ begin - function cross_graph(a_, b_; name=nothing) - local desc - tf.with_op_name(name, "Cross") do - desc = tf.NodeDescription("Cross") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) + begin + function cross_graph(a_, b_; name=nothing) + local desc + tf.with_op_name(name, "Cross") do + desc = tf.NodeDescription("Cross") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + b_ = convert(Tensor{Any}, b_) + begin + end + end + begin + (a_, b_) = tf.tf_promote(a_, b_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function cross_eager(a_, b_; name=nothing) - desc = tf.EagerOp("Cross") - a_ = convert(tf.EagerTensor, a_) - b_ = convert(tf.EagerTensor, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(b_) - res = tf.execute(desc) - node = tf.TapeNode(cross, [a_, b_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function cross_eager(a_, b_; name=nothing) + desc = tf.EagerOp("Cross") + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(b_) + end + res = tf.execute(desc) + node = tf.TapeNode(cross, [a_, b_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross(a_, b_; name=nothing) - if tf.in_eager_mode() - cross_eager(a_, b_; name=name) - else - cross_graph(a_, b_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross(a_, b_; name=nothing) + if tf.in_eager_mode() + cross_eager(a_, b_; name=name) + else + cross_graph(a_, b_; name=name) + end end - end + end end @@ -7980,40 +14419,78 @@ end """ begin - function bitwise_and_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseAnd") do - desc = tf.NodeDescription("BitwiseAnd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - 
(x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function bitwise_and_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseAnd") do + desc = tf.NodeDescription("BitwiseAnd") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bitwise_and_eager(x_, y_; name=nothing) - desc = tf.EagerOp("BitwiseAnd") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bitwise_and_eager(x_, y_; name=nothing) + desc = tf.EagerOp("BitwiseAnd") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(bitwise_and, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_and(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_and_eager(x_, y_; name=name) - else - bitwise_and_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_and(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_and_eager(x_, y_; name=name) + else + bitwise_and_graph(x_, y_; name=name) + end end - end + end end @@ -8023,41 +14500,81 @@ end """ begin - function broadcast_to_graph(input_, shape_; name=nothing) - local desc - tf.with_op_name(name, "BroadcastTo") do - desc = tf.NodeDescription("BroadcastTo") - input_ = convert(Tensor{Any}, input_) - shape_ = convert(Tensor{Int32}, shape_) - (input_,) = tf.tf_promote(input_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, input_) - tf.add_input(desc, shape_) + begin + function broadcast_to_graph(input_, shape_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastTo") do + desc = tf.NodeDescription("BroadcastTo") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function broadcast_to_eager(input_, shape_; name=nothing) - desc = tf.EagerOp("BroadcastTo") - input_ = convert(tf.EagerTensor, input_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, input_) - tf.add_input(desc, shape_) - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + 
function broadcast_to_eager(input_, shape_; name=nothing) + desc = tf.EagerOp("BroadcastTo") + input_ = convert(tf.EagerTensor, input_) + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(broadcast_to, [input_, shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_to(input_, shape_; name=nothing) - if tf.in_eager_mode() - broadcast_to_eager(input_, shape_; name=name) - else - broadcast_to_graph(input_, shape_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_to(input_, shape_; name=nothing) + if tf.in_eager_mode() + broadcast_to_eager(input_, shape_; name=name) + else + broadcast_to_graph(input_, shape_; name=name) + end end - end + end end @@ -8067,40 +14584,78 @@ end """ begin - function elu_grad_graph(gradients_, outputs_; name=nothing) - local desc - tf.with_op_name(name, "EluGrad") do - desc = tf.NodeDescription("EluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - outputs_ = convert(Tensor{Any}, outputs_) - (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) + begin + function elu_grad_graph(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "EluGrad") do + desc = tf.NodeDescription("EluGrad") + begin + begin + gradients_ = convert(Tensor{Any}, gradients_) + begin + end + end + begin + outputs_ = convert(Tensor{Any}, outputs_) + begin + end + end + begin + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, outputs_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function elu_grad_eager(gradients_, outputs_; name=nothing) - desc = tf.EagerOp("EluGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - outputs_ = convert(tf.EagerTensor, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(outputs_) - res = tf.execute(desc) - node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function elu_grad_eager(gradients_, outputs_; name=nothing) + desc = tf.EagerOp("EluGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + outputs_ = convert(tf.EagerTensor, outputs_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, outputs_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(outputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(elu_grad, [gradients_, outputs_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing) - if tf.in_eager_mode() - elu_grad_eager(gradients_, outputs_; name=name) - else - elu_grad_graph(gradients_, outputs_; name=name) + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu_grad(gradients_, outputs_; name=nothing) + if tf.in_eager_mode() + elu_grad_eager(gradients_, outputs_; name=name) + else + elu_grad_graph(gradients_, outputs_; name=name) + end end - end + end end @@ -8110,126 +14665,280 @@ end """ begin - function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNBackprop") do - desc = tf.NodeDescription("CudnnRNNBackprop") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - output_ = convert(Tensor{Any}, output_) - output_h_ = convert(Tensor{Any}, output_h_) - output_c_ = convert(Tensor{Any}, output_c_) - output_backprop_ = convert(Tensor{Any}, output_backprop_) - output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) - output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) - reserve_space_ = convert(Tensor{Any}, reserve_space_) - (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNBackprop") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - output_ = convert(tf.EagerTensor, output_) - output_h_ = convert(tf.EagerTensor, output_h_) - output_c_ = convert(tf.EagerTensor, output_c_) - output_backprop_ = convert(tf.EagerTensor, output_backprop_) - output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) - output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) - reserve_space_ = convert(tf.EagerTensor, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - 
tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - desc["T"] = tf.data_type(output_) - desc["T"] = tf.data_type(output_h_) - desc["T"] = tf.data_type(output_c_) - desc["T"] = tf.data_type(output_backprop_) - desc["T"] = tf.data_type(output_h_backprop_) - desc["T"] = tf.data_type(output_c_backprop_) - desc["T"] = tf.data_type(reserve_space_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + begin + function cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackprop") do + desc = tf.NodeDescription("CudnnRNNBackprop") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_h_ = convert(Tensor{Any}, input_h_) + begin + end + end + begin + input_c_ = convert(Tensor{Any}, input_c_) + begin + end + end + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + output_ = convert(Tensor{Any}, output_) + begin + end + end + begin + output_h_ = convert(Tensor{Any}, output_h_) + begin + end + end + begin + output_c_ = convert(Tensor{Any}, output_c_) + begin + end + end + begin + output_backprop_ = convert(Tensor{Any}, output_backprop_) + begin + end + end + begin + output_h_backprop_ = convert(Tensor{Any}, 
output_h_backprop_) + begin + end + end + begin + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + begin + end + end + begin + reserve_space_ = convert(Tensor{Any}, reserve_space_) + begin + end + end + begin + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, output_) + end + begin + tf.add_input(desc, output_h_) + end + begin + tf.add_input(desc, output_c_) + end + begin + tf.add_input(desc, output_backprop_) + end + begin + tf.add_input(desc, output_h_backprop_) + end + begin + tf.add_input(desc, output_c_backprop_) + end + begin + tf.add_input(desc, reserve_space_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNBackprop") + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + output_ = convert(tf.EagerTensor, output_) + output_h_ = convert(tf.EagerTensor, output_h_) + output_c_ = convert(tf.EagerTensor, output_c_) + output_backprop_ = convert(tf.EagerTensor, output_backprop_) + output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) + output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) + reserve_space_ = convert(tf.EagerTensor, reserve_space_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, output_) + end + begin + tf.add_input(desc, output_h_) + end + begin + tf.add_input(desc, output_c_) + end + begin + tf.add_input(desc, output_backprop_) + end + begin + tf.add_input(desc, output_h_backprop_) + end + begin + tf.add_input(desc, output_c_backprop_) + end + begin + tf.add_input(desc, reserve_space_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + 
desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_h_) + end + begin + desc["T"] = tf.data_type(input_c_) + end + begin + desc["T"] = tf.data_type(params_) + end + begin + desc["T"] = tf.data_type(output_) + end + begin + desc["T"] = tf.data_type(output_h_) + end + begin + desc["T"] = tf.data_type(output_c_) + end + begin + desc["T"] = tf.data_type(output_backprop_) + end + begin + desc["T"] = tf.data_type(output_h_backprop_) + end + begin + desc["T"] = tf.data_type(output_c_backprop_) + end + begin + desc["T"] = tf.data_type(reserve_space_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_backprop, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_backprop_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_backprop_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end + end end @@ -8239,39 +14948,67 @@ end """ begin - function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) - local desc - tf.with_op_name(name, "StringToHashBucketFast") do - desc = tf.NodeDescription("StringToHashBucketFast") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) + begin + function string_to_hash_bucket_fast_graph(input_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketFast") do + desc = tf.NodeDescription("StringToHashBucketFast") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) - desc = tf.EagerOp("StringToHashBucketFast") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - res = tf.execute(desc) - node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, 
num_buckets=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function string_to_hash_bucket_fast_eager(input_; name=nothing, num_buckets=nothing) + desc = tf.EagerOp("StringToHashBucketFast") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket_fast, [input_], name=nothing, num_buckets=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets) - else - string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_fast(input_; name=nothing, num_buckets=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_fast_eager(input_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_fast_graph(input_; name=name, num_buckets=num_buckets) + end end - end + end end @@ -8281,59 +15018,95 @@ end """ begin - function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "MutableHashTable") do - desc = tf.NodeDescription("MutableHashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) + begin + function mutable_hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTable") do + desc = tf.NodeDescription("MutableHashTable") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("MutableHashTable") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + 
end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mutable_hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - desc = tf.EagerOp("MutableHashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - mutable_hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - mutable_hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - end - end end @@ -8343,35 +15116,63 @@ end """ begin - function relu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Relu") do - desc = tf.NodeDescription("Relu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function relu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu") do + desc = tf.NodeDescription("Relu") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function relu_eager(features_; name=nothing) - desc = tf.EagerOp("Relu") - features_ = convert(tf.EagerTensor, 
features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(relu, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function relu_eager(features_; name=nothing) + desc = tf.EagerOp("Relu") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(relu, [features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu(features_; name=nothing) - if tf.in_eager_mode() - relu_eager(features_; name=name) - else - relu_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu(features_; name=nothing) + if tf.in_eager_mode() + relu_eager(features_; name=name) + else + relu_graph(features_; name=name) + end end - end + end end @@ -8381,45 +15182,85 @@ end """ begin - function nth_element_graph(input_, n_; name=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "NthElement") do - desc = tf.NodeDescription("NthElement") - input_ = convert(Tensor{Any}, input_) - n_ = convert(Tensor{Int32}, n_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, n_) - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) + begin + function nth_element_graph(input_, n_; name=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "NthElement") do + desc = tf.NodeDescription("NthElement") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + n_ = convert(Tensor{Int32}, n_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function nth_element_eager(input_, n_; name=nothing, reverse=nothing) + desc = tf.EagerOp("NthElement") + input_ = convert(tf.EagerTensor, input_) + n_ = convert(tf.EagerTensor, n_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing) + if tf.in_eager_mode() + nth_element_eager(input_, n_; name=name, reverse=reverse) + else + nth_element_graph(input_, n_; name=name, reverse=reverse) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function nth_element_eager(input_, n_; name=nothing, reverse=nothing) - desc = tf.EagerOp("NthElement") - input_ = convert(tf.EagerTensor, input_) - n_ = convert(tf.EagerTensor, n_) - tf.add_input(desc, input_) - tf.add_input(desc, n_) - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = 
tf.TapeNode(nth_element, [input_, n_], name=nothing, reverse=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nth_element(input_, n_; name=nothing, reverse=nothing) - if tf.in_eager_mode() - nth_element_eager(input_, n_; name=name, reverse=reverse) - else - nth_element_graph(input_, n_; name=name, reverse=reverse) - end - end end @@ -8429,35 +15270,63 @@ end """ begin - function softsign_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Softsign") do - desc = tf.NodeDescription("Softsign") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function softsign_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Softsign") do + desc = tf.NodeDescription("Softsign") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function softsign_eager(features_; name=nothing) - desc = tf.EagerOp("Softsign") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(softsign, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function softsign_eager(features_; name=nothing) + desc = tf.EagerOp("Softsign") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(softsign, [features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign(features_; name=nothing) - if tf.in_eager_mode() - softsign_eager(features_; name=name) - else - softsign_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign(features_; name=nothing) + if tf.in_eager_mode() + softsign_eager(features_; name=name) + else + softsign_graph(features_; name=name) + end end - end + end end @@ -8467,83 +15336,143 @@ end """ begin - function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - local desc - tf.with_op_name(name, "MutableDenseHashTable") do - desc = tf.NodeDescription("MutableDenseHashTable") - empty_key_ = convert(Tensor{Any}, empty_key_) - (empty_key_,) = tf.tf_promote(empty_key_) - tf.add_input(desc, empty_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = 
Base.identity(value_shape) + begin + function mutable_dense_hash_table_graph(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTable") do + desc = tf.NodeDescription("MutableDenseHashTable") + begin + begin + empty_key_ = convert(Tensor{Any}, empty_key_) + begin + end + end + begin + (empty_key_,) = tf.tf_promote(empty_key_) + end + end + begin + begin + tf.add_input(desc, empty_key_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + begin + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + end + begin + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + desc = tf.EagerOp("MutableDenseHashTable") + empty_key_ = convert(tf.EagerTensor, empty_key_) + begin + begin + tf.add_input(desc, empty_key_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + begin + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + end + begin + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + end + begin + desc["key_dtype"] = tf.data_type(empty_key_) + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, 
max_load_factor=nothing) + if tf.in_eager_mode() + mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) - end - if max_load_factor !== nothing - desc["max_load_factor"] = Base.identity(max_load_factor) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mutable_dense_hash_table_eager(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - desc = tf.EagerOp("MutableDenseHashTable") - empty_key_ = convert(tf.EagerTensor, empty_key_) - tf.add_input(desc, empty_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) - end - if max_load_factor !== nothing - desc["max_load_factor"] = Base.identity(max_load_factor) - end - desc["key_dtype"] = tf.data_type(empty_key_) - res = tf.execute(desc) - node = tf.TapeNode(mutable_dense_hash_table, [empty_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table(empty_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - if tf.in_eager_mode() - mutable_dense_hash_table_eager(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) - else - mutable_dense_hash_table_graph(empty_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) - end - end end @@ -8553,30 +15482,45 @@ end An op that shuts down a running distributed TPU system. 
The Op returns """ begin - function _shutdown_distributed_tpu_graph(; name=nothing) - local desc - tf.with_op_name(name, "_ShutdownDistributedTPU") do - desc - tf.NodeDescription("_ShutdownDistributedTPU") + begin + function _shutdown_distributed_tpu_graph(; name=nothing) + local desc + tf.with_op_name(name, "_ShutdownDistributedTPU") do + desc = tf.NodeDescription("_ShutdownDistributedTPU") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _shutdown_distributed_tpu_eager(; name=nothing) - desc = tf.EagerOp("_ShutdownDistributedTPU") - res = tf.execute(desc) - node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _shutdown_distributed_tpu_eager(; name=nothing) + desc = tf.EagerOp("_ShutdownDistributedTPU") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(_shutdown_distributed_tpu, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _shutdown_distributed_tpu(; name=nothing) - if tf.in_eager_mode() - _shutdown_distributed_tpu_eager(; name=name) - else - _shutdown_distributed_tpu_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _shutdown_distributed_tpu(; name=nothing) + if tf.in_eager_mode() + _shutdown_distributed_tpu_eager(; name=name) + else + _shutdown_distributed_tpu_graph(; name=name) + end end - end + end end @@ -8586,40 +15530,78 @@ end """ begin - function polygamma_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Polygamma") do - desc = tf.NodeDescription("Polygamma") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) + begin + function polygamma_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Polygamma") do + desc = tf.NodeDescription("Polygamma") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (a_, x_) = tf.tf_promote(a_, x_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function polygamma_eager(a_, x_; name=nothing) - desc = tf.EagerOp("Polygamma") - a_ = convert(tf.EagerTensor, a_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(polygamma, [a_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function polygamma_eager(a_, x_; name=nothing) + desc = tf.EagerOp("Polygamma") + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(polygamma, [a_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function polygamma(a_, x_; name=nothing) 
- if tf.in_eager_mode() - polygamma_eager(a_, x_; name=name) - else - polygamma_graph(a_, x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function polygamma(a_, x_; name=nothing) + if tf.in_eager_mode() + polygamma_eager(a_, x_; name=name) + else + polygamma_graph(a_, x_; name=name) + end end - end + end end @@ -8629,47 +15611,83 @@ end """ begin - function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) - local desc - tf.with_op_name(name, "NcclReduce") do - desc = tf.NodeDescription("NcclReduce") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) + begin + function nccl_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing) + local desc + tf.with_op_name(name, "NcclReduce") do + desc = tf.NodeDescription("NcclReduce") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) + desc = tf.EagerOp("NcclReduce") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) + if tf.in_eager_mode() + nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices) + else + nccl_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function nccl_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing) - desc = tf.EagerOp("NcclReduce") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(nccl_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing) - if tf.in_eager_mode() - nccl_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices) - else - nccl_reduce_graph(input_; name=name, 
reduction=reduction, num_devices=num_devices) - end - end end @@ -8679,48 +15697,92 @@ end """ begin - function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ArgMax") do - desc = tf.NodeDescription("ArgMax") - input_ = convert(Tensor{Any}, input_) - dimension_ = convert(Tensor{Int32}, dimension_) - dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) + begin + function arg_max_graph(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMax") do + desc = tf.NodeDescription("ArgMax") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + dimension_ = convert(Tensor{Int32}, dimension_) + begin + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (dimension_,) = tf.tf_promote(dimension_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dimension_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) + desc = tf.EagerOp("ArgMax") + input_ = convert(tf.EagerTensor, input_) + dimension_ = convert(tf.EagerTensor, dimension_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dimension_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(dimension_) + end + res = tf.execute(desc) + node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + arg_max_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_max_graph(input_, dimension_; name=name, output_type=output_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function arg_max_eager(input_, dimension_; name=nothing, output_type=nothing) - desc = tf.EagerOp("ArgMax") - input_ = convert(tf.EagerTensor, input_) - dimension_ = convert(tf.EagerTensor, dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(dimension_) - res = tf.execute(desc) - node = tf.TapeNode(arg_max, [input_, dimension_], name=nothing, output_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_max(input_, dimension_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - arg_max_eager(input_, dimension_; name=name, output_type=output_type) - else - arg_max_graph(input_, dimension_; name=name, output_type=output_type) - end - end end @@ -8730,40 +15792,78 @@ end 
""" begin - function matrix_set_diag_graph(input_, diagonal_; name=nothing) - local desc - tf.with_op_name(name, "MatrixSetDiag") do - desc = tf.NodeDescription("MatrixSetDiag") - input_ = convert(Tensor{Any}, input_) - diagonal_ = convert(Tensor{Any}, diagonal_) - (input_, diagonal_) = tf.tf_promote(input_, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) + begin + function matrix_set_diag_graph(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "MatrixSetDiag") do + desc = tf.NodeDescription("MatrixSetDiag") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + diagonal_ = convert(Tensor{Any}, diagonal_) + begin + end + end + begin + (input_, diagonal_) = tf.tf_promote(input_, diagonal_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_set_diag_eager(input_, diagonal_; name=nothing) - desc = tf.EagerOp("MatrixSetDiag") - input_ = convert(tf.EagerTensor, input_) - diagonal_ = convert(tf.EagerTensor, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(diagonal_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_set_diag_eager(input_, diagonal_; name=nothing) + desc = tf.EagerOp("MatrixSetDiag") + input_ = convert(tf.EagerTensor, input_) + diagonal_ = convert(tf.EagerTensor, diagonal_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(diagonal_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_set_diag, [input_, diagonal_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing) - if tf.in_eager_mode() - matrix_set_diag_eager(input_, diagonal_; name=name) - else - matrix_set_diag_graph(input_, diagonal_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_set_diag(input_, diagonal_; name=nothing) + if tf.in_eager_mode() + matrix_set_diag_eager(input_, diagonal_; name=name) + else + matrix_set_diag_graph(input_, diagonal_; name=name) + end end - end + end end @@ -8773,47 +15873,99 @@ end """ begin - function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) - local desc - tf.with_op_name(name, "SpaceToBatchND") do - desc = tf.NodeDescription("SpaceToBatchND") - input_ = convert(Tensor{Any}, input_) - block_shape_ = convert(Tensor{Int32}, block_shape_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - (block_shape_,) = tf.tf_promote(block_shape_) - tf.add_input(desc, input_) - tf.add_input(desc, block_shape_) - tf.add_input(desc, paddings_) - end - tf.Tensor(tf.Operation(desc)) - end - function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) - desc = tf.EagerOp("SpaceToBatchND") - input_ = convert(tf.EagerTensor, input_) - block_shape_ = convert(tf.EagerTensor, block_shape_) - paddings_ = 
convert(tf.EagerTensor, paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, block_shape_) - tf.add_input(desc, paddings_) - desc["T"] = tf.data_type(input_) - desc["Tblock_shape"] = tf.data_type(block_shape_) - desc["Tpaddings"] = tf.data_type(paddings_) - res = tf.execute(desc) - node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) - if tf.in_eager_mode() - space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name) - else - space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name) + begin + function space_to_batch_nd_graph(input_, block_shape_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "SpaceToBatchND") do + desc = tf.NodeDescription("SpaceToBatchND") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + block_shape_ = convert(Tensor{Int32}, block_shape_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + begin + (block_shape_,) = tf.tf_promote(block_shape_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, block_shape_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function space_to_batch_nd_eager(input_, block_shape_, paddings_; name=nothing) + desc = tf.EagerOp("SpaceToBatchND") + input_ = convert(tf.EagerTensor, input_) + block_shape_ = convert(tf.EagerTensor, block_shape_) + paddings_ = convert(tf.EagerTensor, paddings_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, block_shape_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tblock_shape"] = tf.data_type(block_shape_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + res = tf.execute(desc) + node = tf.TapeNode(space_to_batch_nd, [input_, block_shape_, paddings_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch_nd(input_, block_shape_, paddings_; name=nothing) + if tf.in_eager_mode() + space_to_batch_nd_eager(input_, block_shape_, paddings_; name=name) + else + space_to_batch_nd_graph(input_, block_shape_, paddings_; name=name) + end end - end + end end @@ -8823,46 +15975,88 @@ end """ begin - function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseReshape") do - desc = tf.NodeDescription("SparseReshape") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - new_shape_ = convert(Tensor{Int64}, new_shape_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, new_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) - desc = tf.EagerOp("SparseReshape") - input_indices_ = 
convert(tf.EagerTensor, input_indices_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - new_shape_ = convert(tf.EagerTensor, new_shape_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, new_shape_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) - if tf.in_eager_mode() - sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name) - else - sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name) + begin + function sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReshape") do + desc = tf.NodeDescription("SparseReshape") + begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + new_shape_ = convert(Tensor{Int64}, new_shape_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, new_shape_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=nothing) + desc = tf.EagerOp("SparseReshape") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + new_shape_ = convert(tf.EagerTensor, new_shape_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, new_shape_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reshape, [input_indices_, input_shape_, new_shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reshape(input_indices_, input_shape_, new_shape_; name=nothing) + if tf.in_eager_mode() + sparse_reshape_eager(input_indices_, input_shape_, new_shape_; name=name) + else + sparse_reshape_graph(input_indices_, input_shape_, new_shape_; name=name) + end end - end + end end @@ -8872,49 +16066,89 @@ end """ begin - function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "OptimizeDataset") do - desc = tf.NodeDescription("OptimizeDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - optimizations_ = convert(Tensor{String}, optimizations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, optimizations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function optimize_dataset_graph(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptimizeDataset") do + desc = tf.NodeDescription("OptimizeDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + optimizations_ = 
convert(Tensor{String}, optimizations_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, optimizations_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("OptimizeDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + optimizations_ = convert(tf.EagerTensor, optimizations_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, optimizations_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function optimize_dataset_eager(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("OptimizeDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - optimizations_ = convert(tf.EagerTensor, optimizations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, optimizations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(optimize_dataset, [input_dataset_, optimizations_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optimize_dataset(input_dataset_, optimizations_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - optimize_dataset_eager(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) - else - optimize_dataset_graph(input_dataset_, optimizations_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -8924,48 +16158,92 @@ end """ begin - function concat_v2_graph(values_, axis_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ConcatV2") do - desc = tf.NodeDescription("ConcatV2") - values_ = [convert(Tensor{Any}, x) for x = values_] - axis_ = convert(Tensor{Int32}, axis_) 
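# (the next line shifts the just-converted Int32 axis by -1, translating
#  Julia's 1-based `axis` argument into TensorFlow's 0-based Tidx value;
#  the reformatted builder below keeps the same two-step normalization)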
- axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (values_,) = tf.tf_promote(values_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, values_) - tf.add_input(desc, axis_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function concat_v2_graph(values_, axis_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatV2") do + desc = tf.NodeDescription("ConcatV2") + begin + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + axis_ = convert(Tensor{Int32}, axis_) + begin + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + end + end + begin + (values_,) = tf.tf_promote(values_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function concat_v2_eager(values_, axis_; name=nothing, N=nothing) + desc = tf.EagerOp("ConcatV2") + values_ = convert(tf.EagerTensor, values_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + begin + desc["Tidx"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(concat_v2, [values_, axis_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_v2_eager(values_, axis_; name=name, N=N) + else + concat_v2_graph(values_, axis_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function concat_v2_eager(values_, axis_; name=nothing, N=nothing) - desc = tf.EagerOp("ConcatV2") - values_ = convert(tf.EagerTensor, values_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, values_) - tf.add_input(desc, axis_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(values_) - desc["Tidx"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(concat_v2, [values_, axis_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_v2(values_, axis_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_v2_eager(values_, axis_; name=name, N=N) - else - concat_v2_graph(values_, axis_; name=name, N=N) - end - end end @@ -8975,75 +16253,173 @@ end """ begin - function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdadelta") do - desc = tf.NodeDescription("ResourceSparseApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, 
epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyAdadelta") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - accum_update_ = convert(tf.EagerTensor, accum_update_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdadelta") do + desc = tf.NodeDescription("ResourceSparseApplyAdadelta") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + accum_update_ = convert(Tensor{Any}, accum_update_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + 
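# (inputs are registered in the exact positional order the
#  ResourceSparseApplyAdadelta kernel expects: var, accum, accum_update,
#  lr, rho, epsilon, grad, indices)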
begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyAdadelta") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -9053,41 +16429,81 @@ end """ begin - function tile_graph(input_, multiples_; name=nothing) - local desc - tf.with_op_name(name, "Tile") do - desc = tf.NodeDescription("Tile") - input_ = convert(Tensor{Any}, input_) - multiples_ = convert(Tensor{Int32}, multiples_) - (input_,) = tf.tf_promote(input_) - (multiples_,) = tf.tf_promote(multiples_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) + begin + function tile_graph(input_, multiples_; name=nothing) + local desc + tf.with_op_name(name, "Tile") do + desc = tf.NodeDescription("Tile") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + multiples_ = convert(Tensor{Int32}, multiples_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (multiples_,) = tf.tf_promote(multiples_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, multiples_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tile_eager(input_, multiples_; name=nothing) - desc = tf.EagerOp("Tile") - input_ = 
convert(tf.EagerTensor, input_) - multiples_ = convert(tf.EagerTensor, multiples_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) - desc["T"] = tf.data_type(input_) - desc["Tmultiples"] = tf.data_type(multiples_) - res = tf.execute(desc) - node = tf.TapeNode(tile, [input_, multiples_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tile_eager(input_, multiples_; name=nothing) + desc = tf.EagerOp("Tile") + input_ = convert(tf.EagerTensor, input_) + multiples_ = convert(tf.EagerTensor, multiples_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, multiples_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tmultiples"] = tf.data_type(multiples_) + end + res = tf.execute(desc) + node = tf.TapeNode(tile, [input_, multiples_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile(input_, multiples_; name=nothing) - if tf.in_eager_mode() - tile_eager(input_, multiples_; name=name) - else - tile_graph(input_, multiples_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile(input_, multiples_; name=nothing) + if tf.in_eager_mode() + tile_eager(input_, multiples_; name=name) + else + tile_graph(input_, multiples_; name=name) + end end - end + end end @@ -9097,41 +16513,65 @@ end """ begin - function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MutexV2") do - desc = tf.NodeDescription("MutexV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function mutex_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MutexV2") do + desc = tf.NodeDescription("MutexV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function mutex_v2_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MutexV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(mutex_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function mutex_v2_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MutexV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mutex_v2, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - mutex_v2_eager(; 
name=name, container=container, shared_name=shared_name) - else - mutex_v2_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + mutex_v2_eager(; name=name, container=container, shared_name=shared_name) + else + mutex_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -9141,49 +16581,97 @@ end """ begin - function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "SerializeManySparse") do - desc = tf.NodeDescription("SerializeManySparse") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "SerializeManySparse") do + desc = tf.NodeDescription("SerializeManySparse") + begin + begin + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + begin + end + end + begin + sparse_values_ = convert(Tensor{Any}, sparse_values_) + begin + end + end + begin + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + begin + end + end + begin + (sparse_values_,) = tf.tf_promote(sparse_values_) + end + end + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + desc = tf.EagerOp("SerializeManySparse") + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(sparse_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(serialize_many_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function 
serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - desc = tf.EagerOp("SerializeManySparse") - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_values_ = convert(tf.EagerTensor, sparse_values_) - sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(sparse_values_) - res = tf.execute(desc) - node = tf.TapeNode(serialize_many_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_many_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - serialize_many_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - else - serialize_many_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - end - end end @@ -9193,49 +16681,89 @@ end An op enabling differentiation of TPU Embeddings. """ begin - function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) - local desc - tf.with_op_name(name, "TPUEmbeddingActivations") do - desc = tf.NodeDescription("TPUEmbeddingActivations") - embedding_variable_ = convert(Tensor{Float32}, embedding_variable_) - sliced_activations_ = convert(Tensor{Float32}, sliced_activations_) - tf.add_input(desc, embedding_variable_) - tf.add_input(desc, sliced_activations_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + local desc + tf.with_op_name(name, "TPUEmbeddingActivations") do + desc = tf.NodeDescription("TPUEmbeddingActivations") + begin + begin + embedding_variable_ = convert(Tensor{Float32}, embedding_variable_) + begin + end + end + begin + sliced_activations_ = convert(Tensor{Float32}, sliced_activations_) + begin + end + end + end + begin + begin + tf.add_input(desc, embedding_variable_) + end + begin + tf.add_input(desc, sliced_activations_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if lookup_id !== nothing + desc["lookup_id"] = Base.Int(lookup_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + desc = tf.EagerOp("TPUEmbeddingActivations") + embedding_variable_ = convert(tf.EagerTensor, embedding_variable_) + sliced_activations_ = convert(tf.EagerTensor, sliced_activations_) + begin + begin + tf.add_input(desc, embedding_variable_) + end + begin + tf.add_input(desc, sliced_activations_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if lookup_id !== nothing + desc["lookup_id"] = Base.Int(lookup_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_embedding_activations, [embedding_variable_, sliced_activations_], name=nothing, table_id=nothing, lookup_id=nothing, res) + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) + if tf.in_eager_mode() + tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + else + tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) + end end - if lookup_id !== nothing - desc["lookup_id"] = Base.Int(lookup_id) - end - end - tf.Tensor(tf.Operation(desc)) end - function tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) - desc = tf.EagerOp("TPUEmbeddingActivations") - embedding_variable_ = convert(tf.EagerTensor, embedding_variable_) - sliced_activations_ = convert(tf.EagerTensor, sliced_activations_) - tf.add_input(desc, embedding_variable_) - tf.add_input(desc, sliced_activations_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if lookup_id !== nothing - desc["lookup_id"] = Base.Int(lookup_id) - end - res = tf.execute(desc) - node = tf.TapeNode(tpu_embedding_activations, [embedding_variable_, sliced_activations_], name=nothing, table_id=nothing, lookup_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_embedding_activations(embedding_variable_, sliced_activations_; name=nothing, table_id=nothing, lookup_id=nothing) - if tf.in_eager_mode() - tpu_embedding_activations_eager(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) - else - tpu_embedding_activations_graph(embedding_variable_, sliced_activations_; name=name, table_id=table_id, lookup_id=lookup_id) - end - end end @@ -9245,50 +16773,100 @@ end """ begin - function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - local desc - tf.with_op_name(name, "BatchMatrixSolveLs") do - desc = tf.NodeDescription("BatchMatrixSolveLs") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) + begin + function batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolveLs") do + desc = tf.NodeDescription("BatchMatrixSolveLs") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + begin + tf.add_input(desc, l2_regularizer_) + end + end + begin + begin + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + desc = 
tf.EagerOp("BatchMatrixSolveLs") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + begin + tf.add_input(desc, l2_regularizer_) + end + end + begin + begin + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.in_eager_mode() + batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - desc = tf.EagerOp("BatchMatrixSolveLs") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - if tf.in_eager_mode() - batch_matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - else - batch_matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - end - end end @@ -9298,40 +16876,78 @@ end """ begin - function not_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "NotEqual") do - desc = tf.NodeDescription("NotEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function not_equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "NotEqual") do + desc = tf.NodeDescription("NotEqual") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function not_equal_eager(x_, y_; name=nothing) - desc = tf.EagerOp("NotEqual") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(not_equal, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function 
not_equal_eager(x_, y_; name=nothing) + desc = tf.EagerOp("NotEqual") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(not_equal, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function not_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - not_equal_eager(x_, y_; name=name) - else - not_equal_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function not_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + not_equal_eager(x_, y_; name=name) + else + not_equal_graph(x_, y_; name=name) + end end - end + end end @@ -9341,35 +16957,63 @@ end """ begin - function lgamma_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Lgamma") do - desc = tf.NodeDescription("Lgamma") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function lgamma_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Lgamma") do + desc = tf.NodeDescription("Lgamma") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function lgamma_eager(x_; name=nothing) - desc = tf.EagerOp("Lgamma") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(lgamma, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function lgamma_eager(x_; name=nothing) + desc = tf.EagerOp("Lgamma") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(lgamma, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lgamma(x_; name=nothing) - if tf.in_eager_mode() - lgamma_eager(x_; name=name) - else - lgamma_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lgamma(x_; name=nothing) + if tf.in_eager_mode() + lgamma_eager(x_; name=name) + else + lgamma_graph(x_; name=name) + end end - end + end end @@ -9379,71 +17023,115 @@ end """ begin - function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) - local desc - tf.with_op_name(name, "TPUReplicateMetadata") do - desc = tf.NodeDescription("TPUReplicateMetadata") - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) - end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - 
desc["device_assignment"] = map(Base.identity, device_assignment) - end - if computation_shape !== nothing - desc["computation_shape"] = map(Base.identity, computation_shape) + begin + function tpu_replicate_metadata_graph(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) + local desc + tf.with_op_name(name, "TPUReplicateMetadata") do + desc = tf.NodeDescription("TPUReplicateMetadata") + begin + end + begin + end + begin + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + begin + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + end + begin + if topology !== nothing + desc["topology"] = Base.String(topology) + end + end + begin + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + end + begin + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + end + begin + if computation_shape !== nothing + desc["computation_shape"] = map(Base.identity, computation_shape) + end + end + begin + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) + desc = tf.EagerOp("TPUReplicateMetadata") + begin + end + begin + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + begin + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + end + begin + if topology !== nothing + desc["topology"] = Base.String(topology) + end + end + begin + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + end + begin + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + end + begin + if computation_shape !== nothing + desc["computation_shape"] = map(Base.identity, computation_shape) + end + end + begin + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) + if tf.in_eager_mode() + tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) + else + tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, 
device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) + end end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tpu_replicate_metadata_eager(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) - desc = tf.EagerOp("TPUReplicateMetadata") - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) - end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - desc["device_assignment"] = map(Base.identity, device_assignment) - end - if computation_shape !== nothing - desc["computation_shape"] = map(Base.identity, computation_shape) - end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - res = tf.execute(desc) - node = tf.TapeNode(tpu_replicate_metadata, [], name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate_metadata(; name=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, computation_shape=nothing, host_compute_core=nothing) - if tf.in_eager_mode() - tpu_replicate_metadata_eager(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) - else - tpu_replicate_metadata_graph(; name=name, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, computation_shape=computation_shape, host_compute_core=host_compute_core) - end - end end @@ -9453,59 +17141,95 @@ end """ begin - function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "ExperimentalThreadPoolHandle") do - desc = tf.NodeDescription("ExperimentalThreadPoolHandle") - if num_threads !== nothing - desc["num_threads"] = Base.Int(num_threads) - end - if max_intra_op_parallelism !== nothing - desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) - end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function experimental_thread_pool_handle_graph(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolHandle") do + desc = tf.NodeDescription("ExperimentalThreadPoolHandle") + begin + end + begin + end + begin + begin + if num_threads !== nothing + desc["num_threads"] = 
Base.Int(num_threads) + end + end + begin + if max_intra_op_parallelism !== nothing + desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) + end + end + begin + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_thread_pool_handle_eager(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("ExperimentalThreadPoolHandle") + begin + end + begin + begin + if num_threads !== nothing + desc["num_threads"] = Base.Int(num_threads) + end + end + begin + if max_intra_op_parallelism !== nothing + desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) + end + end + begin + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_thread_pool_handle, [], name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_handle(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + else + experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_thread_pool_handle_eager(; name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("ExperimentalThreadPoolHandle") - if num_threads !== nothing - desc["num_threads"] = Base.Int(num_threads) - end - if max_intra_op_parallelism !== nothing - desc["max_intra_op_parallelism"] = Base.Int(max_intra_op_parallelism) - end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_thread_pool_handle, [], name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_handle(; 
name=nothing, num_threads=nothing, max_intra_op_parallelism=nothing, display_name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - experimental_thread_pool_handle_eager(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) - else - experimental_thread_pool_handle_graph(; name=name, num_threads=num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name=display_name, container=container, shared_name=shared_name) - end - end end @@ -9515,35 +17239,63 @@ end """ begin - function self_adjoint_eig_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "SelfAdjointEig") do - desc = tf.NodeDescription("SelfAdjointEig") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function self_adjoint_eig_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEig") do + desc = tf.NodeDescription("SelfAdjointEig") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function self_adjoint_eig_eager(input_; name=nothing) - desc = tf.EagerOp("SelfAdjointEig") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(self_adjoint_eig, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function self_adjoint_eig_eager(input_; name=nothing) + desc = tf.EagerOp("SelfAdjointEig") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(self_adjoint_eig, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig(input_; name=nothing) - if tf.in_eager_mode() - self_adjoint_eig_eager(input_; name=name) - else - self_adjoint_eig_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig(input_; name=nothing) + if tf.in_eager_mode() + self_adjoint_eig_eager(input_; name=name) + else + self_adjoint_eig_graph(input_; name=name) + end end - end + end end @@ -9553,44 +17305,74 @@ end """ begin - function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceGetBucketBoundaries") do + desc = 
tf.NodeDescription("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) - end - out end - function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_get_bucket_boundaries, [quantile_stream_resource_handle_], name=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceGetBucketBoundaries") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_get_bucket_boundaries, [quantile_stream_resource_handle_], name=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features) - else - boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_get_bucket_boundaries(quantile_stream_resource_handle_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_get_bucket_boundaries_eager(quantile_stream_resource_handle_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_get_bucket_boundaries_graph(quantile_stream_resource_handle_; name=name, num_features=num_features) + end end - end + end end @@ -9600,48 +17382,102 @@ end """ begin - function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseDiv") do - desc = tf.NodeDescription("SparseDenseCwiseDiv") - sp_indices_ = 
convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - desc = tf.EagerOp("SparseDenseCwiseDiv") - sp_indices_ = convert(tf.EagerTensor, sp_indices_) - sp_values_ = convert(tf.EagerTensor, sp_values_) - sp_shape_ = convert(tf.EagerTensor, sp_shape_) - dense_ = convert(tf.EagerTensor, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - desc["T"] = tf.data_type(sp_values_) - desc["T"] = tf.data_type(dense_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_dense_cwise_div, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + begin + function sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseDiv") do + desc = tf.NodeDescription("SparseDenseCwiseDiv") + begin + begin + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + begin + end + end + begin + sp_values_ = convert(Tensor{Any}, sp_values_) + begin + end + end + begin + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + begin + end + end + begin + dense_ = convert(Tensor{Any}, dense_) + begin + end + end + begin + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + end + end + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + tf.add_input(desc, dense_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseDiv") + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + tf.add_input(desc, dense_) + end + end + begin + end + begin + desc["T"] = tf.data_type(sp_values_) + end + begin + desc["T"] = tf.data_type(dense_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_div, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_div(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_div_eager(sp_indices_, sp_values_, sp_shape_, dense_; 
name=name) + else + sparse_dense_cwise_div_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end + end end @@ -9651,35 +17487,63 @@ end """ begin - function acos_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Acos") do - desc = tf.NodeDescription("Acos") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function acos_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Acos") do + desc = tf.NodeDescription("Acos") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function acos_eager(x_; name=nothing) - desc = tf.EagerOp("Acos") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(acos, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function acos_eager(x_; name=nothing) + desc = tf.EagerOp("Acos") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(acos, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acos(x_; name=nothing) - if tf.in_eager_mode() - acos_eager(x_; name=name) - else - acos_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acos(x_; name=nothing) + if tf.in_eager_mode() + acos_eager(x_; name=name) + else + acos_graph(x_; name=name) + end end - end + end end @@ -9689,46 +17553,86 @@ end """ begin - function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "All") do - desc = tf.NodeDescription("All") - input_ = convert(Tensor{Bool}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function all_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "All") do + desc = tf.NodeDescription("All") + begin + begin + input_ = convert(Tensor{Bool}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("All") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + 
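# ---------------------------------------------------------------------
# Every op in this generated file follows the same three-part pattern
# seen with acos above: a `_graph` builder, an `_eager` executor, and a
# `tf.@op` dispatcher that picks one at call time via tf.in_eager_mode().
# A minimal hand-written sketch of that pattern, assuming the tf bindings
# used throughout this diff (tf.EagerOp, tf.add_input, tf.execute,
# tf.data_type, tf.TapeNode) are in scope, and using "Neg" purely as an
# illustrative unary op:
function neg_graph(x_; name=nothing)
    local desc
    tf.with_op_name(name, "Neg") do
        desc = tf.NodeDescription("Neg")
        x_ = convert(Tensor{Any}, x_)
        (x_,) = tf.tf_promote(x_)       # unify input element types
        tf.add_input(desc, x_)
    end
    tf.Tensor(tf.Operation(desc))       # symbolic node; nothing runs yet
end
function neg_eager(x_; name=nothing)
    desc = tf.EagerOp("Neg")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    desc["T"] = tf.data_type(x_)        # attr stamped from the live input
    res = tf.execute(desc)              # runs immediately
    node = tf.TapeNode(neg, [x_], name=nothing, res)
    if length(res) >= 1
        tf.add_node(res[1], node)       # record for the gradient tape
        return res[1]
    end
end
tf.@op function neg(x_; name=nothing)
    if tf.in_eager_mode()
        neg_eager(x_; name=name)
    else
        neg_graph(x_; name=name)
    end
end
# ---------------------------------------------------------------------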
begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(all, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) end - function all_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("All") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(all, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - all_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - all_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -9738,40 +17642,78 @@ end """ begin - function compare_and_bitpack_graph(input_, threshold_; name=nothing) - local desc - tf.with_op_name(name, "CompareAndBitpack") do - desc = tf.NodeDescription("CompareAndBitpack") - input_ = convert(Tensor{Any}, input_) - threshold_ = convert(Tensor{Any}, threshold_) - (input_, threshold_) = tf.tf_promote(input_, threshold_) - tf.add_input(desc, input_) - tf.add_input(desc, threshold_) + begin + function compare_and_bitpack_graph(input_, threshold_; name=nothing) + local desc + tf.with_op_name(name, "CompareAndBitpack") do + desc = tf.NodeDescription("CompareAndBitpack") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + threshold_ = convert(Tensor{Any}, threshold_) + begin + end + end + begin + (input_, threshold_) = tf.tf_promote(input_, threshold_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, threshold_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function compare_and_bitpack_eager(input_, threshold_; name=nothing) - desc = tf.EagerOp("CompareAndBitpack") - input_ = convert(tf.EagerTensor, input_) - threshold_ = convert(tf.EagerTensor, threshold_) - tf.add_input(desc, input_) - tf.add_input(desc, threshold_) - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(threshold_) - res = tf.execute(desc) - node = tf.TapeNode(compare_and_bitpack, [input_, threshold_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function compare_and_bitpack_eager(input_, threshold_; name=nothing) + desc = tf.EagerOp("CompareAndBitpack") + input_ = convert(tf.EagerTensor, input_) + threshold_ = convert(tf.EagerTensor, threshold_) + 
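# ---------------------------------------------------------------------
# `all` above shows two details that recur through this file: Julia is
# 1-based while TensorFlow axes are 0-based, so the builder shifts
# reduction_indices_ down by one before wiring it in, and optional
# attributes are written into the description only when the caller
# supplied them. A trimmed sketch of just those steps; `desc` is the
# description under construction, and shift_reduction_inputs! is a
# hypothetical helper name, not part of this patch:
function shift_reduction_inputs!(desc, reduction_indices_; keep_dims=nothing)
    reduction_indices_ = convert(Tensor{Int32}, reduction_indices_)
    # 1-based Julia axis -> 0-based TensorFlow axis
    reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1)
    tf.add_input(desc, reduction_indices_)
    if keep_dims !== nothing                      # attr only when given
        desc["keep_dims"] = Base.Bool(keep_dims)
    end
    return desc
end
# ---------------------------------------------------------------------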
begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, threshold_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(threshold_) + end + res = tf.execute(desc) + node = tf.TapeNode(compare_and_bitpack, [input_, threshold_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing) - if tf.in_eager_mode() - compare_and_bitpack_eager(input_, threshold_; name=name) - else - compare_and_bitpack_graph(input_, threshold_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compare_and_bitpack(input_, threshold_; name=nothing) + if tf.in_eager_mode() + compare_and_bitpack_eager(input_, threshold_; name=name) + else + compare_and_bitpack_graph(input_, threshold_; name=name) + end end - end + end end @@ -9781,53 +17723,85 @@ end """ begin - function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "VarHandleOp") do - desc = tf.NodeDescription("VarHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function var_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "VarHandleOp") do + desc = tf.NodeDescription("VarHandleOp") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function var_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("VarHandleOp") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(var_handle_op, [], name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + else + var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function var_handle_op_eager(; 
name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("VarHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(var_handle_op, [], name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_handle_op(; name=nothing, container=nothing, shared_name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - var_handle_op_eager(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) - else - var_handle_op_graph(; name=name, container=container, shared_name=shared_name, dtype=dtype, shape=shape) - end - end end @@ -9837,45 +17811,77 @@ end """ begin - function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalUniqueDataset") do - desc = tf.NodeDescription("ExperimentalUniqueDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_unique_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUniqueDataset") do + desc = tf.NodeDescription("ExperimentalUniqueDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalUniqueDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_unique_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unique_dataset_graph(input_dataset_; name=name, 
output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_unique_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalUniqueDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_unique_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unique_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_unique_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_unique_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -9885,51 +17891,95 @@ end """ begin - function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "ListDiff") do - desc = tf.NodeDescription("ListDiff") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) + begin + function list_diff_graph(x_, y_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "ListDiff") do + desc = tf.NodeDescription("ListDiff") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) + desc = tf.EagerOp("ListDiff") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + list_diff_eager(x_, y_; name=name, out_idx=out_idx) + else + list_diff_graph(x_, y_; name=name, out_idx=out_idx) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function list_diff_eager(x_, y_; name=nothing, out_idx=nothing) - desc = tf.EagerOp("ListDiff") - x_ = 
convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(list_diff, [x_, y_], name=nothing, out_idx=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function list_diff(x_, y_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - list_diff_eager(x_, y_; name=name, out_idx=out_idx) - else - list_diff_graph(x_, y_; name=name, out_idx=out_idx) - end - end end @@ -9939,49 +17989,105 @@ end """ begin - function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) - local desc - tf.with_op_name(name, "CreateSummaryFileWriter") do - desc = tf.NodeDescription("CreateSummaryFileWriter") - writer_ = convert(Tensor{Any}, writer_) - logdir_ = convert(Tensor{String}, logdir_) - max_queue_ = convert(Tensor{Int32}, max_queue_) - flush_millis_ = convert(Tensor{Int32}, flush_millis_) - filename_suffix_ = convert(Tensor{String}, filename_suffix_) - tf.add_input(desc, writer_) - tf.add_input(desc, logdir_) - tf.add_input(desc, max_queue_) - tf.add_input(desc, flush_millis_) - tf.add_input(desc, filename_suffix_) - end - tf.Tensor(tf.Operation(desc)) - end - function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) - desc = tf.EagerOp("CreateSummaryFileWriter") - writer_ = convert(tf.EagerTensor, writer_) - logdir_ = convert(tf.EagerTensor, logdir_) - max_queue_ = convert(tf.EagerTensor, max_queue_) - flush_millis_ = convert(tf.EagerTensor, flush_millis_) - filename_suffix_ = convert(tf.EagerTensor, filename_suffix_) - tf.add_input(desc, writer_) - tf.add_input(desc, logdir_) - tf.add_input(desc, max_queue_) - tf.add_input(desc, flush_millis_) - tf.add_input(desc, filename_suffix_) - res = tf.execute(desc) - node = tf.TapeNode(create_summary_file_writer, [writer_, logdir_, max_queue_, flush_millis_, filename_suffix_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) - if tf.in_eager_mode() - create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) - else - create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + begin + function create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + local desc + tf.with_op_name(name, "CreateSummaryFileWriter") do + desc = tf.NodeDescription("CreateSummaryFileWriter") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + logdir_ = convert(Tensor{String}, logdir_) + begin + end + end + begin + max_queue_ = convert(Tensor{Int32}, max_queue_) + begin + end + end + begin + flush_millis_ = convert(Tensor{Int32}, flush_millis_) + begin + end + end + begin + filename_suffix_ = convert(Tensor{String}, filename_suffix_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, logdir_) + end + begin + tf.add_input(desc, max_queue_) + end + 
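# ---------------------------------------------------------------------
# Multi-output ops such as ListDiff above (values plus indices) cannot
# return a single tf.Tensor, so the graph builder collects one handle
# per output slot, and the eager twin returns the whole `res` vector
# from tf.execute instead of res[1]. A sketch of the graph-side loop,
# assuming `desc` is a finished NodeDescription and n_outputs comes from
# the op definition (2 for ListDiff, num_split for Split):
out = tf.Tensor[]
op = tf.Operation(desc)
for out_idx in 1:n_outputs
    push!(out, tf.Tensor(op, out_idx))  # one tensor handle per output
end
out
# ---------------------------------------------------------------------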
begin + tf.add_input(desc, flush_millis_) + end + begin + tf.add_input(desc, filename_suffix_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + desc = tf.EagerOp("CreateSummaryFileWriter") + writer_ = convert(tf.EagerTensor, writer_) + logdir_ = convert(tf.EagerTensor, logdir_) + max_queue_ = convert(tf.EagerTensor, max_queue_) + flush_millis_ = convert(tf.EagerTensor, flush_millis_) + filename_suffix_ = convert(tf.EagerTensor, filename_suffix_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, logdir_) + end + begin + tf.add_input(desc, max_queue_) + end + begin + tf.add_input(desc, flush_millis_) + end + begin + tf.add_input(desc, filename_suffix_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(create_summary_file_writer, [writer_, logdir_, max_queue_, flush_millis_, filename_suffix_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_file_writer(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=nothing) + if tf.in_eager_mode() + create_summary_file_writer_eager(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + else + create_summary_file_writer_graph(writer_, logdir_, max_queue_, flush_millis_, filename_suffix_; name=name) + end end - end + end end @@ -9991,60 +18097,106 @@ end """ begin - function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) - local desc - tf.with_op_name(name, "GenerateVocabRemapping") do - desc = tf.NodeDescription("GenerateVocabRemapping") - new_vocab_file_ = convert(Tensor{String}, new_vocab_file_) - old_vocab_file_ = convert(Tensor{String}, old_vocab_file_) - tf.add_input(desc, new_vocab_file_) - tf.add_input(desc, old_vocab_file_) - if new_vocab_offset !== nothing - desc["new_vocab_offset"] = Base.Int(new_vocab_offset) - end - if num_new_vocab !== nothing - desc["num_new_vocab"] = Base.Int(num_new_vocab) - end - if old_vocab_size !== nothing - desc["old_vocab_size"] = Base.Int(old_vocab_size) + begin + function generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + local desc + tf.with_op_name(name, "GenerateVocabRemapping") do + desc = tf.NodeDescription("GenerateVocabRemapping") + begin + begin + new_vocab_file_ = convert(Tensor{String}, new_vocab_file_) + begin + end + end + begin + old_vocab_file_ = convert(Tensor{String}, old_vocab_file_) + begin + end + end + end + begin + begin + tf.add_input(desc, new_vocab_file_) + end + begin + tf.add_input(desc, old_vocab_file_) + end + end + begin + begin + if new_vocab_offset !== nothing + desc["new_vocab_offset"] = Base.Int(new_vocab_offset) + end + end + begin + if num_new_vocab !== nothing + desc["num_new_vocab"] = Base.Int(num_new_vocab) + end + end + begin + if old_vocab_size !== nothing + desc["old_vocab_size"] = Base.Int(old_vocab_size) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, 
new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + desc = tf.EagerOp("GenerateVocabRemapping") + new_vocab_file_ = convert(tf.EagerTensor, new_vocab_file_) + old_vocab_file_ = convert(tf.EagerTensor, old_vocab_file_) + begin + begin + tf.add_input(desc, new_vocab_file_) + end + begin + tf.add_input(desc, old_vocab_file_) + end + end + begin + begin + if new_vocab_offset !== nothing + desc["new_vocab_offset"] = Base.Int(new_vocab_offset) + end + end + begin + if num_new_vocab !== nothing + desc["num_new_vocab"] = Base.Int(num_new_vocab) + end + end + begin + if old_vocab_size !== nothing + desc["old_vocab_size"] = Base.Int(old_vocab_size) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(generate_vocab_remapping, [new_vocab_file_, old_vocab_file_], name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) + if tf.in_eager_mode() + generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + else + generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) - desc = tf.EagerOp("GenerateVocabRemapping") - new_vocab_file_ = convert(tf.EagerTensor, new_vocab_file_) - old_vocab_file_ = convert(tf.EagerTensor, old_vocab_file_) - tf.add_input(desc, new_vocab_file_) - tf.add_input(desc, old_vocab_file_) - if new_vocab_offset !== nothing - desc["new_vocab_offset"] = Base.Int(new_vocab_offset) - end - if num_new_vocab !== nothing - desc["num_new_vocab"] = Base.Int(num_new_vocab) - end - if old_vocab_size !== nothing - desc["old_vocab_size"] = Base.Int(old_vocab_size) - end - res = tf.execute(desc) - node = tf.TapeNode(generate_vocab_remapping, [new_vocab_file_, old_vocab_file_], name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_vocab_remapping(new_vocab_file_, old_vocab_file_; name=nothing, new_vocab_offset=nothing, num_new_vocab=nothing, old_vocab_size=nothing) - if tf.in_eager_mode() - generate_vocab_remapping_eager(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) - else - generate_vocab_remapping_graph(new_vocab_file_, old_vocab_file_; name=name, new_vocab_offset=new_vocab_offset, num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size) - end - end end @@ -10054,41 +18206,73 @@ end """ begin - function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "BatchMatrixInverse") do - desc = tf.NodeDescription("BatchMatrixInverse") - input_ = 
convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) + begin + function batch_matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixInverse") do + desc = tf.NodeDescription("BatchMatrixInverse") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) - desc = tf.EagerOp("BatchMatrixInverse") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_inverse, [input_], name=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_matrix_inverse_eager(input_; name=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixInverse") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_inverse, [input_], name=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint) - else - batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + batch_matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end end - end + end end @@ -10098,30 +18282,45 @@ end """ begin - function control_trigger_graph(; name=nothing) - local desc - tf.with_op_name(name, "ControlTrigger") do - desc - tf.NodeDescription("ControlTrigger") + begin + function control_trigger_graph(; name=nothing) + local desc + tf.with_op_name(name, "ControlTrigger") do + desc = tf.NodeDescription("ControlTrigger") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function control_trigger_eager(; name=nothing) - desc = tf.EagerOp("ControlTrigger") - res = tf.execute(desc) - node = tf.TapeNode(control_trigger, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function control_trigger_eager(; name=nothing) + desc = tf.EagerOp("ControlTrigger") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(control_trigger, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function control_trigger(; name=nothing) - if 
tf.in_eager_mode() - control_trigger_eager(; name=name) - else - control_trigger_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function control_trigger(; name=nothing) + if tf.in_eager_mode() + control_trigger_eager(; name=name) + else + control_trigger_graph(; name=name) + end end - end + end end @@ -10131,35 +18330,63 @@ end """ begin - function stop_gradient_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "StopGradient") do - desc = tf.NodeDescription("StopGradient") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function stop_gradient_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "StopGradient") do + desc = tf.NodeDescription("StopGradient") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stop_gradient_eager(input_; name=nothing) - desc = tf.EagerOp("StopGradient") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(stop_gradient, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stop_gradient_eager(input_; name=nothing) + desc = tf.EagerOp("StopGradient") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(stop_gradient, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stop_gradient(input_; name=nothing) - if tf.in_eager_mode() - stop_gradient_eager(input_; name=name) - else - stop_gradient_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stop_gradient(input_; name=nothing) + if tf.in_eager_mode() + stop_gradient_eager(input_; name=name) + else + stop_gradient_graph(input_; name=name) + end end - end + end end @@ -10169,51 +18396,93 @@ end """ begin - function split_graph(split_dim_, value_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "Split") do - desc = tf.NodeDescription("Split") - split_dim_ = convert(Tensor{Int32}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, value_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) + begin + function split_graph(split_dim_, value_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "Split") do + desc = tf.NodeDescription("Split") + begin + begin + split_dim_ = convert(Tensor{Int32}, split_dim_) + begin + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, split_dim_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = 
Base.Int(num_split) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function split_eager(split_dim_, value_; name=nothing, num_split=nothing) + desc = tf.EagerOp("Split") + split_dim_ = convert(tf.EagerTensor, split_dim_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, split_dim_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(split, [split_dim_, value_], name=nothing, num_split=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + split_eager(split_dim_, value_; name=name, num_split=num_split) + else + split_graph(split_dim_, value_; name=name, num_split=num_split) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_split - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function split_eager(split_dim_, value_; name=nothing, num_split=nothing) - desc = tf.EagerOp("Split") - split_dim_ = convert(tf.EagerTensor, split_dim_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, value_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(split, [split_dim_, value_], name=nothing, num_split=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split(split_dim_, value_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - split_eager(split_dim_, value_; name=name, num_split=num_split) - else - split_graph(split_dim_, value_; name=name, num_split=num_split) - end - end end @@ -10223,58 +18492,100 @@ end """ begin - function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) - local desc - tf.with_op_name(name, "Unpack") do - desc = tf.NodeDescription("Unpack") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if num !== nothing - desc["num"] = Base.Int(num) + begin + function unpack_graph(value_; name=nothing, num=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Unpack") do + desc = tf.NodeDescription("Unpack") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if num !== nothing + desc["num"] = Base.Int(num) + end + end + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) + desc = tf.EagerOp("Unpack") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if num !== nothing + desc["num"] 
= Base.Int(num) + end + end + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(unpack, [value_], name=nothing, num=nothing, axis=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) + if tf.in_eager_mode() + unpack_eager(value_; name=name, num=num, axis=axis) + else + unpack_graph(value_; name=name, num=num, axis=axis) + end end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num - push!(out, tf.Tensor(op, out_idx)) - end - out end - function unpack_eager(value_; name=nothing, num=nothing, axis=nothing) - desc = tf.EagerOp("Unpack") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - if num !== nothing - desc["num"] = Base.Int(num) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(unpack, [value_], name=nothing, num=nothing, axis=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unpack(value_; name=nothing, num=nothing, axis=nothing) - if tf.in_eager_mode() - unpack_eager(value_; name=name, num=num, axis=axis) - else - unpack_graph(value_; name=name, num=num, axis=axis) - end - end end @@ -10284,52 +18595,104 @@ end """ begin - function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMax") do - desc = tf.NodeDescription("ResourceScatterMax") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_max_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMax") do + desc = tf.NodeDescription("ResourceScatterMax") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = 
tf.EagerOp("ResourceScatterMax") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_max, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function resource_scatter_max_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterMax") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_max, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_max(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_max_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_max_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -10339,47 +18702,99 @@ end """ begin - function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWrite") do - desc = tf.NodeDescription("TensorArrayWrite") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayWrite") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write, [handle_, index_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - 
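# ---------------------------------------------------------------------
# Eager wrappers cannot lean on graph-time type inference, so each one
# stamps the op's type attrs straight from its realized inputs, as
# resource_scatter_max does above with "Tindices" and "dtype". A minimal
# sketch; stamp_scatter_attrs! is a hypothetical name, not in this patch:
function stamp_scatter_attrs!(desc, indices_, updates_)
    desc["Tindices"] = tf.data_type(indices_)  # index dtype from indices
    desc["dtype"] = tf.data_type(updates_)     # element dtype from updates
    return desc
end
# ---------------------------------------------------------------------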
return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) + begin + function tensor_array_write_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWrite") do + desc = tf.NodeDescription("TensorArrayWrite") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_write_eager(handle_, index_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayWrite") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write, [handle_, index_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_graph(handle_, index_, value_, flow_in_; name=name) + end end - end + end end @@ -10389,47 +18804,91 @@ end """ begin - function fill_graph(dims_, value_; name=nothing, index_type=nothing) - local desc - tf.with_op_name(name, "Fill") do - desc = tf.NodeDescription("Fill") - dims_ = convert(Tensor{Int32}, dims_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - (dims_,) = tf.tf_promote(dims_) - tf.add_input(desc, dims_) - tf.add_input(desc, value_) - if index_type !== nothing - desc["index_type"] = Base.identity(index_type) + begin + function fill_graph(dims_, value_; name=nothing, index_type=nothing) + local desc + tf.with_op_name(name, "Fill") do + desc = tf.NodeDescription("Fill") + begin + begin + dims_ = convert(Tensor{Int32}, dims_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + begin + (dims_,) = tf.tf_promote(dims_) + end + end + begin + begin + tf.add_input(desc, dims_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if index_type !== nothing + desc["index_type"] = Base.identity(index_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fill_eager(dims_, value_; name=nothing, index_type=nothing) + desc = 
tf.EagerOp("Fill") + dims_ = convert(tf.EagerTensor, dims_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, dims_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if index_type !== nothing + desc["index_type"] = Base.identity(index_type) + end + end + end + begin + desc["index_type"] = tf.data_type(dims_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(fill, [dims_, value_], name=nothing, index_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) + if tf.in_eager_mode() + fill_eager(dims_, value_; name=name, index_type=index_type) + else + fill_graph(dims_, value_; name=name, index_type=index_type) + end end - end - tf.Tensor(tf.Operation(desc)) end - function fill_eager(dims_, value_; name=nothing, index_type=nothing) - desc = tf.EagerOp("Fill") - dims_ = convert(tf.EagerTensor, dims_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, dims_) - tf.add_input(desc, value_) - if index_type !== nothing - desc["index_type"] = Base.identity(index_type) - end - desc["index_type"] = tf.data_type(dims_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(fill, [dims_, value_], name=nothing, index_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fill(dims_, value_; name=nothing, index_type=nothing) - if tf.in_eager_mode() - fill_eager(dims_, value_; name=name, index_type=index_type) - else - fill_graph(dims_, value_; name=name, index_type=index_type) - end - end end @@ -10439,35 +18898,63 @@ end """ begin - function softmax_graph(logits_; name=nothing) - local desc - tf.with_op_name(name, "Softmax") do - desc = tf.NodeDescription("Softmax") - logits_ = convert(Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) + begin + function softmax_graph(logits_; name=nothing) + local desc + tf.with_op_name(name, "Softmax") do + desc = tf.NodeDescription("Softmax") + begin + begin + logits_ = convert(Tensor{Any}, logits_) + begin + end + end + begin + (logits_,) = tf.tf_promote(logits_) + end + end + begin + begin + tf.add_input(desc, logits_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function softmax_eager(logits_; name=nothing) - desc = tf.EagerOp("Softmax") - logits_ = convert(tf.EagerTensor, logits_) - tf.add_input(desc, logits_) - desc["T"] = tf.data_type(logits_) - res = tf.execute(desc) - node = tf.TapeNode(softmax, [logits_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function softmax_eager(logits_; name=nothing) + desc = tf.EagerOp("Softmax") + logits_ = convert(tf.EagerTensor, logits_) + begin + begin + tf.add_input(desc, logits_) + end + end + begin + end + begin + desc["T"] = tf.data_type(logits_) + end + res = tf.execute(desc) + node = tf.TapeNode(softmax, [logits_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax(logits_; name=nothing) - if tf.in_eager_mode() - softmax_eager(logits_; name=name) - else - 
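# ---------------------------------------------------------------------
# After tf.execute, every eager wrapper (softmax above included) records
# a tf.TapeNode tying the Julia-level op, its inputs, and its outputs
# together, so a later backward pass can walk the tape. A distilled
# sketch of that bookkeeping, assuming `desc`, `logits_`, and the tf
# bindings from this diff are in scope:
res = tf.execute(desc)                # Vector of eager output tensors
node = tf.TapeNode(softmax, [logits_], name=nothing, res)
if length(res) >= 1
    tf.add_node(res[1], node)         # attach the record to the output
end
# ---------------------------------------------------------------------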
softmax_graph(logits_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softmax(logits_; name=nothing) + if tf.in_eager_mode() + softmax_eager(logits_; name=name) + else + softmax_graph(logits_; name=name) + end end - end + end end @@ -10477,45 +18964,85 @@ end """ begin - function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBicubic") do - desc = tf.NodeDescription("ResizeBicubic") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_bicubic_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubic") do + desc = tf.NodeDescription("ResizeBicubic") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBicubic") + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_bicubic, [images_, size_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_bicubic_eager(images_, size_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeBicubic") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(resize_bicubic, [images_, size_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bicubic_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_bicubic_graph(images_, size_; name=name, align_corners=align_corners) - end - end end @@ -10525,41 +19052,65 @@ end A placeholder op for multiple 
values that will be fed into the computation """ begin - function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "InfeedDequeueTuple") do - desc = tf.NodeDescription("InfeedDequeueTuple") - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) + begin + function infeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "InfeedDequeueTuple") do + desc = tf.NodeDescription("InfeedDequeueTuple") + begin + end + begin + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function infeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing) - desc = tf.EagerOp("InfeedDequeueTuple") - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(infeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function infeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing) + desc = tf.EagerOp("InfeedDequeueTuple") + begin + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(infeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) - if tf.in_eager_mode() - infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) - else - infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing) + if tf.in_eager_mode() + infeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes) + else + infeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes) + end end - end + end end @@ -10569,59 +19120,95 @@ end """ begin - function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIterator") do - desc = tf.NodeDescription("MultiDeviceIterator") - if devices !== nothing - desc["devices"] = map(Base.identity, devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function multi_device_iterator_graph(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIterator") do + desc = tf.NodeDescription("MultiDeviceIterator") + begin + end + begin + end + begin + begin + if devices !== nothing + desc["devices"] = map(Base.identity, 
devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function multi_device_iterator_eager(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIterator") + begin + end + begin + begin + if devices !== nothing + desc["devices"] = map(Base.identity, devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator, [], name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_eager(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function multi_device_iterator_eager(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("MultiDeviceIterator") - if devices !== nothing - desc["devices"] = map(Base.identity, devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator, [], name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator(; name=nothing, devices=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_eager(; name=name, devices=devices, 
shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_graph(; name=name, devices=devices, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -10631,67 +19218,119 @@ end """ begin - function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - local desc - tf.with_op_name(name, "DecodeCSV") do - desc = tf.NodeDescription("DecodeCSV") - records_ = convert(Tensor{String}, records_) - record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] - tf.add_input(desc, records_) - tf.add_input(desc, record_defaults_) - if OUT_TYPE !== nothing - desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) - end - if field_delim !== nothing - desc["field_delim"] = Base.String(field_delim) - end - if use_quote_delim !== nothing - desc["use_quote_delim"] = Base.Bool(use_quote_delim) - end - if na_value !== nothing - desc["na_value"] = Base.String(na_value) - end - if select_cols !== nothing - desc["select_cols"] = map(Base.identity, select_cols) + begin + function decode_csv_graph(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + local desc + tf.with_op_name(name, "DecodeCSV") do + desc = tf.NodeDescription("DecodeCSV") + begin + begin + records_ = convert(Tensor{String}, records_) + begin + end + end + begin + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, records_) + end + begin + tf.add_input(desc, record_defaults_) + end + end + begin + begin + if OUT_TYPE !== nothing + desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) + end + end + begin + if field_delim !== nothing + desc["field_delim"] = Base.String(field_delim) + end + end + begin + if use_quote_delim !== nothing + desc["use_quote_delim"] = Base.Bool(use_quote_delim) + end + end + begin + if na_value !== nothing + desc["na_value"] = Base.String(na_value) + end + end + begin + if select_cols !== nothing + desc["select_cols"] = map(Base.identity, select_cols) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + desc = tf.EagerOp("DecodeCSV") + records_ = convert(tf.EagerTensor, records_) + record_defaults_ = convert(tf.EagerTensor, record_defaults_) + begin + begin + tf.add_input(desc, records_) + end + begin + tf.add_input(desc, record_defaults_) + end + end + begin + begin + if OUT_TYPE !== nothing + desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) + end + end + begin + if field_delim !== nothing + desc["field_delim"] = Base.String(field_delim) + end + end + begin + if use_quote_delim !== nothing + desc["use_quote_delim"] = Base.Bool(use_quote_delim) + end + end + begin + if na_value !== nothing + desc["na_value"] = Base.String(na_value) + end + end + begin + if select_cols !== nothing + desc["select_cols"] = map(Base.identity, select_cols) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_csv, [records_, record_defaults_], name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) + if tf.in_eager_mode() + decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + else + decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function decode_csv_eager(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - desc = tf.EagerOp("DecodeCSV") - records_ = convert(tf.EagerTensor, records_) - record_defaults_ = convert(tf.EagerTensor, record_defaults_) - tf.add_input(desc, records_) - tf.add_input(desc, record_defaults_) - if OUT_TYPE !== nothing - desc["OUT_TYPE"] = map(Base.identity, OUT_TYPE) - end - if field_delim !== nothing - desc["field_delim"] = Base.String(field_delim) - end - if use_quote_delim !== nothing - desc["use_quote_delim"] = Base.Bool(use_quote_delim) - end - if na_value !== nothing - desc["na_value"] = Base.String(na_value) - end - if select_cols !== nothing - desc["select_cols"] = map(Base.identity, select_cols) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_csv, [records_, record_defaults_], name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_csv(records_, record_defaults_; name=nothing, OUT_TYPE=nothing, field_delim=nothing, use_quote_delim=nothing, na_value=nothing, select_cols=nothing) - if tf.in_eager_mode() - decode_csv_eager(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) - else - decode_csv_graph(records_, record_defaults_; name=name, OUT_TYPE=OUT_TYPE, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, select_cols=select_cols) - end - end end @@ -10701,45 +19340,93 @@ end """ begin - function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableFind") do - desc = tf.NodeDescription("LookupTableFind") - table_handle_ = convert(Tensor{String}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - default_value_ = convert(Tensor{Any}, default_value_) - (keys_,) = tf.tf_promote(keys_) - (default_value_,) = tf.tf_promote(default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) - desc = tf.EagerOp("LookupTableFind") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - default_value_ = convert(tf.EagerTensor, default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(default_value_) - 
res = tf.execute(desc) - node = tf.TapeNode(lookup_table_find, [table_handle_, keys_, default_value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) - if tf.in_eager_mode() - lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) - else - lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) + begin + function lookup_table_find_graph(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFind") do + desc = tf.NodeDescription("LookupTableFind") + begin + begin + table_handle_ = convert(Tensor{String}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + default_value_ = convert(Tensor{Any}, default_value_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (default_value_,) = tf.tf_promote(default_value_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_find_eager(table_handle_, keys_, default_value_; name=nothing) + desc = tf.EagerOp("LookupTableFind") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + default_value_ = convert(tf.EagerTensor, default_value_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(default_value_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_find, [table_handle_, keys_, default_value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find(table_handle_, keys_, default_value_; name=nothing) + if tf.in_eager_mode() + lookup_table_find_eager(table_handle_, keys_, default_value_; name=name) + else + lookup_table_find_graph(table_handle_, keys_, default_value_; name=name) + end end - end + end end @@ -10749,61 +19436,125 @@ end """ begin - function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ShuffleAndRepeatDataset") do - desc = tf.NodeDescription("ShuffleAndRepeatDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleAndRepeatDataset") do + desc = 
tf.NodeDescription("ShuffleAndRepeatDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + seed2_ = convert(Tensor{Int64}, seed2_) + begin + end + end + begin + count_ = convert(Tensor{Int64}, count_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ShuffleAndRepeatDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) + count_ = convert(tf.EagerTensor, count_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(shuffle_and_repeat_dataset, [input_dataset_, buffer_size_, seed_, seed2_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ShuffleAndRepeatDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - seed_ = convert(tf.EagerTensor, seed_) - seed2_ = convert(tf.EagerTensor, seed2_) - count_ = convert(tf.EagerTensor, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = 
map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(shuffle_and_repeat_dataset, [input_dataset_, buffer_size_, seed_, seed2_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_and_repeat_dataset(input_dataset_, buffer_size_, seed_, seed2_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - shuffle_and_repeat_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - shuffle_and_repeat_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -10813,45 +19564,77 @@ end """ begin - function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalUnbatchDataset") do - desc = tf.NodeDescription("ExperimentalUnbatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_unbatch_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalUnbatchDataset") do + desc = tf.NodeDescription("ExperimentalUnbatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalUnbatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_unbatch_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - 
tf.Tensor(tf.Operation(desc)) - end - function experimental_unbatch_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalUnbatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_unbatch_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_unbatch_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_unbatch_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_unbatch_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -10861,63 +19644,115 @@ end """ begin - function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool3DGrad") do - desc = tf.NodeDescription("AvgPool3DGrad") - orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) - grad_ = convert(Tensor{Any}, grad_) - (grad_,) = tf.tf_promote(grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function avg_pool3d_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool3DGrad") do + desc = tf.NodeDescription("AvgPool3DGrad") + begin + begin + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (grad_,) = tf.tf_promote(grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_shape_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool3DGrad") + orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_shape_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + 
end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(avg_pool3d_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function avg_pool3d_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("AvgPool3DGrad") - orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(avg_pool3d_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool3d_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool3d_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool3d_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -10927,47 +19762,83 @@ end """ begin - function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "PlaceholderWithDefault") do - desc = tf.NodeDescription("PlaceholderWithDefault") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function placeholder_with_default_graph(input_; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderWithDefault") do + desc = tf.NodeDescription("PlaceholderWithDefault") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== 
nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("PlaceholderWithDefault") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["dtype"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(placeholder_with_default, [input_], name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) + else + placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) + end end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function placeholder_with_default_eager(input_; name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("PlaceholderWithDefault") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["dtype"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(placeholder_with_default, [input_], name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_with_default(input_; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_with_default_eager(input_; name=name, dtype=dtype, shape=shape) - else - placeholder_with_default_graph(input_; name=name, dtype=dtype, shape=shape) - end - end end @@ -10977,45 +19848,93 @@ end """ begin - function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "InitializeTableV2") do - desc = tf.NodeDescription("InitializeTableV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - (keys_,) = tf.tf_promote(keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) - desc = tf.EagerOp("InitializeTableV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - desc["Tkey"] = tf.data_type(keys_) - desc["Tval"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(initialize_table_v2, [table_handle_, keys_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
initialize_table_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - initialize_table_v2_eager(table_handle_, keys_, values_; name=name) - else - initialize_table_v2_graph(table_handle_, keys_, values_; name=name) + begin + function initialize_table_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "InitializeTableV2") do + desc = tf.NodeDescription("InitializeTableV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + begin + (keys_,) = tf.tf_promote(keys_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function initialize_table_v2_eager(table_handle_, keys_, values_; name=nothing) + desc = tf.EagerOp("InitializeTableV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["Tkey"] = tf.data_type(keys_) + end + begin + desc["Tval"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(initialize_table_v2, [table_handle_, keys_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + initialize_table_v2_eager(table_handle_, keys_, values_; name=name) + else + initialize_table_v2_graph(table_handle_, keys_, values_; name=name) + end end - end + end end @@ -11025,49 +19944,97 @@ end """ begin - function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "SetSize") do - desc = tf.NodeDescription("SetSize") - set_indices_ = convert(Tensor{Int64}, set_indices_) - set_values_ = convert(Tensor{Any}, set_values_) - set_shape_ = convert(Tensor{Int64}, set_shape_) - (set_values_,) = tf.tf_promote(set_values_) - tf.add_input(desc, set_indices_) - tf.add_input(desc, set_values_) - tf.add_input(desc, set_shape_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function set_size_graph(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SetSize") do + desc = tf.NodeDescription("SetSize") + begin + begin + set_indices_ = convert(Tensor{Int64}, set_indices_) + begin + end + end + begin + set_values_ = convert(Tensor{Any}, set_values_) + begin + end + end + begin + set_shape_ = convert(Tensor{Int64}, set_shape_) + begin + end + end + begin + (set_values_,) = tf.tf_promote(set_values_) + end + end + begin + begin + tf.add_input(desc, set_indices_) + end + begin + tf.add_input(desc, set_values_) + end + begin + tf.add_input(desc, set_shape_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + 
end + end + begin + function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("SetSize") + set_indices_ = convert(tf.EagerTensor, set_indices_) + set_values_ = convert(tf.EagerTensor, set_values_) + set_shape_ = convert(tf.EagerTensor, set_shape_) + begin + begin + tf.add_input(desc, set_indices_) + end + begin + tf.add_input(desc, set_values_) + end + begin + tf.add_input(desc, set_shape_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["T"] = tf.data_type(set_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(set_size, [set_indices_, set_values_, set_shape_], name=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) + if tf.in_eager_mode() + set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + else + set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function set_size_eager(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) - desc = tf.EagerOp("SetSize") - set_indices_ = convert(tf.EagerTensor, set_indices_) - set_values_ = convert(tf.EagerTensor, set_values_) - set_shape_ = convert(tf.EagerTensor, set_shape_) - tf.add_input(desc, set_indices_) - tf.add_input(desc, set_values_) - tf.add_input(desc, set_shape_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["T"] = tf.data_type(set_values_) - res = tf.execute(desc) - node = tf.TapeNode(set_size, [set_indices_, set_values_, set_shape_], name=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function set_size(set_indices_, set_values_, set_shape_; name=nothing, validate_indices=nothing) - if tf.in_eager_mode() - set_size_eager(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) - else - set_size_graph(set_indices_, set_values_, set_shape_; name=name, validate_indices=validate_indices) - end - end end @@ -11077,49 +20044,89 @@ end """ begin - function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "Assert") do - desc = tf.NodeDescription("Assert") - condition_ = convert(Tensor{Bool}, condition_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, condition_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) + begin + function assert_graph(condition_, data_; name=nothing, T=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Assert") do + desc = tf.NodeDescription("Assert") + begin + begin + condition_ = convert(Tensor{Bool}, condition_) + begin + end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + end + begin + begin + tf.add_input(desc, condition_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if summarize !== nothing + desc["summarize"] = 
Base.Int(summarize) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) + desc = tf.EagerOp("Assert") + condition_ = convert(tf.EagerTensor, condition_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, condition_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(assert, [condition_, data_], name=nothing, T=nothing, summarize=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) + if tf.in_eager_mode() + assert_eager(condition_, data_; name=name, T=T, summarize=summarize) + else + assert_graph(condition_, data_; name=name, T=T, summarize=summarize) + end end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - end - tf.Tensor(tf.Operation(desc)) - end - function assert_eager(condition_, data_; name=nothing, T=nothing, summarize=nothing) - desc = tf.EagerOp("Assert") - condition_ = convert(tf.EagerTensor, condition_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, condition_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - res = tf.execute(desc) - node = tf.TapeNode(assert, [condition_, data_], name=nothing, T=nothing, summarize=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assert(condition_, data_; name=nothing, T=nothing, summarize=nothing) - if tf.in_eager_mode() - assert_eager(condition_, data_; name=name, T=T, summarize=summarize) - else - assert_graph(condition_, data_; name=name, T=T, summarize=summarize) - end - end end @@ -11129,63 +20136,131 @@ end """ begin - function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionV4") do - desc = tf.NodeDescription("NonMaxSuppressionV4") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - if pad_to_max_output_size !== nothing - desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) + begin + function non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV4") do + desc = tf.NodeDescription("NonMaxSuppressionV4") + begin + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + scores_ = convert(Tensor{Float32}, scores_) + 
begin + end + end + begin + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + begin + end + end + begin + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + begin + end + end + begin + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + begin + end + end + begin + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + end + end + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + begin + if pad_to_max_output_size !== nothing + desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + desc = tf.EagerOp("NonMaxSuppressionV4") + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + begin + if pad_to_max_output_size !== nothing + desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) + end + end + end + begin + desc["T"] = tf.data_type(boxes_) + end + begin + desc["T"] = tf.data_type(scores_) + end + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v4, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, pad_to_max_output_size=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) + if tf.in_eager_mode() + non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + else + non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - desc = tf.EagerOp("NonMaxSuppressionV4") - boxes_ = convert(tf.EagerTensor, boxes_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_ = convert(tf.EagerTensor, max_output_size_) - iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) - score_threshold_ = convert(tf.EagerTensor, score_threshold_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - if pad_to_max_output_size 
!== nothing - desc["pad_to_max_output_size"] = Base.Bool(pad_to_max_output_size) - end - desc["T"] = tf.data_type(boxes_) - desc["T"] = tf.data_type(scores_) - res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression_v4, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, pad_to_max_output_size=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v4(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing, pad_to_max_output_size=nothing) - if tf.in_eager_mode() - non_max_suppression_v4_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) - else - non_max_suppression_v4_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name, pad_to_max_output_size=pad_to_max_output_size) - end - end end @@ -11195,84 +20270,154 @@ end """ begin - function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - local desc - tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do - desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") - image_size_ = convert(Tensor{Any}, image_size_) - bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) - min_object_covered_ = convert(Tensor{Float32}, min_object_covered_) - (image_size_,) = tf.tf_promote(image_size_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - tf.add_input(desc, min_object_covered_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBoxV2") do + desc = tf.NodeDescription("SampleDistortedBoundingBoxV2") + begin + begin + image_size_ = convert(Tensor{Any}, image_size_) + begin + end + end + begin + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + begin + end + end + begin + min_object_covered_ = convert(Tensor{Float32}, min_object_covered_) + begin + end + end + begin + (image_size_,) = tf.tf_promote(image_size_) + end + end + begin + begin + tf.add_input(desc, image_size_) + end + begin + tf.add_input(desc, bounding_boxes_) + end + begin + tf.add_input(desc, min_object_covered_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + end + begin + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + end + begin + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + end + begin + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, 
out_idx)) + end + out + end + end + end + begin + function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + desc = tf.EagerOp("SampleDistortedBoundingBoxV2") + image_size_ = convert(tf.EagerTensor, image_size_) + bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_) + min_object_covered_ = convert(tf.EagerTensor, min_object_covered_) + begin + begin + tf.add_input(desc, image_size_) + end + begin + tf.add_input(desc, bounding_boxes_) + end + begin + tf.add_input(desc, min_object_covered_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + end + begin + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + end + begin + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + end + begin + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + end + begin + desc["T"] = tf.data_type(image_size_) + end + res = tf.execute(desc) + node = tf.TapeNode(sample_distorted_bounding_box_v2, [image_size_, bounding_boxes_, min_object_covered_], name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box_v2(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.in_eager_mode() + sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_v2_graph(image_size_, bounding_boxes_, min_object_covered_; name=name, seed=seed, seed2=seed2, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) - end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sample_distorted_bounding_box_v2_eager(image_size_, bounding_boxes_, min_object_covered_; name=nothing, seed=nothing, seed2=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - desc = 
@@ -11282,61 +20427,109 @@ end
 """
 begin
- function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
- local desc
- tf.with_op_name(name, "InitializeTableFromTextFile") do
- desc = tf.NodeDescription("InitializeTableFromTextFile")
- table_handle_ = convert(Tensor{String}, table_handle_)
- filename_ = convert(Tensor{String}, filename_)
- tf.add_input(desc, table_handle_)
- tf.add_input(desc, filename_)
- if key_index !== nothing
- desc["key_index"] = Base.Int(key_index)
+ begin
+ function initialize_table_from_text_file_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTableFromTextFile") do
+ desc = tf.NodeDescription("InitializeTableFromTextFile")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ filename_ = convert(Tensor{String}, filename_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ desc = tf.EagerOp("InitializeTableFromTextFile")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ filename_ = convert(tf.EagerTensor, filename_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, filename_)
+ end
+ end
+ begin
+ begin
+ if key_index !== nothing
+ desc["key_index"] = Base.Int(key_index)
+ end
+ end
+ begin
+ if value_index !== nothing
+ desc["value_index"] = Base.Int(value_index)
+ end
+ end
+ begin
+ if vocab_size !== nothing
+ desc["vocab_size"] = Base.Int(vocab_size)
+ end
+ end
+ begin
+ if delimiter !== nothing
+ desc["delimiter"] = Base.String(delimiter)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table_from_text_file, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
+ if tf.in_eager_mode()
+ initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ else
+ initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
+ end
 end
- if value_index !== nothing
- desc["value_index"] = Base.Int(value_index)
- end
- if vocab_size !== nothing
- desc["vocab_size"] = Base.Int(vocab_size)
- end
- if delimiter !== nothing
- desc["delimiter"] = Base.String(delimiter)
- end
- end
- tf.Tensor(tf.Operation(desc))
- end
- function initialize_table_from_text_file_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
- desc = tf.EagerOp("InitializeTableFromTextFile")
- table_handle_ = convert(tf.EagerTensor, table_handle_)
- filename_ = convert(tf.EagerTensor, filename_)
- tf.add_input(desc, table_handle_)
- tf.add_input(desc, filename_)
- if key_index !== nothing
- desc["key_index"] = Base.Int(key_index)
- end
- if value_index !== nothing
- desc["value_index"] = Base.Int(value_index)
- end
- if vocab_size !== nothing
- desc["vocab_size"] = Base.Int(vocab_size)
- end
- if delimiter !== nothing
- desc["delimiter"] = Base.String(delimiter)
- end
- res = tf.execute(desc)
- node = tf.TapeNode(initialize_table_from_text_file, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing)
- if tf.in_eager_mode()
- initialize_table_from_text_file_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
- else
- initialize_table_from_text_file_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter)
- end
- end
 end
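
The `!== nothing` guards in these hunks are how optional Julia keyword arguments map onto op attributes: an attr is written into `desc` only when the caller supplies it, so the kernel's registered defaults apply otherwise. For example (illustrative call, not taken from the diff):

    # Sets only the "delimiter" attr; key_index, value_index, and vocab_size
    # stay at the kernel defaults.
    initialize_table_from_text_file(table_handle, filename; delimiter=" ")
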
@@ -11346,33 +20539,57 @@ end
 """
 begin
- function lookup_table_size_graph(table_handle_; name=nothing)
- local desc
- tf.with_op_name(name, "LookupTableSize") do
- desc = tf.NodeDescription("LookupTableSize")
- table_handle_ = convert(Tensor{String}, table_handle_)
- tf.add_input(desc, table_handle_)
+ begin
+ function lookup_table_size_graph(table_handle_; name=nothing)
+ local desc
+ tf.with_op_name(name, "LookupTableSize") do
+ desc = tf.NodeDescription("LookupTableSize")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
 end
- tf.Tensor(tf.Operation(desc))
 end
- function lookup_table_size_eager(table_handle_; name=nothing)
- desc = tf.EagerOp("LookupTableSize")
- table_handle_ = convert(tf.EagerTensor, table_handle_)
- tf.add_input(desc, table_handle_)
- res = tf.execute(desc)
- node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
+ begin
+ function lookup_table_size_eager(table_handle_; name=nothing)
+ desc = tf.EagerOp("LookupTableSize")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ end
+ begin
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(lookup_table_size, [table_handle_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size(table_handle_; name=nothing)
- if tf.in_eager_mode()
- lookup_table_size_eager(table_handle_; name=name)
- else
- lookup_table_size_graph(table_handle_; name=name)
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size(table_handle_; name=nothing)
+ if tf.in_eager_mode()
+ lookup_table_size_eager(table_handle_; name=name)
+ else
+ lookup_table_size_graph(table_handle_; name=name)
+ end
 end
- end
+ end
 end
@@ -11382,82 +20599,194 @@ end
 """
 begin
- function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
- local desc
- tf.with_op_name(name, "SparseApplyAdagradDA") do
- desc = tf.NodeDescription("SparseApplyAdagradDA")
- var_ = convert(Tensor{Any}, var_)
- gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
- gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
- grad_ = convert(Tensor{Any}, grad_)
- indices_ = convert(Tensor{Any}, indices_)
- indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
- lr_ = convert(Tensor{Any}, lr_)
- l1_ = convert(Tensor{Any}, l1_)
- l2_ = convert(Tensor{Any}, l2_)
- global_step_ = convert(Tensor{Int64}, global_step_)
- (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_)
- (indices_,) = tf.tf_promote(indices_)
- tf.add_input(desc, var_)
- tf.add_input(desc, gradient_accumulator_)
- tf.add_input(desc, gradient_squared_accumulator_)
- tf.add_input(desc, grad_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, lr_)
- tf.add_input(desc, l1_)
- tf.add_input(desc, l2_)
- tf.add_input(desc, global_step_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
- end
- end
- tf.Tensor(tf.Operation(desc))
- end
- function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
- desc = tf.EagerOp("SparseApplyAdagradDA")
- var_ = convert(tf.EagerTensor, var_)
- gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
- gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
- grad_ = convert(tf.EagerTensor, grad_)
- indices_ = convert(tf.EagerTensor, indices_)
- lr_ = convert(tf.EagerTensor, lr_)
- l1_ = convert(tf.EagerTensor, l1_)
- l2_ = convert(tf.EagerTensor, l2_)
- global_step_ = convert(tf.EagerTensor, global_step_)
- tf.add_input(desc, var_)
- tf.add_input(desc, gradient_accumulator_)
- tf.add_input(desc, gradient_squared_accumulator_)
- tf.add_input(desc, grad_)
- tf.add_input(desc, indices_)
- tf.add_input(desc, lr_)
- tf.add_input(desc, l1_)
- tf.add_input(desc, l2_)
- tf.add_input(desc, global_step_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
- end
- desc["T"] = tf.data_type(var_)
- desc["T"] = tf.data_type(gradient_accumulator_)
- desc["T"] = tf.data_type(gradient_squared_accumulator_)
- desc["T"] = tf.data_type(grad_)
- desc["Tindices"] = tf.data_type(indices_)
- desc["T"] = tf.data_type(lr_)
- desc["T"] = tf.data_type(l1_)
- desc["T"] = tf.data_type(l2_)
- res = tf.execute(desc)
- node = tf.TapeNode(sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
- end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
- if tf.in_eager_mode()
- sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
- else
- sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ begin
+ function sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ local desc
+ tf.with_op_name(name, "SparseApplyAdagradDA") do
+ desc = tf.NodeDescription("SparseApplyAdagradDA")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_)
+ begin
+ end
+ end
+ begin
+ gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ indices_ = convert(Tensor{Any}, indices_)
+ begin
+ indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ l1_ = convert(Tensor{Any}, l1_)
+ begin
+ end
+ end
+ begin
+ l2_ = convert(Tensor{Any}, l2_)
+ begin
+ end
+ end
+ begin
+ global_step_ = convert(Tensor{Int64}, global_step_)
+ begin
+ end
+ end
+ begin
+ (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_)
+ end
+ begin
+ (indices_,) = tf.tf_promote(indices_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ desc = tf.EagerOp("SparseApplyAdagradDA")
+ var_ = convert(tf.EagerTensor, var_)
+ gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_)
+ gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ indices_ = convert(tf.EagerTensor, indices_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ l1_ = convert(tf.EagerTensor, l1_)
+ l2_ = convert(tf.EagerTensor, l2_)
+ global_step_ = convert(tf.EagerTensor, global_step_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, gradient_accumulator_)
+ end
+ begin
+ tf.add_input(desc, gradient_squared_accumulator_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, indices_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, l1_)
+ end
+ begin
+ tf.add_input(desc, l2_)
+ end
+ begin
+ tf.add_input(desc, global_step_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(gradient_squared_accumulator_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["Tindices"] = tf.data_type(indices_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(l1_)
+ end
+ begin
+ desc["T"] = tf.data_type(l2_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing)
+ if tf.in_eager_mode()
+ sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ else
+ sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking)
+ end
 end
- end
+ end
 end
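
One detail worth flagging in the hunk above: the graph builder shifts the caller's 1-based Julia indices to TensorFlow's 0-based convention via `indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)`, but the eager variant passes `indices_` through unshifted. If that asymmetry is not intended, eager calls would update rows offset by one. Roughly:

    indices = [1, 3]    # rows 1 and 3 in Julia terms
    # graph path: the kernel sees [0, 2], i.e. the same rows 0-based
    # eager path as generated: the kernel sees [1, 3]
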
@@ -11467,45 +20796,85 @@ end
 """
 begin
- function broadcast_gradient_args_graph(s0_, s1_; name=nothing)
- local desc
- tf.with_op_name(name, "BroadcastGradientArgs") do
- desc = tf.NodeDescription("BroadcastGradientArgs")
- s0_ = convert(Tensor{Int32}, s0_)
- s1_ = convert(Tensor{Int32}, s1_)
- (s0_, s1_) = tf.tf_promote(s0_, s1_)
- tf.add_input(desc, s0_)
- tf.add_input(desc, s1_)
- end
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:2
- push!(out, tf.Tensor(op, out_idx))
+ begin
+ function broadcast_gradient_args_graph(s0_, s1_; name=nothing)
+ local desc
+ tf.with_op_name(name, "BroadcastGradientArgs") do
+ desc = tf.NodeDescription("BroadcastGradientArgs")
+ begin
+ begin
+ s0_ = convert(Tensor{Int32}, s0_)
+ begin
+ end
+ end
+ begin
+ s1_ = convert(Tensor{Int32}, s1_)
+ begin
+ end
+ end
+ begin
+ (s0_, s1_) = tf.tf_promote(s0_, s1_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:2
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
 end
- out
 end
- function broadcast_gradient_args_eager(s0_, s1_; name=nothing)
- desc = tf.EagerOp("BroadcastGradientArgs")
- s0_ = convert(tf.EagerTensor, s0_)
- s1_ = convert(tf.EagerTensor, s1_)
- tf.add_input(desc, s0_)
- tf.add_input(desc, s1_)
- desc["T"] = tf.data_type(s0_)
- desc["T"] = tf.data_type(s1_)
- res = tf.execute(desc)
- node = tf.TapeNode(broadcast_gradient_args, [s0_, s1_], name=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res
+ begin
+ function broadcast_gradient_args_eager(s0_, s1_; name=nothing)
+ desc = tf.EagerOp("BroadcastGradientArgs")
+ s0_ = convert(tf.EagerTensor, s0_)
+ s1_ = convert(tf.EagerTensor, s1_)
+ begin
+ begin
+ tf.add_input(desc, s0_)
+ end
+ begin
+ tf.add_input(desc, s1_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(s0_)
+ end
+ begin
+ desc["T"] = tf.data_type(s1_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(broadcast_gradient_args, [s0_, s1_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
 end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing)
- if tf.in_eager_mode()
- broadcast_gradient_args_eager(s0_, s1_; name=name)
- else
- broadcast_gradient_args_graph(s0_, s1_; name=name)
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_gradient_args(s0_, s1_; name=nothing)
+ if tf.in_eager_mode()
+ broadcast_gradient_args_eager(s0_, s1_; name=name)
+ else
+ broadcast_gradient_args_graph(s0_, s1_; name=name)
+ end
 end
- end
+ end
 end
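
`BroadcastGradientArgs` is the helper the gradient code uses for broadcasting binary ops: given the `Int32` shape vectors of the two operands, it returns, for each operand, the (0-based) axes of the broadcast result that must be summed to reduce a gradient back to that operand's shape. A rough worked example, under that reading of the kernel:

    # s0 = [2, 3, 1] and s1 = [3, 5] broadcast to [2, 3, 5]
    r0, r1 = broadcast_gradient_args(s0, s1)
    # r0 should be [2]  (s0 had extent 1 on the last axis)
    # r1 should be [0]  (s1 had no leading axis)
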
desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function summary_writer_eager(; name=nothing, shared_name=nothing, container=nothing) - desc = tf.EagerOp("SummaryWriter") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - res = tf.execute(desc) - node = tf.TapeNode(summary_writer, [], name=nothing, shared_name=nothing, container=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function summary_writer_eager(; name=nothing, shared_name=nothing, container=nothing) + desc = tf.EagerOp("SummaryWriter") + begin + end + begin + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(summary_writer, [], name=nothing, shared_name=nothing, container=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing) - if tf.in_eager_mode() - summary_writer_eager(; name=name, shared_name=shared_name, container=container) - else - summary_writer_graph(; name=name, shared_name=shared_name, container=container) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function summary_writer(; name=nothing, shared_name=nothing, container=nothing) + if tf.in_eager_mode() + summary_writer_eager(; name=name, shared_name=shared_name, container=container) + else + summary_writer_graph(; name=name, shared_name=shared_name, container=container) + end end - end + end end @@ -11559,51 +20952,87 @@ end output = input; While (Cond(output)) { output = Body(output) } """ begin - function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - local desc - tf.with_op_name(name, "_While") do - desc = tf.NodeDescription("_While") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) + begin + function _while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "_While") do + desc = tf.NodeDescription("_While") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + desc = tf.EagerOp("_While") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) 
+ end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.in_eager_mode() + _while_eager(input_; name=name, T=T, cond=cond, body=body) + else + _while_graph(input_; name=name, T=T, cond=cond, body=body) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - desc = tf.EagerOp("_While") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - res = tf.execute(desc) - node = tf.TapeNode(_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - if tf.in_eager_mode() - _while_eager(input_; name=name, T=T, cond=cond, body=body) - else - _while_graph(input_; name=name, T=T, cond=cond, body=body) - end - end end @@ -11613,46 +21042,72 @@ end An op that receives embedding activations on the TPU. """ begin - function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) - local desc - tf.with_op_name(name, "RecvTPUEmbeddingActivations") do - desc = tf.NodeDescription("RecvTPUEmbeddingActivations") - if num_outputs !== nothing - desc["num_outputs"] = Base.Int(num_outputs) + begin + function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing) + local desc + tf.with_op_name(name, "RecvTPUEmbeddingActivations") do + desc = tf.NodeDescription("RecvTPUEmbeddingActivations") + begin + end + begin + end + begin + begin + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_outputs + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing) + desc = tf.EagerOp("RecvTPUEmbeddingActivations") + begin + end + begin + begin + if num_outputs !== nothing + desc["num_outputs"] = Base.Int(num_outputs) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing) + if tf.in_eager_mode() + recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config) + else + recv_tpu_embedding_activations_graph(; name=name, 
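
The docstring above is the whole contract of `_While`: `cond` and `body` are function attrs (set with `Base.identity(...)` in the hunk), not Julia closures, and the op loops the tensors through `body` until `cond` fails. In pseudo-Julia:

    # output = input; While (Cond(output)) { output = Body(output) }
    output = input
    while cond(output)
        output = body(output)
    end
    return output
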
@@ -11613,46 +21042,72 @@ end
 """
 An op that receives embedding activations on the TPU.
 """
 begin
- function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing)
- local desc
- tf.with_op_name(name, "RecvTPUEmbeddingActivations") do
- desc = tf.NodeDescription("RecvTPUEmbeddingActivations")
- if num_outputs !== nothing
- desc["num_outputs"] = Base.Int(num_outputs)
+ begin
+ function recv_tpu_embedding_activations_graph(; name=nothing, num_outputs=nothing, config=nothing)
+ local desc
+ tf.with_op_name(name, "RecvTPUEmbeddingActivations") do
+ desc = tf.NodeDescription("RecvTPUEmbeddingActivations")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if num_outputs !== nothing
+ desc["num_outputs"] = Base.Int(num_outputs)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:num_outputs
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing)
+ desc = tf.EagerOp("RecvTPUEmbeddingActivations")
+ begin
+ end
+ begin
+ begin
+ if num_outputs !== nothing
+ desc["num_outputs"] = Base.Int(num_outputs)
+ end
+ end
+ begin
+ if config !== nothing
+ desc["config"] = Base.String(config)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing)
+ if tf.in_eager_mode()
+ recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config)
+ else
+ recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config)
+ end
 end
- if config !== nothing
- desc["config"] = Base.String(config)
- end
- end
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:num_outputs
- push!(out, tf.Tensor(op, out_idx))
- end
- out
- end
- function recv_tpu_embedding_activations_eager(; name=nothing, num_outputs=nothing, config=nothing)
- desc = tf.EagerOp("RecvTPUEmbeddingActivations")
- if num_outputs !== nothing
- desc["num_outputs"] = Base.Int(num_outputs)
- end
- if config !== nothing
- desc["config"] = Base.String(config)
- end
- res = tf.execute(desc)
- node = tf.TapeNode(recv_tpu_embedding_activations, [], name=nothing, num_outputs=nothing, config=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res
- end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function recv_tpu_embedding_activations(; name=nothing, num_outputs=nothing, config=nothing)
- if tf.in_eager_mode()
- recv_tpu_embedding_activations_eager(; name=name, num_outputs=num_outputs, config=config)
- else
- recv_tpu_embedding_activations_graph(; name=name, num_outputs=num_outputs, config=config)
- end
- end
 end
@@ -11662,45 +21117,93 @@ end
 """
 begin
- function initialize_table_graph(table_handle_, keys_, values_; name=nothing)
- local desc
- tf.with_op_name(name, "InitializeTable") do
- desc = tf.NodeDescription("InitializeTable")
- table_handle_ = convert(Tensor{String}, table_handle_)
- keys_ = convert(Tensor{Any}, keys_)
- values_ = convert(Tensor{Any}, values_)
- (values_,) = tf.tf_promote(values_)
- (keys_,) = tf.tf_promote(keys_)
- tf.add_input(desc, table_handle_)
- tf.add_input(desc, keys_)
- tf.add_input(desc, values_)
- end
- tf.Tensor(tf.Operation(desc))
- end
- function initialize_table_eager(table_handle_, keys_, values_; name=nothing)
- desc = tf.EagerOp("InitializeTable")
- table_handle_ = convert(tf.EagerTensor, table_handle_)
- keys_ = convert(tf.EagerTensor, keys_)
- values_ = convert(tf.EagerTensor, values_)
- tf.add_input(desc, table_handle_)
- tf.add_input(desc, keys_)
- tf.add_input(desc, values_)
- desc["Tkey"] = tf.data_type(keys_)
- desc["Tval"] = tf.data_type(values_)
- res = tf.execute(desc)
- node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
- end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing)
- if tf.in_eager_mode()
- initialize_table_eager(table_handle_, keys_, values_; name=name)
- else
- initialize_table_graph(table_handle_, keys_, values_; name=name)
+ begin
+ function initialize_table_graph(table_handle_, keys_, values_; name=nothing)
+ local desc
+ tf.with_op_name(name, "InitializeTable") do
+ desc = tf.NodeDescription("InitializeTable")
+ begin
+ begin
+ table_handle_ = convert(Tensor{String}, table_handle_)
+ begin
+ end
+ end
+ begin
+ keys_ = convert(Tensor{Any}, keys_)
+ begin
+ end
+ end
+ begin
+ values_ = convert(Tensor{Any}, values_)
+ begin
+ end
+ end
+ begin
+ (values_,) = tf.tf_promote(values_)
+ end
+ begin
+ (keys_,) = tf.tf_promote(keys_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function initialize_table_eager(table_handle_, keys_, values_; name=nothing)
+ desc = tf.EagerOp("InitializeTable")
+ table_handle_ = convert(tf.EagerTensor, table_handle_)
+ keys_ = convert(tf.EagerTensor, keys_)
+ values_ = convert(tf.EagerTensor, values_)
+ begin
+ begin
+ tf.add_input(desc, table_handle_)
+ end
+ begin
+ tf.add_input(desc, keys_)
+ end
+ begin
+ tf.add_input(desc, values_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["Tkey"] = tf.data_type(keys_)
+ end
+ begin
+ desc["Tval"] = tf.data_type(values_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(initialize_table, [table_handle_, keys_, values_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table(table_handle_, keys_, values_; name=nothing)
+ if tf.in_eager_mode()
+ initialize_table_eager(table_handle_, keys_, values_; name=name)
+ else
+ initialize_table_graph(table_handle_, keys_, values_; name=name)
+ end
 end
- end
+ end
 end
@@ -11710,77 +21213,133 @@ end
 """
 Debug Numeric Summary Op.
 """
 begin
- function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
- local desc
- tf.with_op_name(name, "DebugNumericSummary") do
- desc = tf.NodeDescription("DebugNumericSummary")
- input_ = convert(Tensor{Any}, input_)
- (input_,) = tf.tf_promote(input_)
- tf.add_input(desc, input_)
- if device_name !== nothing
- desc["device_name"] = Base.String(device_name)
- end
- if tensor_name !== nothing
- desc["tensor_name"] = Base.String(tensor_name)
+ begin
+ function debug_numeric_summary_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ local desc
+ tf.with_op_name(name, "DebugNumericSummary") do
+ desc = tf.NodeDescription("DebugNumericSummary")
+ begin
+ begin
+ input_ = convert(Tensor{Any}, input_)
+ begin
+ end
+ end
+ begin
+ (input_,) = tf.tf_promote(input_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if lower_bound !== nothing
+ desc["lower_bound"] = Base.identity(lower_bound)
+ end
+ end
+ begin
+ if upper_bound !== nothing
+ desc["upper_bound"] = Base.identity(upper_bound)
+ end
+ end
+ begin
+ if mute_if_healthy !== nothing
+ desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ desc = tf.EagerOp("DebugNumericSummary")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if device_name !== nothing
+ desc["device_name"] = Base.String(device_name)
+ end
+ end
+ begin
+ if tensor_name !== nothing
+ desc["tensor_name"] = Base.String(tensor_name)
+ end
+ end
+ begin
+ if debug_urls !== nothing
+ desc["debug_urls"] = map(Base.identity, debug_urls)
+ end
+ end
+ begin
+ if lower_bound !== nothing
+ desc["lower_bound"] = Base.identity(lower_bound)
+ end
+ end
+ begin
+ if upper_bound !== nothing
+ desc["upper_bound"] = Base.identity(upper_bound)
+ end
+ end
+ begin
+ if mute_if_healthy !== nothing
+ desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
+ end
+ end
+ begin
+ if gated_grpc !== nothing
+ desc["gated_grpc"] = Base.Bool(gated_grpc)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(input_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
+ if tf.in_eager_mode()
+ debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
+ else
+ debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
+ end
 end
- if debug_urls !== nothing
- desc["debug_urls"] = map(Base.identity, debug_urls)
- end
- if lower_bound !== nothing
- desc["lower_bound"] = Base.identity(lower_bound)
- end
- if upper_bound !== nothing
- desc["upper_bound"] = Base.identity(upper_bound)
- end
- if mute_if_healthy !== nothing
- desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
- end
- if gated_grpc !== nothing
- desc["gated_grpc"] = Base.Bool(gated_grpc)
- end
- end
- tf.Tensor(tf.Operation(desc))
- end
- function debug_numeric_summary_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
- desc = tf.EagerOp("DebugNumericSummary")
- input_ = convert(tf.EagerTensor, input_)
- tf.add_input(desc, input_)
- if device_name !== nothing
- desc["device_name"] = Base.String(device_name)
- end
- if tensor_name !== nothing
- desc["tensor_name"] = Base.String(tensor_name)
- end
- if debug_urls !== nothing
- desc["debug_urls"] = map(Base.identity, debug_urls)
- end
- if lower_bound !== nothing
- desc["lower_bound"] = Base.identity(lower_bound)
- end
- if upper_bound !== nothing
- desc["upper_bound"] = Base.identity(upper_bound)
- end
- if mute_if_healthy !== nothing
- desc["mute_if_healthy"] = Base.Bool(mute_if_healthy)
- end
- if gated_grpc !== nothing
- desc["gated_grpc"] = Base.Bool(gated_grpc)
- end
- desc["T"] = tf.data_type(input_)
- res = tf.execute(desc)
- node = tf.TapeNode(debug_numeric_summary, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_numeric_summary(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, lower_bound=nothing, upper_bound=nothing, mute_if_healthy=nothing, gated_grpc=nothing)
- if tf.in_eager_mode()
- debug_numeric_summary_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
- else
- debug_numeric_summary_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, lower_bound=lower_bound, upper_bound=upper_bound, mute_if_healthy=mute_if_healthy, gated_grpc=gated_grpc)
- end
- end
 end
@@ -11790,58 +21349,92 @@ end
 """
 Retrieve embedding parameters for a single table.
 """
 begin
- function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
- local desc
- tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do
- desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
- if table_id !== nothing
- desc["table_id"] = Base.Int(table_id)
- end
- if table_name !== nothing
- desc["table_name"] = Base.String(table_name)
- end
- if num_shards !== nothing
- desc["num_shards"] = Base.Int(num_shards)
- end
- if shard_id !== nothing
- desc["shard_id"] = Base.Int(shard_id)
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ local desc
+ tf.with_op_name(name, "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug") do
+ desc = tf.NodeDescription("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ end
+ begin
+ out = tf.Tensor[]
+ op = tf.Operation(desc)
+ for out_idx = 1:3
+ push!(out, tf.Tensor(op, out_idx))
+ end
+ out
+ end
+ end
+ end
+ begin
+ function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
+ begin
+ end
+ begin
+ begin
+ if table_id !== nothing
+ desc["table_id"] = Base.Int(table_id)
+ end
+ end
+ begin
+ if table_name !== nothing
+ desc["table_name"] = Base.String(table_name)
+ end
+ end
+ begin
+ if num_shards !== nothing
+ desc["num_shards"] = Base.Int(num_shards)
+ end
+ end
+ begin
+ if shard_id !== nothing
+ desc["shard_id"] = Base.Int(shard_id)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+ if tf.in_eager_mode()
+ retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ else
+ retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+ end
 end
- end
- out = tf.Tensor[]
- op = tf.Operation(desc)
- for out_idx = 1:3
- push!(out, tf.Tensor(op, out_idx))
- end
- out
- end
- function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
- desc = tf.EagerOp("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug")
- if table_id !== nothing
- desc["table_id"] = Base.Int(table_id)
- end
- if table_name !== nothing
- desc["table_name"] = Base.String(table_name)
- end
- if num_shards !== nothing
- desc["num_shards"] = Base.Int(num_shards)
- end
- if shard_id !== nothing
- desc["shard_id"] = Base.Int(shard_id)
- end
- res = tf.execute(desc)
- node = tf.TapeNode(retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res
- end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
- if tf.in_eager_mode()
- retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
- else
- retrieve_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
- end
- end
 end
@@ -11851,35 +21444,63 @@ end
 """
 begin
- function tanh_graph(x_; name=nothing)
- local desc
- tf.with_op_name(name, "Tanh") do
- desc = tf.NodeDescription("Tanh")
- x_ = convert(Tensor{Any}, x_)
- (x_,) = tf.tf_promote(x_)
- tf.add_input(desc, x_)
+ begin
+ function tanh_graph(x_; name=nothing)
+ local desc
+ tf.with_op_name(name, "Tanh") do
+ desc = tf.NodeDescription("Tanh")
+ begin
+ begin
+ x_ = convert(Tensor{Any}, x_)
+ begin
+ end
+ end
+ begin
+ (x_,) = tf.tf_promote(x_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
 end
- tf.Tensor(tf.Operation(desc))
 end
- function tanh_eager(x_; name=nothing)
- desc = tf.EagerOp("Tanh")
- x_ = convert(tf.EagerTensor, x_)
- tf.add_input(desc, x_)
- desc["T"] = tf.data_type(x_)
- res = tf.execute(desc)
- node = tf.TapeNode(tanh, [x_], name=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
+ begin
+ function tanh_eager(x_; name=nothing)
+ desc = tf.EagerOp("Tanh")
+ x_ = convert(tf.EagerTensor, x_)
+ begin
+ begin
+ tf.add_input(desc, x_)
+ end
+ end
+ begin
+ end
+ begin
+ desc["T"] = tf.data_type(x_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(tanh, [x_], name=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
 end
 end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh(x_; name=nothing)
- if tf.in_eager_mode()
- tanh_eager(x_; name=name)
- else
- tanh_graph(x_; name=name)
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh(x_; name=nothing)
+ if tf.in_eager_mode()
+ tanh_eager(x_; name=name)
+ else
+ tanh_graph(x_; name=name)
+ end
 end
- end
+ end
 end
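
`Tanh` is the simplest instance of the template: one input, no optional attrs, one inferred `T`. Assuming an eager context is active (as set up by `EagerContext` earlier in this patch series) and that `constant` produces an eager tensor at this point in the branch, a round trip would look like:

    x = TensorFlow.constant([0.0, 1.0])
    y = tanh(x)    # tf.in_eager_mode() is true, so this runs tanh_eager immediately
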
@@ -11889,51 +21510,87 @@ end
 """
 begin
- function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
- local desc
- tf.with_op_name(name, "SymbolicGradient") do
- desc = tf.NodeDescription("SymbolicGradient")
- input_ = [convert(Tensor{Any}, x) for x = input_]
- tf.add_input(desc, input_)
- if Tin !== nothing
- desc["Tin"] = map(Base.identity, Tin)
- end
- if Tout !== nothing
- desc["Tout"] = map(Base.identity, Tout)
+ begin
+ function symbolic_gradient_graph(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ local desc
+ tf.with_op_name(name, "SymbolicGradient") do
+ desc = tf.NodeDescription("SymbolicGradient")
+ begin
+ begin
+ input_ = [convert(Tensor{Any}, x) for x = input_]
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ desc = tf.EagerOp("SymbolicGradient")
+ input_ = convert(tf.EagerTensor, input_)
+ begin
+ begin
+ tf.add_input(desc, input_)
+ end
+ end
+ begin
+ begin
+ if Tin !== nothing
+ desc["Tin"] = map(Base.identity, Tin)
+ end
+ end
+ begin
+ if Tout !== nothing
+ desc["Tout"] = map(Base.identity, Tout)
+ end
+ end
+ begin
+ if f !== nothing
+ desc["f"] = Base.identity(f)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
+ if tf.in_eager_mode()
+ symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f)
+ else
+ symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f)
+ end
 end
- if f !== nothing
- desc["f"] = Base.identity(f)
- end
- end
- tf.Tensor(tf.Operation(desc))
 end
- function symbolic_gradient_eager(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
- desc = tf.EagerOp("SymbolicGradient")
- input_ = convert(tf.EagerTensor, input_)
- tf.add_input(desc, input_)
- if Tin !== nothing
- desc["Tin"] = map(Base.identity, Tin)
- end
- if Tout !== nothing
- desc["Tout"] = map(Base.identity, Tout)
- end
- if f !== nothing
- desc["f"] = Base.identity(f)
- end
- res = tf.execute(desc)
- node = tf.TapeNode(symbolic_gradient, [input_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
- end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function symbolic_gradient(input_; name=nothing, Tin=nothing, Tout=nothing, f=nothing)
- if tf.in_eager_mode()
- symbolic_gradient_eager(input_; name=name, Tin=Tin, Tout=Tout, f=f)
- else
- symbolic_gradient_graph(input_; name=name, Tin=Tin, Tout=Tout, f=f)
- end
- end
 end
@@ -11943,77 +21600,173 @@ end
 """
 begin
- function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
- local desc
- tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do
- desc = tf.NodeDescription("BoostedTreesUpdateEnsemble")
- tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
- feature_ids_ = convert(Tensor{Int32}, feature_ids_)
- node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_]
- gains_ = [convert(Tensor{Float32}, x) for x = gains_]
- thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_]
- left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_]
- right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_]
- max_depth_ = convert(Tensor{Int32}, max_depth_)
- learning_rate_ = convert(Tensor{Float32}, learning_rate_)
- tf.add_input(desc, tree_ensemble_handle_)
- tf.add_input(desc, feature_ids_)
- tf.add_input(desc, node_ids_)
- tf.add_input(desc, gains_)
- tf.add_input(desc, thresholds_)
- tf.add_input(desc, left_node_contribs_)
- tf.add_input(desc, right_node_contribs_)
- tf.add_input(desc, max_depth_)
- tf.add_input(desc, learning_rate_)
- if pruning_mode !== nothing
- desc["pruning_mode"] = Base.Int(pruning_mode)
- end
- if num_features !== nothing
- desc["num_features"] = Base.Int(num_features)
- end
- end
- tf.Tensor(tf.Operation(desc))
- end
- function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
- desc = tf.EagerOp("BoostedTreesUpdateEnsemble")
- tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
- feature_ids_ = convert(tf.EagerTensor, feature_ids_)
- node_ids_ = convert(tf.EagerTensor, node_ids_)
- gains_ = convert(tf.EagerTensor, gains_)
- thresholds_ = convert(tf.EagerTensor, thresholds_)
- left_node_contribs_ = convert(tf.EagerTensor, left_node_contribs_)
- right_node_contribs_ = convert(tf.EagerTensor, right_node_contribs_)
- max_depth_ = convert(tf.EagerTensor, max_depth_)
- learning_rate_ = convert(tf.EagerTensor, learning_rate_)
- tf.add_input(desc, tree_ensemble_handle_)
- tf.add_input(desc, feature_ids_)
- tf.add_input(desc, node_ids_)
- tf.add_input(desc, gains_)
- tf.add_input(desc, thresholds_)
- tf.add_input(desc, left_node_contribs_)
- tf.add_input(desc, right_node_contribs_)
- tf.add_input(desc, max_depth_)
- tf.add_input(desc, learning_rate_)
- if pruning_mode !== nothing
- desc["pruning_mode"] = Base.Int(pruning_mode)
- end
- if num_features !== nothing
- desc["num_features"] = Base.Int(num_features)
- end
- res = tf.execute(desc)
- node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
- end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
- if tf.in_eager_mode()
- boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
- else
- boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
+ begin
+ function boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ local desc
+ tf.with_op_name(name, "BoostedTreesUpdateEnsemble") do
+ desc = tf.NodeDescription("BoostedTreesUpdateEnsemble")
+ begin
+ begin
+ tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_)
+ begin
+ end
+ end
+ begin
+ feature_ids_ = convert(Tensor{Int32}, feature_ids_)
+ begin
+ end
+ end
+ begin
+ node_ids_ = [convert(Tensor{Int32}, x) for x = node_ids_]
+ begin
+ end
+ end
+ begin
+ gains_ = [convert(Tensor{Float32}, x) for x = gains_]
+ begin
+ end
+ end
+ begin
+ thresholds_ = [convert(Tensor{Int32}, x) for x = thresholds_]
+ begin
+ end
+ end
+ begin
+ left_node_contribs_ = [convert(Tensor{Float32}, x) for x = left_node_contribs_]
+ begin
+ end
+ end
+ begin
+ right_node_contribs_ = [convert(Tensor{Float32}, x) for x = right_node_contribs_]
+ begin
+ end
+ end
+ begin
+ max_depth_ = convert(Tensor{Int32}, max_depth_)
+ begin
+ end
+ end
+ begin
+ learning_rate_ = convert(Tensor{Float32}, learning_rate_)
+ begin
+ end
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, feature_ids_)
+ end
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gains_)
+ end
+ begin
+ tf.add_input(desc, thresholds_)
+ end
+ begin
+ tf.add_input(desc, left_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, right_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, max_depth_)
+ end
+ begin
+ tf.add_input(desc, learning_rate_)
+ end
+ end
+ begin
+ begin
+ if pruning_mode !== nothing
+ desc["pruning_mode"] = Base.Int(pruning_mode)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ desc = tf.EagerOp("BoostedTreesUpdateEnsemble")
+ tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_)
+ feature_ids_ = convert(tf.EagerTensor, feature_ids_)
+ node_ids_ = convert(tf.EagerTensor, node_ids_)
+ gains_ = convert(tf.EagerTensor, gains_)
+ thresholds_ = convert(tf.EagerTensor, thresholds_)
+ left_node_contribs_ = convert(tf.EagerTensor, left_node_contribs_)
+ right_node_contribs_ = convert(tf.EagerTensor, right_node_contribs_)
+ max_depth_ = convert(tf.EagerTensor, max_depth_)
+ learning_rate_ = convert(tf.EagerTensor, learning_rate_)
+ begin
+ begin
+ tf.add_input(desc, tree_ensemble_handle_)
+ end
+ begin
+ tf.add_input(desc, feature_ids_)
+ end
+ begin
+ tf.add_input(desc, node_ids_)
+ end
+ begin
+ tf.add_input(desc, gains_)
+ end
+ begin
+ tf.add_input(desc, thresholds_)
+ end
+ begin
+ tf.add_input(desc, left_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, right_node_contribs_)
+ end
+ begin
+ tf.add_input(desc, max_depth_)
+ end
+ begin
+ tf.add_input(desc, learning_rate_)
+ end
+ end
+ begin
+ begin
+ if pruning_mode !== nothing
+ desc["pruning_mode"] = Base.Int(pruning_mode)
+ end
+ end
+ begin
+ if num_features !== nothing
+ desc["num_features"] = Base.Int(num_features)
+ end
+ end
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(boosted_trees_update_ensemble, [tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_], name=nothing, pruning_mode=nothing, num_features=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_update_ensemble(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=nothing, pruning_mode=nothing, num_features=nothing)
+ if tf.in_eager_mode()
+ boosted_trees_update_ensemble_eager(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
+ else
+ boosted_trees_update_ensemble_graph(tree_ensemble_handle_, feature_ids_, node_ids_, gains_, thresholds_, left_node_contribs_, right_node_contribs_, max_depth_, learning_rate_; name=name, pruning_mode=pruning_mode, num_features=num_features)
+ end
 end
- end
+ end
 end
@@ -12023,67 +21776,143 @@ end
 """
 begin
- function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
- local desc
- tf.with_op_name(name, "ApplyMomentum") do
- desc = tf.NodeDescription("ApplyMomentum")
- var_ = convert(Tensor{Any}, var_)
- accum_ = convert(Tensor{Any}, accum_)
- lr_ = convert(Tensor{Any}, lr_)
- grad_ = convert(Tensor{Any}, grad_)
- momentum_ = convert(Tensor{Any}, momentum_)
- (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_)
- tf.add_input(desc, var_)
- tf.add_input(desc, accum_)
- tf.add_input(desc, lr_)
- tf.add_input(desc, grad_)
- tf.add_input(desc, momentum_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
- end
- if use_nesterov !== nothing
- desc["use_nesterov"] = Base.Bool(use_nesterov)
+ begin
+ function apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ local desc
+ tf.with_op_name(name, "ApplyMomentum") do
+ desc = tf.NodeDescription("ApplyMomentum")
+ begin
+ begin
+ var_ = convert(Tensor{Any}, var_)
+ begin
+ end
+ end
+ begin
+ accum_ = convert(Tensor{Any}, accum_)
+ begin
+ end
+ end
+ begin
+ lr_ = convert(Tensor{Any}, lr_)
+ begin
+ end
+ end
+ begin
+ grad_ = convert(Tensor{Any}, grad_)
+ begin
+ end
+ end
+ begin
+ momentum_ = convert(Tensor{Any}, momentum_)
+ begin
+ end
+ end
+ begin
+ (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_)
+ end
+ end
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ end
+ tf.Tensor(tf.Operation(desc))
+ end
+ end
+ begin
+ function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ desc = tf.EagerOp("ApplyMomentum")
+ var_ = convert(tf.EagerTensor, var_)
+ accum_ = convert(tf.EagerTensor, accum_)
+ lr_ = convert(tf.EagerTensor, lr_)
+ grad_ = convert(tf.EagerTensor, grad_)
+ momentum_ = convert(tf.EagerTensor, momentum_)
+ begin
+ begin
+ tf.add_input(desc, var_)
+ end
+ begin
+ tf.add_input(desc, accum_)
+ end
+ begin
+ tf.add_input(desc, lr_)
+ end
+ begin
+ tf.add_input(desc, grad_)
+ end
+ begin
+ tf.add_input(desc, momentum_)
+ end
+ end
+ begin
+ begin
+ if use_locking !== nothing
+ desc["use_locking"] = Base.Bool(use_locking)
+ end
+ end
+ begin
+ if use_nesterov !== nothing
+ desc["use_nesterov"] = Base.Bool(use_nesterov)
+ end
+ end
+ end
+ begin
+ desc["T"] = tf.data_type(var_)
+ end
+ begin
+ desc["T"] = tf.data_type(accum_)
+ end
+ begin
+ desc["T"] = tf.data_type(lr_)
+ end
+ begin
+ desc["T"] = tf.data_type(grad_)
+ end
+ begin
+ desc["T"] = tf.data_type(momentum_)
+ end
+ res = tf.execute(desc)
+ node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
+ if length(res) >= 1
+ tf.add_node(res[1], node)
+ return res[1]
+ end
+ end
+ end
+ begin
+ #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
+ if tf.in_eager_mode()
+ apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ else
+ apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
+ end
 end
- end
- tf.Tensor(tf.Operation(desc))
 end
- function apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
- desc = tf.EagerOp("ApplyMomentum")
- var_ = convert(tf.EagerTensor, var_)
- accum_ = convert(tf.EagerTensor, accum_)
- lr_ = convert(tf.EagerTensor, lr_)
- grad_ = convert(tf.EagerTensor, grad_)
- momentum_ = convert(tf.EagerTensor, momentum_)
- tf.add_input(desc, var_)
- tf.add_input(desc, accum_)
- tf.add_input(desc, lr_)
- tf.add_input(desc, grad_)
- tf.add_input(desc, momentum_)
- if use_locking !== nothing
- desc["use_locking"] = Base.Bool(use_locking)
- end
- if use_nesterov !== nothing
- desc["use_nesterov"] = Base.Bool(use_nesterov)
- end
- desc["T"] = tf.data_type(var_)
- desc["T"] = tf.data_type(accum_)
- desc["T"] = tf.data_type(lr_)
- desc["T"] = tf.data_type(grad_)
- desc["T"] = tf.data_type(momentum_)
- res = tf.execute(desc)
- node = tf.TapeNode(apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res)
- if length(res) >= 1
- tf.add_node(res[1], node)
- return res[1]
- end
- end
- #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing)
- if tf.in_eager_mode()
- apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
- else
- apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov)
- end
- end
 end
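
For reference, the update `ApplyMomentum` performs, per the upstream TensorFlow kernel docs (not restated anywhere in this generated file):

    # accum = accum * momentum + grad
    # var  -= lr * accum                          # use_nesterov == false
    # var  -= lr * grad + lr * momentum * accum   # use_nesterov == true
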
convert(Tensor{String}, reader_handle_) - queue_handle_ = convert(Tensor{String}, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function reader_read_graph(reader_handle_, queue_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRead") do + desc = tf.NodeDescription("ReaderRead") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + begin + queue_handle_ = convert(Tensor{String}, queue_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function reader_read_eager(reader_handle_, queue_handle_; name=nothing) - desc = tf.EagerOp("ReaderRead") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - queue_handle_ = convert(tf.EagerTensor, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function reader_read_eager(reader_handle_, queue_handle_; name=nothing) + desc = tf.EagerOp("ReaderRead") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_read, [reader_handle_, queue_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing) - if tf.in_eager_mode() - reader_read_eager(reader_handle_, queue_handle_; name=name) - else - reader_read_graph(reader_handle_, queue_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read(reader_handle_, queue_handle_; name=nothing) + if tf.in_eager_mode() + reader_read_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_graph(reader_handle_, queue_handle_; name=name) + end end - end + end end @@ -12138,45 +22001,77 @@ end An op that blocks execution until a distributed TPU system has """ begin - function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - local desc - tf.with_op_name(name, "_WaitForDistributedTPU") do - desc = tf.NodeDescription("_WaitForDistributedTPU") - inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if startup_timeout_sec !== nothing - desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) - end - if N !== nothing - desc["N"] = Base.Int(N) + begin + function _wait_for_distributed_tpu_graph(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + local desc + tf.with_op_name(name, "_WaitForDistributedTPU") do + desc = tf.NodeDescription("_WaitForDistributedTPU") + begin + begin + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if 
startup_timeout_sec !== nothing + desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + desc = tf.EagerOp("_WaitForDistributedTPU") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if startup_timeout_sec !== nothing + desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) + if tf.in_eager_mode() + _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + else + _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _wait_for_distributed_tpu_eager(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - desc = tf.EagerOp("_WaitForDistributedTPU") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if startup_timeout_sec !== nothing - desc["startup_timeout_sec"] = Base.Int(startup_timeout_sec) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(_wait_for_distributed_tpu, [inputs_], name=nothing, startup_timeout_sec=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _wait_for_distributed_tpu(inputs_; name=nothing, startup_timeout_sec=nothing, N=nothing) - if tf.in_eager_mode() - _wait_for_distributed_tpu_eager(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) - else - _wait_for_distributed_tpu_graph(inputs_; name=name, startup_timeout_sec=startup_timeout_sec, N=N) - end - end end @@ -12186,33 +22081,57 @@ end """ begin - function mutex_lock_graph(mutex_; name=nothing) - local desc - tf.with_op_name(name, "MutexLock") do - desc = tf.NodeDescription("MutexLock") - mutex_ = convert(Tensor{Any}, mutex_) - tf.add_input(desc, mutex_) + begin + function mutex_lock_graph(mutex_; name=nothing) + local desc + tf.with_op_name(name, "MutexLock") do + desc = tf.NodeDescription("MutexLock") + begin + begin + mutex_ = convert(Tensor{Any}, mutex_) + begin + end + end + end + begin + begin + tf.add_input(desc, mutex_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function mutex_lock_eager(mutex_; name=nothing) - desc = tf.EagerOp("MutexLock") - mutex_ = convert(tf.EagerTensor, mutex_) - tf.add_input(desc, mutex_) - res = tf.execute(desc) - node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function mutex_lock_eager(mutex_; name=nothing) + desc = tf.EagerOp("MutexLock") + mutex_ = convert(tf.EagerTensor, mutex_) + begin + begin + tf.add_input(desc, mutex_) + end + end + 
begin + end + res = tf.execute(desc) + node = tf.TapeNode(mutex_lock, [mutex_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_lock(mutex_; name=nothing) - if tf.in_eager_mode() - mutex_lock_eager(mutex_; name=name) - else - mutex_lock_graph(mutex_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutex_lock(mutex_; name=nothing) + if tf.in_eager_mode() + mutex_lock_eager(mutex_; name=name) + else + mutex_lock_graph(mutex_; name=name) + end end - end + end end @@ -12222,37 +22141,69 @@ end """ begin - function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) - local desc - tf.with_op_name(name, "AccumulatorSetGlobalStep") do - desc = tf.NodeDescription("AccumulatorSetGlobalStep") - handle_ = convert(Tensor{String}, handle_) - new_global_step_ = convert(Tensor{Int64}, new_global_step_) - tf.add_input(desc, handle_) - tf.add_input(desc, new_global_step_) + begin + function accumulator_set_global_step_graph(handle_, new_global_step_; name=nothing) + local desc + tf.with_op_name(name, "AccumulatorSetGlobalStep") do + desc = tf.NodeDescription("AccumulatorSetGlobalStep") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + new_global_step_ = convert(Tensor{Int64}, new_global_step_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, new_global_step_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) - desc = tf.EagerOp("AccumulatorSetGlobalStep") - handle_ = convert(tf.EagerTensor, handle_) - new_global_step_ = convert(tf.EagerTensor, new_global_step_) - tf.add_input(desc, handle_) - tf.add_input(desc, new_global_step_) - res = tf.execute(desc) - node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function accumulator_set_global_step_eager(handle_, new_global_step_; name=nothing) + desc = tf.EagerOp("AccumulatorSetGlobalStep") + handle_ = convert(tf.EagerTensor, handle_) + new_global_step_ = convert(tf.EagerTensor, new_global_step_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, new_global_step_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(accumulator_set_global_step, [handle_, new_global_step_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing) - if tf.in_eager_mode() - accumulator_set_global_step_eager(handle_, new_global_step_; name=name) - else - accumulator_set_global_step_graph(handle_, new_global_step_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_set_global_step(handle_, new_global_step_; name=nothing) + if tf.in_eager_mode() + accumulator_set_global_step_eager(handle_, new_global_step_; name=name) + else + accumulator_set_global_step_graph(handle_, new_global_step_; name=name) + end end - end + end end @@ -12262,62 +22213,136 @@ end """ begin - function 
quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - local desc - tf.with_op_name(name, "QuantizedAdd") do - desc = tf.NodeDescription("QuantizedAdd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - min_x_ = convert(Tensor{Float32}, min_x_) - max_x_ = convert(Tensor{Float32}, max_x_) - min_y_ = convert(Tensor{Float32}, min_y_) - max_y_ = convert(Tensor{Float32}, max_y_) - (x_,) = tf.tf_promote(x_) - (y_,) = tf.tf_promote(y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - desc = tf.EagerOp("QuantizedAdd") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - min_x_ = convert(tf.EagerTensor, min_x_) - max_x_ = convert(tf.EagerTensor, max_x_) - min_y_ = convert(tf.EagerTensor, min_y_) - max_y_ = convert(tf.EagerTensor, max_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, min_x_) - tf.add_input(desc, max_x_) - tf.add_input(desc, min_y_) - tf.add_input(desc, max_y_) - desc["T1"] = tf.data_type(x_) - desc["T2"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) - if tf.in_eager_mode() - quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) - else - quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + begin + function quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + local desc + tf.with_op_name(name, "QuantizedAdd") do + desc = tf.NodeDescription("QuantizedAdd") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + min_x_ = convert(Tensor{Float32}, min_x_) + begin + end + end + begin + max_x_ = convert(Tensor{Float32}, max_x_) + begin + end + end + begin + min_y_ = convert(Tensor{Float32}, min_y_) + begin + end + end + begin + max_y_ = convert(Tensor{Float32}, max_y_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (y_,) = tf.tf_promote(y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, min_x_) + end + begin + tf.add_input(desc, max_x_) + end + begin + tf.add_input(desc, min_y_) + end + begin + tf.add_input(desc, max_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + desc = tf.EagerOp("QuantizedAdd") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + min_x_ = convert(tf.EagerTensor, min_x_) + max_x_ = convert(tf.EagerTensor, max_x_) + min_y_ = convert(tf.EagerTensor, min_y_) + max_y_ = convert(tf.EagerTensor, max_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, min_x_) + end + begin + tf.add_input(desc, max_x_) + end 
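+                # Note: TFE_Execute consumes inputs positionally, so the six
+                # inputs must be added in the order declared by the QuantizedAdd
+                # op definition: x, y, min_x, max_x, min_y, max_y.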
+ begin + tf.add_input(desc, min_y_) + end + begin + tf.add_input(desc, max_y_) + end + end + begin + end + begin + desc["T1"] = tf.data_type(x_) + end + begin + desc["T2"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_add, [x_, y_, min_x_, max_x_, min_y_, max_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_add(x_, y_, min_x_, max_x_, min_y_, max_y_; name=nothing) + if tf.in_eager_mode() + quantized_add_eager(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + else + quantized_add_graph(x_, y_, min_x_, max_x_, min_y_, max_y_; name=name) + end end - end + end end @@ -12327,41 +22352,73 @@ end """ begin - function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) - local desc - tf.with_op_name(name, "Squeeze") do - desc = tf.NodeDescription("Squeeze") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if squeeze_dims !== nothing - desc["squeeze_dims"] = map(Base.identity, squeeze_dims) + begin + function squeeze_graph(input_; name=nothing, squeeze_dims=nothing) + local desc + tf.with_op_name(name, "Squeeze") do + desc = tf.NodeDescription("Squeeze") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if squeeze_dims !== nothing + desc["squeeze_dims"] = map(Base.identity, squeeze_dims) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) - desc = tf.EagerOp("Squeeze") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if squeeze_dims !== nothing - desc["squeeze_dims"] = map(Base.identity, squeeze_dims) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function squeeze_eager(input_; name=nothing, squeeze_dims=nothing) + desc = tf.EagerOp("Squeeze") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if squeeze_dims !== nothing + desc["squeeze_dims"] = map(Base.identity, squeeze_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(squeeze, [input_], name=nothing, squeeze_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) - if tf.in_eager_mode() - squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) - else - squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squeeze(input_; name=nothing, squeeze_dims=nothing) + if tf.in_eager_mode() + squeeze_eager(input_; name=name, squeeze_dims=squeeze_dims) + else + squeeze_graph(input_; name=name, squeeze_dims=squeeze_dims) + end end - end + end end @@ -12371,33 +22428,57 @@ end """ begin - function experimental_matching_files_dataset_graph(patterns_; name=nothing) - local desc - tf.with_op_name(name, 
"ExperimentalMatchingFilesDataset") do - desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") - patterns_ = convert(Tensor{String}, patterns_) - tf.add_input(desc, patterns_) + begin + function experimental_matching_files_dataset_graph(patterns_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalMatchingFilesDataset") do + desc = tf.NodeDescription("ExperimentalMatchingFilesDataset") + begin + begin + patterns_ = convert(Tensor{String}, patterns_) + begin + end + end + end + begin + begin + tf.add_input(desc, patterns_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_matching_files_dataset_eager(patterns_; name=nothing) - desc = tf.EagerOp("ExperimentalMatchingFilesDataset") - patterns_ = convert(tf.EagerTensor, patterns_) - tf.add_input(desc, patterns_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_matching_files_dataset_eager(patterns_; name=nothing) + desc = tf.EagerOp("ExperimentalMatchingFilesDataset") + patterns_ = convert(tf.EagerTensor, patterns_) + begin + begin + tf.add_input(desc, patterns_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_matching_files_dataset, [patterns_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing) - if tf.in_eager_mode() - experimental_matching_files_dataset_eager(patterns_; name=name) - else - experimental_matching_files_dataset_graph(patterns_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_matching_files_dataset(patterns_; name=nothing) + if tf.in_eager_mode() + experimental_matching_files_dataset_eager(patterns_; name=name) + else + experimental_matching_files_dataset_graph(patterns_; name=name) + end end - end + end end @@ -12407,41 +22488,81 @@ end """ begin - function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do - desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - filename_ = convert(Tensor{String}, filename_) - compression_type_ = convert(Tensor{String}, compression_type_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - tf.add_input(desc, compression_type_) - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) - desc = tf.EagerOp("ExperimentalDatasetToTFRecord") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - filename_ = convert(tf.EagerTensor, filename_) - compression_type_ = convert(tf.EagerTensor, compression_type_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, filename_) - tf.add_input(desc, compression_type_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) - if tf.in_eager_mode() - experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) - else - experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) + begin + function experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetToTFRecord") do + desc = tf.NodeDescription("ExperimentalDatasetToTFRecord") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + begin + compression_type_ = convert(Tensor{String}, compression_type_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, compression_type_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=nothing) + desc = tf.EagerOp("ExperimentalDatasetToTFRecord") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + filename_ = convert(tf.EagerTensor, filename_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, compression_type_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_dataset_to_tf_record, [input_dataset_, filename_, compression_type_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_to_tf_record(input_dataset_, filename_, compression_type_; name=nothing) + if tf.in_eager_mode() + experimental_dataset_to_tf_record_eager(input_dataset_, filename_, compression_type_; name=name) + else + experimental_dataset_to_tf_record_graph(input_dataset_, filename_, compression_type_; name=name) + end end - end + end end @@ -12451,30 +22572,45 @@ end """ begin - function no_op_graph(; name=nothing) - local desc - tf.with_op_name(name, "NoOp") do - desc - tf.NodeDescription("NoOp") + begin + function no_op_graph(; name=nothing) + local desc + tf.with_op_name(name, "NoOp") do + desc = tf.NodeDescription("NoOp") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function no_op_eager(; name=nothing) - desc = tf.EagerOp("NoOp") - res = tf.execute(desc) - node = tf.TapeNode(no_op, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function no_op_eager(; name=nothing) + desc = tf.EagerOp("NoOp") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(no_op, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function no_op(; name=nothing) - if tf.in_eager_mode() - no_op_eager(; name=name) - else - no_op_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function no_op(; name=nothing) + if tf.in_eager_mode() + no_op_eager(; name=name) + else + no_op_graph(; 
name=name) + end end - end + end end @@ -12484,51 +22620,87 @@ end """ begin - function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "ZipDataset") do - desc = tf.NodeDescription("ZipDataset") - input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] - tf.add_input(desc, input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function zip_dataset_graph(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ZipDataset") do + desc = tf.NodeDescription("ZipDataset") + begin + begin + input_datasets_ = [convert(Tensor{Any}, x) for x = input_datasets_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_datasets_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("ZipDataset") + input_datasets_ = convert(tf.EagerTensor, input_datasets_) + begin + begin + tf.add_input(desc, input_datasets_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zip_dataset(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) - end - function zip_dataset_eager(input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - desc = tf.EagerOp("ZipDataset") - input_datasets_ = convert(tf.EagerTensor, input_datasets_) - tf.add_input(desc, input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(zip_dataset, [input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zip_dataset(input_datasets_; name=nothing, 
output_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - zip_dataset_eager(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - else - zip_dataset_graph(input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - end - end end @@ -12538,57 +22710,97 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - tf.add_input(desc, parameters_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingStochasticGradientDescentParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + begin + begin + tf.add_input(desc, parameters_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + 
load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingStochasticGradientDescentParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - tf.add_input(desc, parameters_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_stochastic_gradient_descent_parameters, [parameters_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_stochastic_gradient_descent_parameters(parameters_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_stochastic_gradient_descent_parameters_eager(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_stochastic_gradient_descent_parameters_graph(parameters_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -12598,41 +22810,65 @@ end """ begin - function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "IdentityReaderV2") do - desc = tf.NodeDescription("IdentityReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function identity_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "IdentityReaderV2") do + desc = tf.NodeDescription("IdentityReaderV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function identity_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("IdentityReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function identity_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("IdentityReaderV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if 
shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(identity_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) - else - identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + identity_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + identity_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -12642,41 +22878,65 @@ end """ begin - function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "LMDBReader") do - desc = tf.NodeDescription("LMDBReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function lmdb_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "LMDBReader") do + desc = tf.NodeDescription("LMDBReader") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function lmdb_reader_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("LMDBReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function lmdb_reader_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("LMDBReader") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(lmdb_reader, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - lmdb_reader_eager(; name=name, container=container, shared_name=shared_name) - else - lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lmdb_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + lmdb_reader_eager(; name=name, container=container, 
shared_name=shared_name) + else + lmdb_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -12686,53 +22946,93 @@ end """ begin - function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "NcclAllReduce") do - desc = tf.NodeDescription("NcclAllReduce") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function nccl_all_reduce_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "NcclAllReduce") do + desc = tf.NodeDescription("NcclAllReduce") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("NcclAllReduce") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function nccl_all_reduce_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - desc = tf.EagerOp("NcclAllReduce") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(nccl_all_reduce, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function nccl_all_reduce(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - nccl_all_reduce_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - nccl_all_reduce_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - end - end end @@ -12742,41 +23042,81 @@ end """ begin - function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "TextLineDataset") do - desc = tf.NodeDescription("TextLineDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) - end - function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) - desc = tf.EagerOp("TextLineDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - compression_type_ = convert(tf.EagerTensor, compression_type_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - res = tf.execute(desc) - node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.in_eager_mode() - text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) - else - text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + begin + function text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TextLineDataset") do + desc = tf.NodeDescription("TextLineDataset") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + begin + compression_type_ = convert(Tensor{String}, compression_type_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) + desc = tf.EagerOp("TextLineDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(text_line_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# 
tf.@op function text_line_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.in_eager_mode() + text_line_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + text_line_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end end - end + end end @@ -12786,51 +23126,87 @@ end """ begin - function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - local desc - tf.with_op_name(name, "SdcaShrinkL1") do - desc = tf.NodeDescription("SdcaShrinkL1") - weights_ = [convert(Tensor{Float32}, x) for x = weights_] - tf.add_input(desc, weights_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) + begin + function sdca_shrink_l1_graph(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + local desc + tf.with_op_name(name, "SdcaShrinkL1") do + desc = tf.NodeDescription("SdcaShrinkL1") + begin + begin + weights_ = [convert(Tensor{Float32}, x) for x = weights_] + begin + end + end + end + begin + begin + tf.add_input(desc, weights_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + desc = tf.EagerOp("SdcaShrinkL1") + weights_ = convert(tf.EagerTensor, weights_) + begin + begin + tf.add_input(desc, weights_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) + if tf.in_eager_mode() + sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + else + sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sdca_shrink_l1_eager(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - desc = tf.EagerOp("SdcaShrinkL1") - weights_ = convert(tf.EagerTensor, weights_) - tf.add_input(desc, weights_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - res = tf.execute(desc) - node = tf.TapeNode(sdca_shrink_l1, [weights_], name=nothing, num_features=nothing, l1=nothing, l2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_shrink_l1(weights_; name=nothing, num_features=nothing, l1=nothing, l2=nothing) - if tf.in_eager_mode() - 
sdca_shrink_l1_eager(weights_; name=name, num_features=num_features, l1=l1, l2=l2) - else - sdca_shrink_l1_graph(weights_; name=name, num_features=num_features, l1=l1, l2=l2) - end - end end @@ -12840,47 +23216,75 @@ end """ begin - function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "TFRecordReaderV2") do - desc = tf.NodeDescription("TFRecordReaderV2") - if container !== nothing - desc["container"] = Base.String(container) + begin + function tf_record_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "TFRecordReaderV2") do + desc = tf.NodeDescription("TFRecordReaderV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tf_record_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + desc = tf.EagerOp("TFRecordReaderV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.in_eager_mode() + tf_record_reader_v2_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tf_record_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - desc = tf.EagerOp("TFRecordReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - res = tf.execute(desc) - node = tf.TapeNode(tf_record_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader_v2(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - if tf.in_eager_mode() - tf_record_reader_v2_eager(; name=name, container=container, 
shared_name=shared_name, compression_type=compression_type) - else - tf_record_reader_v2_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) - end - end end @@ -12890,67 +23294,135 @@ end """ begin - function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "PaddedBatchDatasetV2") do - desc = tf.NodeDescription("PaddedBatchDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] - padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - tf.add_input(desc, drop_remainder_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) + begin + function padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDatasetV2") do + desc = tf.NodeDescription("PaddedBatchDatasetV2") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] + begin + end + end + begin + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + begin + end + end + begin + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, padded_shapes_) + end + begin + tf.add_input(desc, padding_values_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("PaddedBatchDatasetV2") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) + padding_values_ = convert(tf.EagerTensor, padding_values_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, padded_shapes_) + end + begin + tf.add_input(desc, padding_values_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = 
map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - desc = tf.EagerOp("PaddedBatchDatasetV2") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) - padding_values_ = convert(tf.EagerTensor, padding_values_) - drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - tf.add_input(desc, drop_remainder_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(padded_batch_dataset_v2, [input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset_v2(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - padded_batch_dataset_v2_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - else - padded_batch_dataset_v2_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_, drop_remainder_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - end - end end @@ -12960,45 +23432,77 @@ end """ begin - function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do - desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") - string_handle_ = 
convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function multi_device_iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorFromStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorFromStringHandle") + begin + begin + string_handle_ = convert(Tensor{String}, string_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") + string_handle_ = convert(tf.EagerTensor, string_handle_) + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function multi_device_iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("MultiDeviceIteratorFromStringHandle") - string_handle_ = convert(tf.EagerTensor, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_from_string_handle_graph(string_handle_; name=name, 
output_types=output_types, output_shapes=output_shapes) - end - end end @@ -13008,61 +23512,109 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, 
table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_proximal_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_proximal_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -13072,37 +23624,69 @@ end """ begin - function tensor_array_size_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySize") do - desc = tf.NodeDescription("TensorArraySize") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) + begin + function tensor_array_size_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySize") do + desc = tf.NodeDescription("TensorArraySize") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_size_eager(handle_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySize") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_array_size_eager(handle_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySize") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = 
convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_size, [handle_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_eager(handle_, flow_in_; name=name) - else - tensor_array_size_graph(handle_, flow_in_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_eager(handle_, flow_in_; name=name) + else + tensor_array_size_graph(handle_, flow_in_; name=name) + end end - end + end end @@ -13112,59 +23696,95 @@ end """ begin - function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapSize") do - desc = tf.NodeDescription("OrderedMapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function ordered_map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapSize") do + desc = tf.NodeDescription("OrderedMapSize") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapSize") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, 
dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function ordered_map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -13174,47 +23794,91 @@ end """ begin - function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomUniform") do - desc = tf.NodeDescription("StatelessRandomUniform") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function stateless_random_uniform_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniform") do + desc = tf.NodeDescription("StatelessRandomUniform") + begin + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (seed_,) = tf.tf_promote(seed_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomUniform") + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + begin + desc["Tseed"] = 
tf.data_type(seed_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_random_uniform_eager(shape_, seed_; name=nothing, dtype=nothing) - desc = tf.EagerOp("StatelessRandomUniform") - shape_ = convert(tf.EagerTensor, shape_) - seed_ = convert(tf.EagerTensor, seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - desc["Tseed"] = tf.data_type(seed_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_random_uniform, [shape_, seed_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_uniform_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_random_uniform_graph(shape_, seed_; name=name, dtype=dtype) - end - end end @@ -13224,73 +23888,153 @@ end """ begin - function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "SparseToSparseSetOperation") do - desc = tf.NodeDescription("SparseToSparseSetOperation") - set1_indices_ = convert(Tensor{Int64}, set1_indices_) - set1_values_ = convert(Tensor{Any}, set1_values_) - set1_shape_ = convert(Tensor{Int64}, set1_shape_) - set2_indices_ = convert(Tensor{Int64}, set2_indices_) - set2_values_ = convert(Tensor{Any}, set2_values_) - set2_shape_ = convert(Tensor{Int64}, set2_shape_) - (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_) - tf.add_input(desc, set1_indices_) - tf.add_input(desc, set1_values_) - tf.add_input(desc, set1_shape_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) + begin + function sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "SparseToSparseSetOperation") do + desc = tf.NodeDescription("SparseToSparseSetOperation") + begin + begin + set1_indices_ = convert(Tensor{Int64}, set1_indices_) + begin + end + end + begin + set1_values_ = convert(Tensor{Any}, set1_values_) + begin + end + end + begin + set1_shape_ = convert(Tensor{Int64}, set1_shape_) + begin + end + end + begin + set2_indices_ = convert(Tensor{Int64}, set2_indices_) + begin + end + end + begin + set2_values_ = convert(Tensor{Any}, set2_values_) + begin + end + end + begin + set2_shape_ = convert(Tensor{Int64}, set2_shape_) + begin + end + end + begin + (set1_values_, set2_values_) = tf.tf_promote(set1_values_, set2_values_) + 
end + end + begin + begin + tf.add_input(desc, set1_indices_) + end + begin + tf.add_input(desc, set1_values_) + end + begin + tf.add_input(desc, set1_shape_) + end + begin + tf.add_input(desc, set2_indices_) + end + begin + tf.add_input(desc, set2_values_) + end + begin + tf.add_input(desc, set2_shape_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + desc = tf.EagerOp("SparseToSparseSetOperation") + set1_indices_ = convert(tf.EagerTensor, set1_indices_) + set1_values_ = convert(tf.EagerTensor, set1_values_) + set1_shape_ = convert(tf.EagerTensor, set1_shape_) + set2_indices_ = convert(tf.EagerTensor, set2_indices_) + set2_values_ = convert(tf.EagerTensor, set2_values_) + set2_shape_ = convert(tf.EagerTensor, set2_shape_) + begin + begin + tf.add_input(desc, set1_indices_) + end + begin + tf.add_input(desc, set1_values_) + end + begin + tf.add_input(desc, set1_shape_) + end + begin + tf.add_input(desc, set2_indices_) + end + begin + tf.add_input(desc, set2_values_) + end + begin + tf.add_input(desc, set2_shape_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["T"] = tf.data_type(set1_values_) + end + begin + desc["T"] = tf.data_type(set2_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - desc = tf.EagerOp("SparseToSparseSetOperation") - set1_indices_ = convert(tf.EagerTensor, set1_indices_) - set1_values_ = convert(tf.EagerTensor, set1_values_) - set1_shape_ = 
convert(tf.EagerTensor, set1_shape_) - set2_indices_ = convert(tf.EagerTensor, set2_indices_) - set2_values_ = convert(tf.EagerTensor, set2_values_) - set2_shape_ = convert(tf.EagerTensor, set2_shape_) - tf.add_input(desc, set1_indices_) - tf.add_input(desc, set1_values_) - tf.add_input(desc, set1_shape_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["T"] = tf.data_type(set1_values_) - desc["T"] = tf.data_type(set2_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_to_sparse_set_operation, [set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_sparse_set_operation(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - sparse_to_sparse_set_operation_eager(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - sparse_to_sparse_set_operation_graph(set1_indices_, set1_values_, set1_shape_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - end - end end @@ -13300,53 +24044,93 @@ end """ begin - function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - local desc - tf.with_op_name(name, "TensorSummary") do - desc = tf.NodeDescription("TensorSummary") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if description !== nothing - desc["description"] = Base.String(description) - end - if labels !== nothing - desc["labels"] = map(Base.identity, labels) + begin + function tensor_summary_graph(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + local desc + tf.with_op_name(name, "TensorSummary") do + desc = tf.NodeDescription("TensorSummary") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if description !== nothing + desc["description"] = Base.String(description) + end + end + begin + if labels !== nothing + desc["labels"] = map(Base.identity, labels) + end + end + begin + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + desc = tf.EagerOp("TensorSummary") + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if description !== nothing + desc["description"] = Base.String(description) + end + end + begin + if labels !== nothing + desc["labels"] = map(Base.identity, labels) + end + end + begin + if display_name !== nothing + desc["display_name"] = Base.String(display_name) + end + end + end + 
begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) + if tf.in_eager_mode() + tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) + else + tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) + end end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_summary_eager(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - desc = tf.EagerOp("TensorSummary") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - if description !== nothing - desc["description"] = Base.String(description) - end - if labels !== nothing - desc["labels"] = map(Base.identity, labels) - end - if display_name !== nothing - desc["display_name"] = Base.String(display_name) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_summary, [tensor_], name=nothing, description=nothing, labels=nothing, display_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_summary(tensor_; name=nothing, description=nothing, labels=nothing, display_name=nothing) - if tf.in_eager_mode() - tensor_summary_eager(tensor_; name=name, description=description, labels=labels, display_name=display_name) - else - tensor_summary_graph(tensor_; name=name, description=description, labels=labels, display_name=display_name) - end - end end @@ -13356,51 +24140,87 @@ end """ begin - function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - local desc - tf.with_op_name(name, "RemoteFusedGraphExecute") do - desc = tf.NodeDescription("RemoteFusedGraphExecute") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) + begin + function remote_fused_graph_execute_graph(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + local desc + tf.with_op_name(name, "RemoteFusedGraphExecute") do + desc = tf.NodeDescription("RemoteFusedGraphExecute") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + end + begin + if serialized_remote_fused_graph_execute_info !== nothing + desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function 
remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + desc = tf.EagerOp("RemoteFusedGraphExecute") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + end + begin + if serialized_remote_fused_graph_execute_info !== nothing + desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) + if tf.in_eager_mode() + remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + else + remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) + end end - if serialized_remote_fused_graph_execute_info !== nothing - desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) - end - end - tf.Tensor(tf.Operation(desc)) end - function remote_fused_graph_execute_eager(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - desc = tf.EagerOp("RemoteFusedGraphExecute") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) - end - if serialized_remote_fused_graph_execute_info !== nothing - desc["serialized_remote_fused_graph_execute_info"] = Base.String(serialized_remote_fused_graph_execute_info) - end - res = tf.execute(desc) - node = tf.TapeNode(remote_fused_graph_execute, [inputs_], name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_fused_graph_execute(inputs_; name=nothing, Tinputs=nothing, Toutputs=nothing, serialized_remote_fused_graph_execute_info=nothing) - if tf.in_eager_mode() - remote_fused_graph_execute_eager(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) - else - remote_fused_graph_execute_graph(inputs_; name=name, Tinputs=Tinputs, Toutputs=Toutputs, serialized_remote_fused_graph_execute_info=serialized_remote_fused_graph_execute_info) - end - end end @@ -13410,47 +24230,99 @@ end """ begin - function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - local desc - tf.with_op_name(name, "SparseSliceGrad") do - desc = 
tf.NodeDescription("SparseSliceGrad") - backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_start_ = convert(Tensor{Int64}, input_start_) - output_indices_ = convert(Tensor{Int64}, output_indices_) - (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_start_) - tf.add_input(desc, output_indices_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - desc = tf.EagerOp("SparseSliceGrad") - backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_start_ = convert(tf.EagerTensor, input_start_) - output_indices_ = convert(tf.EagerTensor, output_indices_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_start_) - tf.add_input(desc, output_indices_) - desc["T"] = tf.data_type(backprop_val_grad_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, output_indices_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) - if tf.in_eager_mode() - sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) - else - sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + begin + function sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseSliceGrad") do + desc = tf.NodeDescription("SparseSliceGrad") + begin + begin + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + begin + end + end + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_start_ = convert(Tensor{Int64}, input_start_) + begin + end + end + begin + output_indices_ = convert(Tensor{Int64}, output_indices_) + begin + end + end + begin + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + end + end + begin + begin + tf.add_input(desc, backprop_val_grad_) + end + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_start_) + end + begin + tf.add_input(desc, output_indices_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + desc = tf.EagerOp("SparseSliceGrad") + backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_start_ = convert(tf.EagerTensor, input_start_) + output_indices_ = convert(tf.EagerTensor, output_indices_) + begin + begin + tf.add_input(desc, backprop_val_grad_) + end + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_start_) + end + begin + tf.add_input(desc, output_indices_) + end + end + begin + end + begin + desc["T"] = tf.data_type(backprop_val_grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_slice_grad, [backprop_val_grad_, input_indices_, input_start_, 
output_indices_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice_grad(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=nothing) + if tf.in_eager_mode() + sparse_slice_grad_eager(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + else + sparse_slice_grad_graph(backprop_val_grad_, input_indices_, input_start_, output_indices_; name=name) + end end - end + end end @@ -13460,54 +24332,102 @@ end """ begin - function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "Cumsum") do - desc = tf.NodeDescription("Cumsum") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) + begin + function cumsum_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumsum") do + desc = tf.NodeDescription("Cumsum") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + axis_ = convert(Tensor{Int32}, axis_) + begin + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + end + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + desc = tf.EagerOp("Cumsum") + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + end + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Tidx"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.in_eager_mode() + cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function cumsum_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - desc = tf.EagerOp("Cumsum") - x_ = convert(tf.EagerTensor, x_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end - 
desc["T"] = tf.data_type(x_) - desc["Tidx"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(cumsum, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumsum(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.in_eager_mode() - cumsum_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - else - cumsum_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - end - end end @@ -13517,72 +24437,150 @@ end """ begin - function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do - desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") - t_ = convert(Tensor{Any}, t_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - gamma_ = convert(Tensor{Any}, gamma_) - backprop_ = convert(Tensor{Any}, backprop_) - (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, gamma_) - tf.add_input(desc, backprop_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + begin + function batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalizationGrad") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalizationGrad") + begin + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + gamma_ = convert(Tensor{Any}, gamma_) + begin + end + end + begin + backprop_ = convert(Tensor{Any}, backprop_) + begin + end + end + begin + (t_, m_, v_, gamma_, backprop_) = tf.tf_promote(t_, m_, v_, gamma_, backprop_) + end + end + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, gamma_) + end + begin + tf.add_input(desc, backprop_) + end + end + begin + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") + t_ = convert(tf.EagerTensor, t_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + gamma_ = convert(tf.EagerTensor, gamma_) + backprop_ = convert(tf.EagerTensor, backprop_) + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + 
tf.add_input(desc, gamma_) + end + begin + tf.add_input(desc, backprop_) + end + end + begin + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + begin + desc["T"] = tf.data_type(t_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(v_) + end + begin + desc["T"] = tf.data_type(gamma_) + end + begin + desc["T"] = tf.data_type(backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - desc = tf.EagerOp("BatchNormWithGlobalNormalizationGrad") - t_ = convert(tf.EagerTensor, t_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - gamma_ = convert(tf.EagerTensor, gamma_) - backprop_ = convert(tf.EagerTensor, backprop_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, gamma_) - tf.add_input(desc, backprop_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - desc["T"] = tf.data_type(t_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(v_) - desc["T"] = tf.data_type(gamma_) - desc["T"] = tf.data_type(backprop_) - res = tf.execute(desc) - node = tf.TapeNode(batch_norm_with_global_normalization_grad, [t_, m_, v_, gamma_, backprop_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization_grad(t_, m_, v_, gamma_, backprop_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - batch_norm_with_global_normalization_grad_eager(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - batch_norm_with_global_normalization_grad_graph(t_, m_, v_, gamma_, backprop_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - end - end end @@ -13592,63 +24590,115 @@ end """ begin - 
function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPoolGrad") do - desc = tf.NodeDescription("AvgPoolGrad") - orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) - grad_ = convert(Tensor{Any}, grad_) - (grad_,) = tf.tf_promote(grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function avg_pool_grad_graph(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPoolGrad") do + desc = tf.NodeDescription("AvgPoolGrad") + begin + begin + orig_input_shape_ = convert(Tensor{Int32}, orig_input_shape_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (grad_,) = tf.tf_promote(grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_shape_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPoolGrad") + orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_shape_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function avg_pool_grad_eager(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = 
tf.EagerOp("AvgPoolGrad") - orig_input_shape_ = convert(tf.EagerTensor, orig_input_shape_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_shape_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(avg_pool_grad, [orig_input_shape_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool_grad(orig_input_shape_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool_grad_eager(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool_grad_graph(orig_input_shape_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -13658,47 +24708,91 @@ end """ begin - function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "RestoreV2") do - desc = tf.NodeDescription("RestoreV2") - prefix_ = convert(Tensor{String}, prefix_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "RestoreV2") do + desc = tf.NodeDescription("RestoreV2") + begin + begin + prefix_ = convert(Tensor{String}, prefix_) + begin + end + end + begin + tensor_names_ = convert(Tensor{String}, tensor_names_) + begin + end + end + begin + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + begin + end + end + end + begin + begin + tf.add_input(desc, prefix_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shape_and_slices_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + desc = tf.EagerOp("RestoreV2") + prefix_ = convert(tf.EagerTensor, prefix_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) + begin + begin + tf.add_input(desc, prefix_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shape_and_slices_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 
=# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + else + restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - desc = tf.EagerOp("RestoreV2") - prefix_ = convert(tf.EagerTensor, prefix_) - tensor_names_ = convert(tf.EagerTensor, tensor_names_) - shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - res = tf.execute(desc) - node = tf.TapeNode(restore_v2, [prefix_, tensor_names_, shape_and_slices_], name=nothing, dtypes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_v2(prefix_, tensor_names_, shape_and_slices_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - restore_v2_eager(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) - else - restore_v2_graph(prefix_, tensor_names_, shape_and_slices_; name=name, dtypes=dtypes) - end - end end @@ -13708,35 +24802,63 @@ end """ begin - function relu6_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Relu6") do - desc = tf.NodeDescription("Relu6") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function relu6_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6") do + desc = tf.NodeDescription("Relu6") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function relu6_eager(features_; name=nothing) - desc = tf.EagerOp("Relu6") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(relu6, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function relu6_eager(features_; name=nothing) + desc = tf.EagerOp("Relu6") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(relu6, [features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6(features_; name=nothing) - if tf.in_eager_mode() - relu6_eager(features_; name=name) - else - relu6_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6(features_; name=nothing) + if tf.in_eager_mode() + relu6_eager(features_; name=name) + else + relu6_graph(features_; name=name) + end end - end + end end @@ -13746,83 +24868,197 @@ end """ begin - function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, 
momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyRMSProp") do - desc = tf.NodeDescription("SparseApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyRMSProp") - var_ = convert(tf.EagerTensor, var_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(ms_) - desc["T"] = tf.data_type(mom_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyRMSProp") do + desc = tf.NodeDescription("SparseApplyRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, 
mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyRMSProp") + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(ms_) + end + begin + desc["T"] = tf.data_type(mom_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end 
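# Pattern note: every generated op follows this three-part layout — a
# `<op>_graph` builder, an `<op>_eager` executor, and a `tf.@op` front end
# that dispatches on `tf.in_eager_mode()`. For SparseApplyRMSProp the graph
# builder additionally shifts `indices_` by -1 to map Julia's 1-based
# indices onto TensorFlow's 0-based ones, and the eager path writes
# desc["T"] once per promoted input, so var/ms/mom/lr/rho/momentum/epsilon/
# grad are expected to share one element type (the last assignment wins).
# Hypothetical call, assuming state handles `var, ms, mom` and a sparse
# gradient `g` at positions `idxs` (names illustrative only):
#   sparse_apply_rms_prop(var, ms, mom, 0.01, 0.9, 0.0, 1e-10, g, idxs)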
end - end + end end @@ -13832,65 +25068,105 @@ end Receives the named tensor from send_device on recv_device. """ begin - function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_Recv") do - desc = tf.NodeDescription("_Recv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + begin + function _recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Recv") do + desc = tf.NodeDescription("_Recv") + begin + end + begin + end + begin + begin + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_Recv") + begin + end + begin + begin + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, 
send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - desc = tf.EagerOp("_Recv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - res = tf.execute(desc) - node = tf.TapeNode(_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - end - end end @@ -13900,59 +25176,103 @@ end """ begin - function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool") do - desc = tf.NodeDescription("MaxPool") - input_ = convert(Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function max_pool_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool") do + desc = tf.NodeDescription("MaxPool") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + 
desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPool") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -13962,35 +25282,63 @@ end """ begin - function invert_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Invert") do - desc = tf.NodeDescription("Invert") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function invert_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Invert") do + desc = tf.NodeDescription("Invert") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function invert_eager(x_; name=nothing) - desc = tf.EagerOp("Invert") - x_ = convert(tf.EagerTensor, x_) - 
tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(invert, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function invert_eager(x_; name=nothing) + desc = tf.EagerOp("Invert") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(invert, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert(x_; name=nothing) - if tf.in_eager_mode() - invert_eager(x_; name=name) - else - invert_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert(x_; name=nothing) + if tf.in_eager_mode() + invert_eager(x_; name=name) + else + invert_graph(x_; name=name) + end end - end + end end @@ -14000,41 +25348,73 @@ end *NOTE*: Do not invoke this operator directly in Python. Graph rewrite pass is """ begin - function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) - local desc - tf.with_op_name(name, "_UnaryOpsComposition") do - desc = tf.NodeDescription("_UnaryOpsComposition") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if op_names !== nothing - desc["op_names"] = map(Base.identity, op_names) + begin + function _unary_ops_composition_graph(x_; name=nothing, op_names=nothing) + local desc + tf.with_op_name(name, "_UnaryOpsComposition") do + desc = tf.NodeDescription("_UnaryOpsComposition") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if op_names !== nothing + desc["op_names"] = map(Base.identity, op_names) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) + desc = tf.EagerOp("_UnaryOpsComposition") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if op_names !== nothing + desc["op_names"] = map(Base.identity, op_names) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _unary_ops_composition(x_; name=nothing, op_names=nothing) + if tf.in_eager_mode() + _unary_ops_composition_eager(x_; name=name, op_names=op_names) + else + _unary_ops_composition_graph(x_; name=name, op_names=op_names) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _unary_ops_composition_eager(x_; name=nothing, op_names=nothing) - desc = tf.EagerOp("_UnaryOpsComposition") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if op_names !== nothing - desc["op_names"] = map(Base.identity, op_names) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(_unary_ops_composition, [x_], name=nothing, op_names=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
_unary_ops_composition(x_; name=nothing, op_names=nothing) - if tf.in_eager_mode() - _unary_ops_composition_eager(x_; name=name, op_names=op_names) - else - _unary_ops_composition_graph(x_; name=name, op_names=op_names) - end - end end @@ -14044,73 +25424,129 @@ end """ begin - function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalMapDataset") do - desc = tf.NodeDescription("ExperimentalMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalMapDataset") do + desc = tf.NodeDescription("ExperimentalMapDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalMapDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if preserve_cardinality !== nothing 
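# Optional attrs like this one are only written into the NodeDescription
# when the caller passes a non-`nothing` value; anything left unset falls
# back to the default registered for the kernel. Scalar coercions
# (Base.Bool / Base.String / Base.Int / map(Base.identity, ...)) are applied
# at the call boundary so Julia values land as the attr types TF expects.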
+ desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + else + experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) end - function experimental_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("ExperimentalMapDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, 
use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - else - experimental_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - end - end end @@ -14120,65 +25556,121 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - velocities_ = convert(Tensor{Float32}, velocities_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingADAMParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingADAMParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + momenta_ = convert(Tensor{Float32}, momenta_) + begin + end + end + begin + velocities_ = convert(Tensor{Float32}, velocities_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, velocities_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + velocities_ = convert(tf.EagerTensor, velocities_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, velocities_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingADAMParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - momenta_ = convert(tf.EagerTensor, momenta_) - velocities_ = convert(tf.EagerTensor, velocities_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adam_parameters, [parameters_, momenta_, velocities_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters(parameters_, momenta_, velocities_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adam_parameters_eager(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adam_parameters_graph(parameters_, momenta_, velocities_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -14188,39 +25680,67 @@ end """ begin - function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "ParseTensor") do - desc = tf.NodeDescription("ParseTensor") - serialized_ = convert(Tensor{String}, serialized_) - tf.add_input(desc, serialized_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function parse_tensor_graph(serialized_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "ParseTensor") do + desc = tf.NodeDescription("ParseTensor") + begin + begin + serialized_ = convert(Tensor{String}, serialized_) + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) - desc = 
tf.EagerOp("ParseTensor") - serialized_ = convert(tf.EagerTensor, serialized_) - tf.add_input(desc, serialized_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - res = tf.execute(desc) - node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function parse_tensor_eager(serialized_; name=nothing, out_type=nothing) + desc = tf.EagerOp("ParseTensor") + serialized_ = convert(tf.EagerTensor, serialized_) + begin + begin + tf.add_input(desc, serialized_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parse_tensor, [serialized_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - parse_tensor_eager(serialized_; name=name, out_type=out_type) - else - parse_tensor_graph(serialized_; name=name, out_type=out_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_tensor(serialized_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + parse_tensor_eager(serialized_; name=name, out_type=out_type) + else + parse_tensor_graph(serialized_; name=name, out_type=out_type) + end end - end + end end @@ -14230,53 +25750,85 @@ end """ begin - function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do - desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") - if container !== nothing - desc["container"] = Base.String(container) + begin + function experimental_materialized_index_dataset_handle_graph(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalMaterializedIndexDatasetHandle") do + desc = tf.NodeDescription("ExperimentalMaterializedIndexDatasetHandle") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_materialized_index_dataset_handle_eager(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = 
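# Every eager execution is recorded as a TapeNode (op, input list, attrs,
# result) so the tape can later replay the call for gradients; `tf.execute`
# returns a vector of output handles, and single-output ops unwrap `res[1]`.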
tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + else + experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function experimental_materialized_index_dataset_handle_eager(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalMaterializedIndexDatasetHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_materialized_index_dataset_handle, [], name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_materialized_index_dataset_handle(; name=nothing, container=nothing, shared_name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_materialized_index_dataset_handle_eager(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) - else - experimental_materialized_index_dataset_handle_graph(; name=name, container=container, shared_name=shared_name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -14286,53 +25838,101 @@ end """ begin - function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do - desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - shard_num_ = convert(Tensor{Int32}, shard_num_) - incarnation_id_ = convert(Tensor{Int64}, incarnation_id_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, shard_num_) - tf.add_input(desc, incarnation_id_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; 
name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "MultiDeviceIteratorGetNextFromShard") do + desc = tf.NodeDescription("MultiDeviceIteratorGetNextFromShard") + begin + begin + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + begin + end + end + begin + shard_num_ = convert(Tensor{Int32}, shard_num_) + begin + end + end + begin + incarnation_id_ = convert(Tensor{Int64}, incarnation_id_) + begin + end + end + end + begin + begin + tf.add_input(desc, multi_device_iterator_) + end + begin + tf.add_input(desc, shard_num_) + end + begin + tf.add_input(desc, incarnation_id_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) + shard_num_ = convert(tf.EagerTensor, shard_num_) + incarnation_id_ = convert(tf.EagerTensor, incarnation_id_) + begin + begin + tf.add_input(desc, multi_device_iterator_) + end + begin + tf.add_input(desc, shard_num_) + end + begin + tf.add_input(desc, incarnation_id_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + else + multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("MultiDeviceIteratorGetNextFromShard") - multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) - shard_num_ = convert(tf.EagerTensor, shard_num_) - incarnation_id_ = convert(tf.EagerTensor, incarnation_id_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, shard_num_) - tf.add_input(desc, incarnation_id_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - 
end - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_get_next_from_shard, [multi_device_iterator_, shard_num_, incarnation_id_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_get_next_from_shard(multi_device_iterator_, shard_num_, incarnation_id_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - multi_device_iterator_get_next_from_shard_eager(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) - else - multi_device_iterator_get_next_from_shard_graph(multi_device_iterator_, shard_num_, incarnation_id_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -14342,58 +25942,116 @@ end """ begin - function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "RandomUniformInt") do - desc = tf.NodeDescription("RandomUniformInt") - shape_ = convert(Tensor{Any}, shape_) - minval_ = convert(Tensor{Any}, minval_) - maxval_ = convert(Tensor{Any}, maxval_) - (shape_,) = tf.tf_promote(shape_) - (minval_, maxval_) = tf.tf_promote(minval_, maxval_) - tf.add_input(desc, shape_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function random_uniform_int_graph(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomUniformInt") do + desc = tf.NodeDescription("RandomUniformInt") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + minval_ = convert(Tensor{Any}, minval_) + begin + end + end + begin + maxval_ = convert(Tensor{Any}, maxval_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, minval_) + end + begin + tf.add_input(desc, maxval_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("RandomUniformInt") + shape_ = convert(tf.EagerTensor, shape_) + minval_ = convert(tf.EagerTensor, minval_) + maxval_ = convert(tf.EagerTensor, maxval_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, minval_) + end + begin + tf.add_input(desc, maxval_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + begin + desc["Tout"] = tf.data_type(minval_) + end + begin + desc["Tout"] = tf.data_type(maxval_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform_int(shape_, 
minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + else + random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_uniform_int_eager(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("RandomUniformInt") - shape_ = convert(tf.EagerTensor, shape_) - minval_ = convert(tf.EagerTensor, minval_) - maxval_ = convert(tf.EagerTensor, maxval_) - tf.add_input(desc, shape_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(shape_) - desc["Tout"] = tf.data_type(minval_) - desc["Tout"] = tf.data_type(maxval_) - res = tf.execute(desc) - node = tf.TapeNode(random_uniform_int, [shape_, minval_, maxval_], name=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform_int(shape_, minval_, maxval_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_uniform_int_eager(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) - else - random_uniform_int_graph(shape_, minval_, maxval_; name=name, seed=seed, seed2=seed2) - end - end end @@ -14403,46 +26061,88 @@ end """ begin - function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) - local desc - tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do - desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") - features_ = convert(Tensor{Any}, features_) - labels_ = convert(Tensor{Int64}, labels_) - (features_,) = tf.tf_promote(features_) - (labels_,) = tf.tf_promote(labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) - desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") - features_ = convert(tf.EagerTensor, features_) - labels_ = convert(tf.EagerTensor, labels_) - tf.add_input(desc, features_) - tf.add_input(desc, labels_) - desc["T"] = tf.data_type(features_) - desc["Tlabels"] = tf.data_type(labels_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) - if tf.in_eager_mode() - sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) - else - sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + begin + function sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=nothing) + local desc + tf.with_op_name(name, "SparseSoftmaxCrossEntropyWithLogits") do + desc = tf.NodeDescription("SparseSoftmaxCrossEntropyWithLogits") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + labels_ = 
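# Input conversions mirror the op's registered signature: `features_` is
# promoted to a common element type via tf.tf_promote (attr "T"), while
# `labels_` converts to Tensor{Int64} (attr "Tlabels"). This op has two
# outputs (loss and backprop), so the graph builder collects
# `tf.Tensor(op, out_idx)` for out_idx = 1:2 and the eager wrapper returns
# the full `res` vector rather than unwrapping `res[1]`.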
convert(Tensor{Int64}, labels_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + begin + (labels_,) = tf.tf_promote(labels_) + end + end + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, labels_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=nothing) + desc = tf.EagerOp("SparseSoftmaxCrossEntropyWithLogits") + features_ = convert(tf.EagerTensor, features_) + labels_ = convert(tf.EagerTensor, labels_) + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, labels_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + begin + desc["Tlabels"] = tf.data_type(labels_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_softmax_cross_entropy_with_logits, [features_, labels_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax_cross_entropy_with_logits(features_, labels_; name=nothing) + if tf.in_eager_mode() + sparse_softmax_cross_entropy_with_logits_eager(features_, labels_; name=name) + else + sparse_softmax_cross_entropy_with_logits_graph(features_, labels_; name=name) + end end - end + end end @@ -14452,47 +26152,91 @@ end """ begin - function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayReadV2") do - desc = tf.NodeDescription("TensorArrayReadV2") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_read_v2_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayReadV2") do + desc = tf.NodeDescription("TensorArrayReadV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) + desc = tf.EagerOp("TensorArrayReadV2") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v2_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_read_v2_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) - desc = tf.EagerOp("TensorArrayReadV2") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_read_v2, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v2(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_v2_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_v2_graph(handle_, index_, flow_in_; name=name, dtype=dtype) - end - end end @@ -14502,46 +26246,88 @@ end """ begin - function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadUpTo") do - desc = tf.NodeDescription("ReaderReadUpTo") - reader_handle_ = convert(Tensor{String}, reader_handle_) - queue_handle_ = convert(Tensor{String}, queue_handle_) - num_records_ = convert(Tensor{Int64}, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) - desc = tf.EagerOp("ReaderReadUpTo") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - queue_handle_ = convert(tf.EagerTensor, queue_handle_) - num_records_ = convert(tf.EagerTensor, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - res = tf.execute(desc) - node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) - if tf.in_eager_mode() - reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) - else - reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) + begin + function reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadUpTo") do + desc = tf.NodeDescription("ReaderReadUpTo") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + begin + queue_handle_ = convert(Tensor{String}, queue_handle_) + begin + end + end + begin + num_records_ = convert(Tensor{Int64}, num_records_) + begin + end + end + end + begin + begin + tf.add_input(desc, 
reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + begin + tf.add_input(desc, num_records_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=nothing) + desc = tf.EagerOp("ReaderReadUpTo") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + num_records_ = convert(tf.EagerTensor, num_records_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + begin + tf.add_input(desc, num_records_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_read_up_to, [reader_handle_, queue_handle_, num_records_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.in_eager_mode() + reader_read_up_to_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_graph(reader_handle_, queue_handle_, num_records_; name=name) + end end - end + end end @@ -14551,61 +26337,109 @@ end """ begin - function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - local desc - tf.with_op_name(name, "EncodeProto") do - desc = tf.NodeDescription("EncodeProto") - sizes_ = convert(Tensor{Int32}, sizes_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, sizes_) - tf.add_input(desc, values_) - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) + begin + function encode_proto_graph(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + local desc + tf.with_op_name(name, "EncodeProto") do + desc = tf.NodeDescription("EncodeProto") + begin + begin + sizes_ = convert(Tensor{Int32}, sizes_) + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + end + begin + begin + tf.add_input(desc, sizes_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + end + begin + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + end + begin + if descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + end + begin + if Tinput_types !== nothing + desc["Tinput_types"] = map(Base.identity, Tinput_types) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + desc = tf.EagerOp("EncodeProto") + sizes_ = convert(tf.EagerTensor, sizes_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, sizes_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if field_names !== nothing + desc["field_names"] = map(Base.identity, field_names) + end + end + begin + if message_type !== nothing + desc["message_type"] = Base.String(message_type) + end + end + begin + if 
descriptor_source !== nothing + desc["descriptor_source"] = Base.String(descriptor_source) + end + end + begin + if Tinput_types !== nothing + desc["Tinput_types"] = map(Base.identity, Tinput_types) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) + if tf.in_eager_mode() + encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + else + encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) + end end - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if Tinput_types !== nothing - desc["Tinput_types"] = map(Base.identity, Tinput_types) - end - end - tf.Tensor(tf.Operation(desc)) end - function encode_proto_eager(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - desc = tf.EagerOp("EncodeProto") - sizes_ = convert(tf.EagerTensor, sizes_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, sizes_) - tf.add_input(desc, values_) - if field_names !== nothing - desc["field_names"] = map(Base.identity, field_names) - end - if message_type !== nothing - desc["message_type"] = Base.String(message_type) - end - if descriptor_source !== nothing - desc["descriptor_source"] = Base.String(descriptor_source) - end - if Tinput_types !== nothing - desc["Tinput_types"] = map(Base.identity, Tinput_types) - end - res = tf.execute(desc) - node = tf.TapeNode(encode_proto, [sizes_, values_], name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_proto(sizes_, values_; name=nothing, field_names=nothing, message_type=nothing, descriptor_source=nothing, Tinput_types=nothing) - if tf.in_eager_mode() - encode_proto_eager(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) - else - encode_proto_graph(sizes_, values_; name=name, field_names=field_names, message_type=message_type, descriptor_source=descriptor_source, Tinput_types=Tinput_types) - end - end end @@ -14615,125 +26449,239 @@ end """ begin - function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSliceGrad") do - desc = tf.NodeDescription("StridedSliceGrad") - shape_ = convert(Tensor{Any}, shape_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - 
convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - dy_ = convert(Tensor{Any}, dy_) - (dy_,) = tf.tf_promote(dy_) - (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_) - tf.add_input(desc, shape_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, dy_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) + begin + function strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSliceGrad") do + desc = tf.NodeDescription("StridedSliceGrad") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + begin_ = convert(Tensor{Any}, begin_) + begin + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end + end + begin + end_ = convert(Tensor{Any}, end_) + begin + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + end + end + begin + strides_ = convert(Tensor{Any}, strides_) + begin + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (dy_,) = tf.tf_promote(dy_) + end + begin + (shape_, begin_, end_, strides_) = tf.tf_promote(shape_, begin_, end_, strides_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, 
Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("StridedSliceGrad") + shape_ = convert(tf.EagerTensor, shape_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + begin + desc["Index"] = tf.data_type(shape_) + end + begin + desc["Index"] = tf.data_type(begin_) + end + begin + desc["Index"] = tf.data_type(end_) + end + begin + desc["Index"] = tf.data_type(strides_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) - end - function strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - desc = tf.EagerOp("StridedSliceGrad") - shape_ = convert(tf.EagerTensor, shape_) - 
begin_ = convert(tf.EagerTensor, begin_) - end_ = convert(tf.EagerTensor, end_) - strides_ = convert(tf.EagerTensor, strides_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, shape_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, dy_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - desc["Index"] = tf.data_type(shape_) - desc["Index"] = tf.data_type(begin_) - desc["Index"] = tf.data_type(end_) - desc["Index"] = tf.data_type(strides_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(strided_slice_grad, [shape_, begin_, end_, strides_, dy_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_grad(shape_, begin_, end_, strides_, dy_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_grad_eager(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_grad_graph(shape_, begin_, end_, strides_, dy_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - end - end end @@ -14743,53 +26691,93 @@ end Replacement node for NcclReduce. 
""" begin - function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclReduceSend") do - desc = tf.NodeDescription("_NcclReduceSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function _nccl_reduce_send_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceSend") do + desc = tf.NodeDescription("_NcclReduceSend") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclReduceSend") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _nccl_reduce_send_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - desc = tf.EagerOp("_NcclReduceSend") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_nccl_reduce_send, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
_nccl_reduce_send(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_reduce_send_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - _nccl_reduce_send_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - end - end end @@ -14799,63 +26787,123 @@ end """ begin - function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "PaddedBatchDataset") do - desc = tf.NodeDescription("PaddedBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] - padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) + begin + function padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "PaddedBatchDataset") do + desc = tf.NodeDescription("PaddedBatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + padded_shapes_ = [convert(Tensor{Int64}, x) for x = padded_shapes_] + begin + end + end + begin + padding_values_ = [convert(Tensor{Any}, x) for x = padding_values_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, padded_shapes_) + end + begin + tf.add_input(desc, padding_values_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("PaddedBatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) + padding_values_ = convert(tf.EagerTensor, padding_values_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, padded_shapes_) + end + begin + tf.add_input(desc, padding_values_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, 
Toutput_types=nothing, output_shapes=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + else + padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) - end - function padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - desc = tf.EagerOp("PaddedBatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - padded_shapes_ = convert(tf.EagerTensor, padded_shapes_) - padding_values_ = convert(tf.EagerTensor, padding_values_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, padded_shapes_) - tf.add_input(desc, padding_values_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(padded_batch_dataset, [input_dataset_, batch_size_, padded_shapes_, padding_values_], name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padded_batch_dataset(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=nothing, Toutput_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - padded_batch_dataset_eager(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - else - padded_batch_dataset_graph(input_dataset_, batch_size_, padded_shapes_, padding_values_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes, N=N) - end - end end @@ -14865,47 +26913,83 @@ end """ begin - function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) - local desc - tf.with_op_name(name, "DataFormatVecPermute") do - desc = tf.NodeDescription("DataFormatVecPermute") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) + begin + function data_format_vec_permute_graph(x_; name=nothing, src_format=nothing, dst_format=nothing) + local desc + tf.with_op_name(name, "DataFormatVecPermute") do + desc = tf.NodeDescription("DataFormatVecPermute") + begin + begin + x_ = convert(Tensor{Int32}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if src_format !== nothing 
+ desc["src_format"] = Base.String(src_format) + end + end + begin + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) + desc = tf.EagerOp("DataFormatVecPermute") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if src_format !== nothing + desc["src_format"] = Base.String(src_format) + end + end + begin + if dst_format !== nothing + desc["dst_format"] = Base.String(dst_format) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) + if tf.in_eager_mode() + data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) + else + data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) + end end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end - end - tf.Tensor(tf.Operation(desc)) end - function data_format_vec_permute_eager(x_; name=nothing, src_format=nothing, dst_format=nothing) - desc = tf.EagerOp("DataFormatVecPermute") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if src_format !== nothing - desc["src_format"] = Base.String(src_format) - end - if dst_format !== nothing - desc["dst_format"] = Base.String(dst_format) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(data_format_vec_permute, [x_], name=nothing, src_format=nothing, dst_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function data_format_vec_permute(x_; name=nothing, src_format=nothing, dst_format=nothing) - if tf.in_eager_mode() - data_format_vec_permute_eager(x_; name=name, src_format=src_format, dst_format=dst_format) - else - data_format_vec_permute_graph(x_; name=name, src_format=src_format, dst_format=dst_format) - end - end end @@ -14915,57 +26999,97 @@ end """ begin - function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "StringFormat") do - desc = tf.NodeDescription("StringFormat") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if template !== nothing - desc["template"] = Base.String(template) - end - if placeholder !== nothing - desc["placeholder"] = Base.String(placeholder) + begin + function string_format_graph(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "StringFormat") do + desc = tf.NodeDescription("StringFormat") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if template !== nothing + desc["template"] = 
Base.String(template) + end + end + begin + if placeholder !== nothing + desc["placeholder"] = Base.String(placeholder) + end + end + begin + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + desc = tf.EagerOp("StringFormat") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if template !== nothing + desc["template"] = Base.String(template) + end + end + begin + if placeholder !== nothing + desc["placeholder"] = Base.String(placeholder) + end + end + begin + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) + if tf.in_eager_mode() + string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + else + string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) + end end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - end - tf.Tensor(tf.Operation(desc)) - end - function string_format_eager(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - desc = tf.EagerOp("StringFormat") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if template !== nothing - desc["template"] = Base.String(template) - end - if placeholder !== nothing - desc["placeholder"] = Base.String(placeholder) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - res = tf.execute(desc) - node = tf.TapeNode(string_format, [inputs_], name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_format(inputs_; name=nothing, T=nothing, template=nothing, placeholder=nothing, summarize=nothing) - if tf.in_eager_mode() - string_format_eager(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) - else - string_format_graph(inputs_; name=name, T=T, template=template, placeholder=placeholder, summarize=summarize) - end - end end @@ -14975,65 +27099,113 @@ end """ begin - function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - local desc - tf.with_op_name(name, "AsString") do - desc = tf.NodeDescription("AsString") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if precision !== nothing - desc["precision"] = Base.Int(precision) - end - if scientific !== nothing - desc["scientific"] = Base.Bool(scientific) - end - if shortest !== nothing - desc["shortest"] = 
Base.Bool(shortest) + begin + function as_string_graph(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + local desc + tf.with_op_name(name, "AsString") do + desc = tf.NodeDescription("AsString") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if precision !== nothing + desc["precision"] = Base.Int(precision) + end + end + begin + if scientific !== nothing + desc["scientific"] = Base.Bool(scientific) + end + end + begin + if shortest !== nothing + desc["shortest"] = Base.Bool(shortest) + end + end + begin + if width !== nothing + desc["width"] = Base.Int(width) + end + end + begin + if fill !== nothing + desc["fill"] = Base.String(fill) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + desc = tf.EagerOp("AsString") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if precision !== nothing + desc["precision"] = Base.Int(precision) + end + end + begin + if scientific !== nothing + desc["scientific"] = Base.Bool(scientific) + end + end + begin + if shortest !== nothing + desc["shortest"] = Base.Bool(shortest) + end + end + begin + if width !== nothing + desc["width"] = Base.Int(width) + end + end + begin + if fill !== nothing + desc["fill"] = Base.String(fill) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) + if tf.in_eager_mode() + as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + else + as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) + end end - if width !== nothing - desc["width"] = Base.Int(width) - end - if fill !== nothing - desc["fill"] = Base.String(fill) - end - end - tf.Tensor(tf.Operation(desc)) - end - function as_string_eager(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - desc = tf.EagerOp("AsString") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if precision !== nothing - desc["precision"] = Base.Int(precision) - end - if scientific !== nothing - desc["scientific"] = Base.Bool(scientific) - end - if shortest !== nothing - desc["shortest"] = Base.Bool(shortest) - end - if width !== nothing - desc["width"] = Base.Int(width) - end - if fill !== nothing - desc["fill"] = Base.String(fill) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(as_string, [input_], name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function as_string(input_; name=nothing, precision=nothing, scientific=nothing, shortest=nothing, width=nothing, fill=nothing) - if tf.in_eager_mode() - as_string_eager(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) - else - as_string_graph(input_; name=name, precision=precision, scientific=scientific, shortest=shortest, width=width, fill=fill) - end - end end @@ -15043,49 +27215,89 @@ end """ begin - function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueMany") do - desc = tf.NodeDescription("QueueEnqueueMany") - handle_ = convert(Tensor{String}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) + begin + function queue_enqueue_many_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueMany") do + desc = tf.NodeDescription("QueueEnqueueMany") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueueMany") + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_enqueue_many_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueEnqueueMany") - handle_ = convert(tf.EagerTensor, handle_) - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = 
map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue_many, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_many_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_many_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - end - end end @@ -15095,41 +27307,65 @@ end """ begin - function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "FakeParam") do - desc = tf.NodeDescription("FakeParam") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function fake_param_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "FakeParam") do + desc = tf.NodeDescription("FakeParam") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fake_param_eager(; name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("FakeParam") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fake_param_eager(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("FakeParam") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_param, [], name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_param(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - fake_param_eager(; name=name, dtype=dtype, shape=shape) - else - fake_param_graph(; name=name, dtype=dtype, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_param(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + fake_param_eager(; name=name, dtype=dtype, shape=shape) + else + fake_param_graph(; name=name, dtype=dtype, shape=shape) + end end - end + end end @@ -15139,62 +27375,128 @@ end """ begin - function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ApplyAdagrad") do - desc = tf.NodeDescription("ApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - (var_, accum_, lr_, 
grad_) = tf.tf_promote(var_, accum_, lr_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ApplyAdagrad") do + desc = tf.NodeDescription("ApplyAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, accum_, lr_, grad_) = tf.tf_promote(var_, accum_, lr_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("ApplyAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - desc = tf.EagerOp("ApplyAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) 
- end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - else - apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - end - end end @@ -15204,33 +27506,57 @@ end """ begin - function experimental_iterator_get_device_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIteratorGetDevice") do - desc = tf.NodeDescription("ExperimentalIteratorGetDevice") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) + begin + function experimental_iterator_get_device_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIteratorGetDevice") do + desc = tf.NodeDescription("ExperimentalIteratorGetDevice") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_iterator_get_device_eager(resource_; name=nothing) - desc = tf.EagerOp("ExperimentalIteratorGetDevice") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_iterator_get_device_eager(resource_; name=nothing) + desc = tf.EagerOp("ExperimentalIteratorGetDevice") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, resource_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_iterator_get_device, [resource_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing) - if tf.in_eager_mode() - experimental_iterator_get_device_eager(resource_; name=name) - else - experimental_iterator_get_device_graph(resource_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_iterator_get_device(resource_; name=nothing) + if tf.in_eager_mode() + experimental_iterator_get_device_eager(resource_; name=name) + else + experimental_iterator_get_device_graph(resource_; name=name) + end end - end + end end @@ -15240,47 +27566,99 @@ end """ begin - function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) - local desc - tf.with_op_name(name, "AdjustContrast") do - desc = tf.NodeDescription("AdjustContrast") - images_ = convert(Tensor{Any}, images_) - contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) - min_value_ = convert(Tensor{Float32}, min_value_) - max_value_ = convert(Tensor{Float32}, max_value_) - (images_,) = 
tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) - tf.add_input(desc, min_value_) - tf.add_input(desc, max_value_) - end - tf.Tensor(tf.Operation(desc)) - end - function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) - desc = tf.EagerOp("AdjustContrast") - images_ = convert(tf.EagerTensor, images_) - contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) - min_value_ = convert(tf.EagerTensor, min_value_) - max_value_ = convert(tf.EagerTensor, max_value_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) - tf.add_input(desc, min_value_) - tf.add_input(desc, max_value_) - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) - if tf.in_eager_mode() - adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) - else - adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) + begin + function adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=nothing) + local desc + tf.with_op_name(name, "AdjustContrast") do + desc = tf.NodeDescription("AdjustContrast") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) + begin + end + end + begin + min_value_ = convert(Tensor{Float32}, min_value_) + begin + end + end + begin + max_value_ = convert(Tensor{Float32}, max_value_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, contrast_factor_) + end + begin + tf.add_input(desc, min_value_) + end + begin + tf.add_input(desc, max_value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=nothing) + desc = tf.EagerOp("AdjustContrast") + images_ = convert(tf.EagerTensor, images_) + contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) + min_value_ = convert(tf.EagerTensor, min_value_) + max_value_ = convert(tf.EagerTensor, max_value_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, contrast_factor_) + end + begin + tf.add_input(desc, min_value_) + end + begin + tf.add_input(desc, max_value_) + end + end + begin + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(adjust_contrast, [images_, contrast_factor_, min_value_, max_value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrast(images_, contrast_factor_, min_value_, max_value_; name=nothing) + if tf.in_eager_mode() + adjust_contrast_eager(images_, contrast_factor_, min_value_, max_value_; name=name) + else + adjust_contrast_graph(images_, contrast_factor_, min_value_, max_value_; name=name) + end end - end + end end @@ -15290,30 +27668,45 @@ end """ begin - function optional_none_graph(; name=nothing) - local desc - tf.with_op_name(name, 
"OptionalNone") do - desc - tf.NodeDescription("OptionalNone") + begin + function optional_none_graph(; name=nothing) + local desc + tf.with_op_name(name, "OptionalNone") do + desc = tf.NodeDescription("OptionalNone") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function optional_none_eager(; name=nothing) - desc = tf.EagerOp("OptionalNone") - res = tf.execute(desc) - node = tf.TapeNode(optional_none, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function optional_none_eager(; name=nothing) + desc = tf.EagerOp("OptionalNone") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(optional_none, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_none(; name=nothing) - if tf.in_eager_mode() - optional_none_eager(; name=name) - else - optional_none_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_none(; name=nothing) + if tf.in_eager_mode() + optional_none_eager(; name=name) + else + optional_none_graph(; name=name) + end end - end + end end @@ -15323,59 +27716,103 @@ end """ begin - function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "ExtractImagePatches") do - desc = tf.NodeDescription("ExtractImagePatches") - images_ = convert(Tensor{Any}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - if ksizes !== nothing - desc["ksizes"] = map(Base.identity, ksizes) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) + begin + function extract_image_patches_graph(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "ExtractImagePatches") do + desc = tf.NodeDescription("ExtractImagePatches") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + end + begin + begin + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("ExtractImagePatches") + images_ = convert(tf.EagerTensor, images_) + begin + begin + tf.add_input(desc, images_) + end + end + begin + begin + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = 
tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) + else + extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function extract_image_patches_eager(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) - desc = tf.EagerOp("ExtractImagePatches") - images_ = convert(tf.EagerTensor, images_) - tf.add_input(desc, images_) - if ksizes !== nothing - desc["ksizes"] = map(Base.identity, ksizes) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(extract_image_patches, [images_], name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_image_patches(images_; name=nothing, ksizes=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - extract_image_patches_eager(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) - else - extract_image_patches_graph(images_; name=name, ksizes=ksizes, strides=strides, rates=rates, padding=padding) - end - end end @@ -15385,53 +27822,85 @@ end """ begin - function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "VariableV2") do - desc = tf.NodeDescription("VariableV2") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function variable_v2_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "VariableV2") do + desc = tf.NodeDescription("VariableV2") + begin + end + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function variable_v2_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("VariableV2") + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end 
+ end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function variable_v2_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("VariableV2") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(variable_v2, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_v2(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - variable_v2_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - else - variable_v2_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - end - end end @@ -15441,35 +27910,63 @@ end """ begin - function elu_graph(features_; name=nothing) - local desc - tf.with_op_name(name, "Elu") do - desc = tf.NodeDescription("Elu") - features_ = convert(Tensor{Any}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) + begin + function elu_graph(features_; name=nothing) + local desc + tf.with_op_name(name, "Elu") do + desc = tf.NodeDescription("Elu") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function elu_eager(features_; name=nothing) - desc = tf.EagerOp("Elu") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(elu, [features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function elu_eager(features_; name=nothing) + desc = tf.EagerOp("Elu") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(elu, [features_], name=nothing, res) + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu(features_; name=nothing) - if tf.in_eager_mode() - elu_eager(features_; name=name) - else - elu_graph(features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function elu(features_; name=nothing) + if tf.in_eager_mode() + elu_eager(features_; name=name) + else + elu_graph(features_; name=name) + end end - end + end end @@ -15479,53 +27976,107 @@ end """ begin - function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterUpdate") do - desc = tf.NodeDescription("ScatterUpdate") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterUpdate") do + desc = tf.NodeDescription("ScatterUpdate") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterUpdate") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = 
tf.EagerOp("ScatterUpdate") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -15535,40 +28086,78 @@ end """ begin - function floor_mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "FloorMod") do - desc = tf.NodeDescription("FloorMod") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function floor_mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "FloorMod") do + desc = tf.NodeDescription("FloorMod") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function floor_mod_eager(x_, y_; name=nothing) - desc = tf.EagerOp("FloorMod") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(floor_mod, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function floor_mod_eager(x_, y_; name=nothing) + desc = tf.EagerOp("FloorMod") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(floor_mod, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_mod(x_, y_; name=nothing) - if tf.in_eager_mode() - floor_mod_eager(x_, y_; name=name) - else - floor_mod_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor_mod(x_, y_; name=nothing) + if tf.in_eager_mode() + floor_mod_eager(x_, y_; name=name) + else + floor_mod_graph(x_, y_; name=name) + end end - end + end end @@ -15578,45 +28167,77 @@ end """ begin - function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do - desc = 
tf.NodeDescription("ExperimentalIgnoreErrorsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_ignore_errors_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalIgnoreErrorsDataset") do + desc = tf.NodeDescription("ExperimentalIgnoreErrorsDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_ignore_errors_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalIgnoreErrorsDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_ignore_errors_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_ignore_errors_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_ignore_errors_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_ignore_errors_dataset_graph(input_dataset_; name=name, output_types=output_types, 
output_shapes=output_shapes) - end - end end @@ -15626,57 +28247,113 @@ end """ begin - function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do - desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_) - tag_ = convert(Tensor{String}, tag_) - counter_prefix_ = convert(Tensor{String}, counter_prefix_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, stats_aggregator_) - tf.add_input(desc, tag_) - tf.add_input(desc, counter_prefix_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSetStatsAggregatorDataset") do + desc = tf.NodeDescription("ExperimentalSetStatsAggregatorDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + stats_aggregator_ = convert(Tensor{Any}, stats_aggregator_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + counter_prefix_ = convert(Tensor{String}, counter_prefix_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, stats_aggregator_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, counter_prefix_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + stats_aggregator_ = convert(tf.EagerTensor, stats_aggregator_) + tag_ = convert(tf.EagerTensor, tag_) + counter_prefix_ = convert(tf.EagerTensor, counter_prefix_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, stats_aggregator_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, counter_prefix_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, 
output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalSetStatsAggregatorDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - stats_aggregator_ = convert(tf.EagerTensor, stats_aggregator_) - tag_ = convert(tf.EagerTensor, tag_) - counter_prefix_ = convert(tf.EagerTensor, counter_prefix_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, stats_aggregator_) - tf.add_input(desc, tag_) - tf.add_input(desc, counter_prefix_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_set_stats_aggregator_dataset, [input_dataset_, stats_aggregator_, tag_, counter_prefix_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_set_stats_aggregator_dataset(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_set_stats_aggregator_dataset_eager(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_set_stats_aggregator_dataset_graph(input_dataset_, stats_aggregator_, tag_, counter_prefix_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -15686,60 +28363,106 @@ end """ begin - function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "ComputeAccidentalHits") do - desc = tf.NodeDescription("ComputeAccidentalHits") - true_classes_ = convert(Tensor{Int64}, true_classes_) - sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_) - tf.add_input(desc, true_classes_) - tf.add_input(desc, sampled_candidates_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "ComputeAccidentalHits") do + desc = tf.NodeDescription("ComputeAccidentalHits") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + begin + sampled_candidates_ = convert(Tensor{Int64}, sampled_candidates_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + begin + tf.add_input(desc, sampled_candidates_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if seed !== nothing + desc["seed"] = 
Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("ComputeAccidentalHits") + true_classes_ = convert(tf.EagerTensor, true_classes_) + sampled_candidates_ = convert(tf.EagerTensor, sampled_candidates_) + begin + begin + tf.add_input(desc, true_classes_) + end + begin + tf.add_input(desc, sampled_candidates_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + else + compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("ComputeAccidentalHits") - true_classes_ = convert(tf.EagerTensor, true_classes_) - sampled_candidates_ = convert(tf.EagerTensor, sampled_candidates_) - tf.add_input(desc, true_classes_) - tf.add_input(desc, sampled_candidates_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(compute_accidental_hits, [true_classes_, sampled_candidates_], name=nothing, num_true=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function compute_accidental_hits(true_classes_, sampled_candidates_; name=nothing, num_true=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - compute_accidental_hits_eager(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) - else - compute_accidental_hits_graph(true_classes_, sampled_candidates_; name=name, num_true=num_true, seed=seed, seed2=seed2) - end - end end @@ -15749,39 +28472,67 @@ end """ begin - function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "StringToNumber") do - desc = tf.NodeDescription("StringToNumber") - string_tensor_ = convert(Tensor{String}, string_tensor_) - tf.add_input(desc, string_tensor_) - if 
out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function string_to_number_graph(string_tensor_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "StringToNumber") do + desc = tf.NodeDescription("StringToNumber") + begin + begin + string_tensor_ = convert(Tensor{String}, string_tensor_) + begin + end + end + end + begin + begin + tf.add_input(desc, string_tensor_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) - desc = tf.EagerOp("StringToNumber") - string_tensor_ = convert(tf.EagerTensor, string_tensor_) - tf.add_input(desc, string_tensor_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - res = tf.execute(desc) - node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function string_to_number_eager(string_tensor_; name=nothing, out_type=nothing) + desc = tf.EagerOp("StringToNumber") + string_tensor_ = convert(tf.EagerTensor, string_tensor_) + begin + begin + tf.add_input(desc, string_tensor_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_to_number, [string_tensor_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - string_to_number_eager(string_tensor_; name=name, out_type=out_type) - else - string_to_number_graph(string_tensor_; name=name, out_type=out_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_number(string_tensor_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + string_to_number_eager(string_tensor_; name=name, out_type=out_type) + else + string_to_number_graph(string_tensor_; name=name, out_type=out_type) + end end - end + end end @@ -15791,35 +28542,63 @@ end """ begin - function snapshot_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Snapshot") do - desc = tf.NodeDescription("Snapshot") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function snapshot_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Snapshot") do + desc = tf.NodeDescription("Snapshot") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function snapshot_eager(input_; name=nothing) - desc = tf.EagerOp("Snapshot") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(snapshot, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function snapshot_eager(input_; name=nothing) + desc = tf.EagerOp("Snapshot") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, 
input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(snapshot, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function snapshot(input_; name=nothing) - if tf.in_eager_mode() - snapshot_eager(input_; name=name) - else - snapshot_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function snapshot(input_; name=nothing) + if tf.in_eager_mode() + snapshot_eager(input_; name=name) + else + snapshot_graph(input_; name=name) + end end - end + end end @@ -15829,37 +28608,69 @@ end """ begin - function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) - local desc - tf.with_op_name(name, "DeserializeIterator") do - desc = tf.NodeDescription("DeserializeIterator") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - serialized_ = convert(Tensor{Any}, serialized_) - tf.add_input(desc, resource_handle_) - tf.add_input(desc, serialized_) + begin + function deserialize_iterator_graph(resource_handle_, serialized_; name=nothing) + local desc + tf.with_op_name(name, "DeserializeIterator") do + desc = tf.NodeDescription("DeserializeIterator") + begin + begin + resource_handle_ = convert(Tensor{Any}, resource_handle_) + begin + end + end + begin + serialized_ = convert(Tensor{Any}, serialized_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_handle_) + end + begin + tf.add_input(desc, serialized_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) - desc = tf.EagerOp("DeserializeIterator") - resource_handle_ = convert(tf.EagerTensor, resource_handle_) - serialized_ = convert(tf.EagerTensor, serialized_) - tf.add_input(desc, resource_handle_) - tf.add_input(desc, serialized_) - res = tf.execute(desc) - node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function deserialize_iterator_eager(resource_handle_, serialized_; name=nothing) + desc = tf.EagerOp("DeserializeIterator") + resource_handle_ = convert(tf.EagerTensor, resource_handle_) + serialized_ = convert(tf.EagerTensor, serialized_) + begin + begin + tf.add_input(desc, resource_handle_) + end + begin + tf.add_input(desc, serialized_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(deserialize_iterator, [resource_handle_, serialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing) - if tf.in_eager_mode() - deserialize_iterator_eager(resource_handle_, serialized_; name=name) - else - deserialize_iterator_graph(resource_handle_, serialized_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_iterator(resource_handle_, serialized_; name=nothing) + if tf.in_eager_mode() + deserialize_iterator_eager(resource_handle_, serialized_; name=name) + else + deserialize_iterator_graph(resource_handle_, serialized_; name=name) + end end - end + end end @@ -15869,35 +28680,63 @@ end """ begin - 
function atan_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Atan") do - desc = tf.NodeDescription("Atan") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function atan_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Atan") do + desc = tf.NodeDescription("Atan") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function atan_eager(x_; name=nothing) - desc = tf.EagerOp("Atan") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(atan, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function atan_eager(x_; name=nothing) + desc = tf.EagerOp("Atan") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(atan, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan(x_; name=nothing) - if tf.in_eager_mode() - atan_eager(x_; name=name) - else - atan_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan(x_; name=nothing) + if tf.in_eager_mode() + atan_eager(x_; name=name) + else + atan_graph(x_; name=name) + end end - end + end end @@ -15907,52 +28746,98 @@ end """ begin - function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - local desc - tf.with_op_name(name, "MatMul") do - desc = tf.NodeDescription("MatMul") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - (a_, b_) = tf.tf_promote(a_, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) + begin + function mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + local desc + tf.with_op_name(name, "MatMul") do + desc = tf.NodeDescription("MatMul") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + b_ = convert(Tensor{Any}, b_) + begin + end + end + begin + (a_, b_) = tf.tf_promote(a_, b_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + desc = tf.EagerOp("MatMul") + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(b_) + end + res = tf.execute(desc) + node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, 
transpose_a=nothing, transpose_b=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + if tf.in_eager_mode() + mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + else + mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + end end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - desc = tf.EagerOp("MatMul") - a_ = convert(tf.EagerTensor, a_) - b_ = convert(tf.EagerTensor, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(b_) - res = tf.execute(desc) - node = tf.TapeNode(mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - if tf.in_eager_mode() - mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) - else - mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) - end - end end @@ -15962,35 +28847,63 @@ end """ begin - function erfc_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Erfc") do - desc = tf.NodeDescription("Erfc") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) - end - function erfc_eager(x_; name=nothing) - desc = tf.EagerOp("Erfc") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(erfc, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function erfc_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Erfc") do + desc = tf.NodeDescription("Erfc") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erfc(x_; name=nothing) - if tf.in_eager_mode() - erfc_eager(x_; name=name) - else - erfc_graph(x_; name=name) + begin + function erfc_eager(x_; name=nothing) + desc = tf.EagerOp("Erfc") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end end - end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(erfc, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function erfc(x_; name=nothing) + if tf.in_eager_mode() + erfc_eager(x_; name=name) + else + erfc_graph(x_; name=name) + end + end + end end @@ -16000,40 +28913,78 @@ end """ begin - function sigmoid_grad_graph(y_, dy_; name=nothing) - local 
desc - tf.with_op_name(name, "SigmoidGrad") do - desc = tf.NodeDescription("SigmoidGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function sigmoid_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "SigmoidGrad") do + desc = tf.NodeDescription("SigmoidGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sigmoid_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("SigmoidGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sigmoid_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("SigmoidGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(sigmoid_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - sigmoid_grad_eager(y_, dy_; name=name) - else - sigmoid_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + sigmoid_grad_eager(y_, dy_; name=name) + else + sigmoid_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -16043,71 +28994,115 @@ end """ begin - function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordReaderV2") do - desc = tf.NodeDescription("FixedLengthRecordReaderV2") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) - end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) + begin + function fixed_length_record_reader_v2_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordReaderV2") do + desc = tf.NodeDescription("FixedLengthRecordReaderV2") + begin + end + begin + end + begin + begin + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + end + begin + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + end + begin + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + end + begin + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + end + begin + 
if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if encoding !== nothing + desc["encoding"] = Base.String(encoding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fixed_length_record_reader_v2_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + desc = tf.EagerOp("FixedLengthRecordReaderV2") + begin + end + begin + begin + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + end + begin + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + end + begin + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + end + begin + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if encoding !== nothing + desc["encoding"] = Base.String(encoding) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) + if tf.in_eager_mode() + fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + else + fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) + end end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if encoding !== nothing - desc["encoding"] = Base.String(encoding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fixed_length_record_reader_v2_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - desc = tf.EagerOp("FixedLengthRecordReaderV2") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) - end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) - end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - 
if encoding !== nothing - desc["encoding"] = Base.String(encoding) - end - res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_reader_v2, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader_v2(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, encoding=nothing) - if tf.in_eager_mode() - fixed_length_record_reader_v2_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) - else - fixed_length_record_reader_v2_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name, encoding=encoding) - end - end end @@ -16117,52 +29112,114 @@ end """ begin - function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionV3") do - desc = tf.NodeDescription("NonMaxSuppressionV3") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - score_threshold_ = convert(Tensor{Float32}, score_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - end - tf.Tensor(tf.Operation(desc)) - end - function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - desc = tf.EagerOp("NonMaxSuppressionV3") - boxes_ = convert(tf.EagerTensor, boxes_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_ = convert(tf.EagerTensor, max_output_size_) - iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) - score_threshold_ = convert(tf.EagerTensor, score_threshold_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - tf.add_input(desc, score_threshold_) - desc["T"] = tf.data_type(boxes_) - desc["T"] = tf.data_type(scores_) - res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) - else - non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + begin + function non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV3") do + desc = 
tf.NodeDescription("NonMaxSuppressionV3") + begin + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + scores_ = convert(Tensor{Float32}, scores_) + begin + end + end + begin + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + begin + end + end + begin + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + begin + end + end + begin + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + begin + end + end + begin + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + end + end + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + desc = tf.EagerOp("NonMaxSuppressionV3") + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + end + begin + desc["T"] = tf.data_type(boxes_) + end + begin + desc["T"] = tf.data_type(scores_) + end + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v3, [boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v3(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_v3_eager(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + else + non_max_suppression_v3_graph(boxes_, scores_, max_output_size_, iou_threshold_, score_threshold_; name=name) + end end - end + end end @@ -16172,63 +29229,123 @@ end """ begin - function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2DBackpropInput") do - desc = tf.NodeDescription("Dilation2DBackpropInput") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) + begin + function dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2DBackpropInput") do + desc = tf.NodeDescription("Dilation2DBackpropInput") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, 
filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("Dilation2DBackpropInput") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - desc = tf.EagerOp("Dilation2DBackpropInput") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(dilation2d_backprop_input, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_input(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_backprop_input_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_backprop_input_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - end - end end @@ -16238,68 +29355,154 @@ end """ begin - function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdadelta") do - desc = tf.NodeDescription("ResourceApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAdadelta") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - accum_update_ = convert(tf.EagerTensor, accum_update_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdadelta") do + desc = tf.NodeDescription("ResourceApplyAdadelta") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + accum_update_ = 
convert(Tensor{Any}, accum_update_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, rho_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdadelta") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -16309,37 +29512,69 @@ end """ begin - function logical_or_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LogicalOr") do - desc = tf.NodeDescription("LogicalOr") - x_ = convert(Tensor{Bool}, x_) - y_ = convert(Tensor{Bool}, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function logical_or_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LogicalOr") do + desc = tf.NodeDescription("LogicalOr") + begin + begin + x_ = convert(Tensor{Bool}, x_) + begin + end + end + begin + y_ = convert(Tensor{Bool}, y_) + begin + end + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function 
logical_or_eager(x_, y_; name=nothing) - desc = tf.EagerOp("LogicalOr") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - res = tf.execute(desc) - node = tf.TapeNode(logical_or, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function logical_or_eager(x_, y_; name=nothing) + desc = tf.EagerOp("LogicalOr") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(logical_or, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_or(x_, y_; name=nothing) - if tf.in_eager_mode() - logical_or_eager(x_, y_; name=name) - else - logical_or_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_or(x_, y_; name=nothing) + if tf.in_eager_mode() + logical_or_eager(x_, y_; name=name) + else + logical_or_graph(x_, y_; name=name) + end end - end + end end @@ -16349,65 +29584,129 @@ end """ begin - function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "DenseToSparseSetOperation") do - desc = tf.NodeDescription("DenseToSparseSetOperation") - set1_ = convert(Tensor{Any}, set1_) - set2_indices_ = convert(Tensor{Int64}, set2_indices_) - set2_values_ = convert(Tensor{Any}, set2_values_) - set2_shape_ = convert(Tensor{Int64}, set2_shape_) - (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "DenseToSparseSetOperation") do + desc = tf.NodeDescription("DenseToSparseSetOperation") + begin + begin + set1_ = convert(Tensor{Any}, set1_) + begin + end + end + begin + set2_indices_ = convert(Tensor{Int64}, set2_indices_) + begin + end + end + begin + set2_values_ = convert(Tensor{Any}, set2_values_) + begin + end + end + begin + set2_shape_ = convert(Tensor{Int64}, set2_shape_) + begin + end + end + begin + (set1_, set2_values_) = tf.tf_promote(set1_, set2_values_) + end + end + begin + begin + tf.add_input(desc, set1_) + end + begin + tf.add_input(desc, set2_indices_) + end + begin + tf.add_input(desc, set2_values_) + end + begin + tf.add_input(desc, set2_shape_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; 
name=nothing, set_operation=nothing, validate_indices=nothing) + desc = tf.EagerOp("DenseToSparseSetOperation") + set1_ = convert(tf.EagerTensor, set1_) + set2_indices_ = convert(tf.EagerTensor, set2_indices_) + set2_values_ = convert(tf.EagerTensor, set2_values_) + set2_shape_ = convert(tf.EagerTensor, set2_shape_) + begin + begin + tf.add_input(desc, set1_) + end + begin + tf.add_input(desc, set2_indices_) + end + begin + tf.add_input(desc, set2_values_) + end + begin + tf.add_input(desc, set2_shape_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["T"] = tf.data_type(set1_) + end + begin + desc["T"] = tf.data_type(set2_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - desc = tf.EagerOp("DenseToSparseSetOperation") - set1_ = convert(tf.EagerTensor, set1_) - set2_indices_ = convert(tf.EagerTensor, set2_indices_) - set2_values_ = convert(tf.EagerTensor, set2_values_) - set2_shape_ = convert(tf.EagerTensor, set2_shape_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_indices_) - tf.add_input(desc, set2_values_) - tf.add_input(desc, set2_shape_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["T"] = tf.data_type(set1_) - desc["T"] = tf.data_type(set2_values_) - res = tf.execute(desc) - node = tf.TapeNode(dense_to_sparse_set_operation, [set1_, set2_indices_, set2_values_, set2_shape_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_sparse_set_operation(set1_, set2_indices_, set2_values_, set2_shape_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - dense_to_sparse_set_operation_eager(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - dense_to_sparse_set_operation_graph(set1_, set2_indices_, set2_values_, set2_shape_; name=name, set_operation=set_operation, validate_indices=validate_indices) - end - end end @@ -16417,33 
+29716,57 @@ end """ begin - function reader_num_records_produced_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumRecordsProduced") do - desc = tf.NodeDescription("ReaderNumRecordsProduced") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_num_records_produced_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumRecordsProduced") do + desc = tf.NodeDescription("ReaderNumRecordsProduced") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_num_records_produced_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderNumRecordsProduced") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_num_records_produced_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderNumRecordsProduced") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_num_records_produced, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_records_produced_eager(reader_handle_; name=name) - else - reader_num_records_produced_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_records_produced_eager(reader_handle_; name=name) + else + reader_num_records_produced_graph(reader_handle_; name=name) + end end - end + end end @@ -16453,37 +29776,69 @@ end """ begin - function adjust_hue_graph(images_, delta_; name=nothing) - local desc - tf.with_op_name(name, "AdjustHue") do - desc = tf.NodeDescription("AdjustHue") - images_ = convert(Tensor{Float32}, images_) - delta_ = convert(Tensor{Float32}, delta_) - tf.add_input(desc, images_) - tf.add_input(desc, delta_) + begin + function adjust_hue_graph(images_, delta_; name=nothing) + local desc + tf.with_op_name(name, "AdjustHue") do + desc = tf.NodeDescription("AdjustHue") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + delta_ = convert(Tensor{Float32}, delta_) + begin + end + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function adjust_hue_eager(images_, delta_; name=nothing) - desc = tf.EagerOp("AdjustHue") - images_ = convert(tf.EagerTensor, images_) - delta_ = convert(tf.EagerTensor, delta_) - tf.add_input(desc, images_) - tf.add_input(desc, delta_) - res = tf.execute(desc) - node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + 
function adjust_hue_eager(images_, delta_; name=nothing) + desc = tf.EagerOp("AdjustHue") + images_ = convert(tf.EagerTensor, images_) + delta_ = convert(tf.EagerTensor, delta_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(adjust_hue, [images_, delta_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_hue(images_, delta_; name=nothing) - if tf.in_eager_mode() - adjust_hue_eager(images_, delta_; name=name) - else - adjust_hue_graph(images_, delta_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_hue(images_, delta_; name=nothing) + if tf.in_eager_mode() + adjust_hue_eager(images_, delta_; name=name) + else + adjust_hue_graph(images_, delta_; name=name) + end end - end + end end @@ -16493,43 +29848,79 @@ end """ begin - function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - num_buckets_ = convert(Tensor{Int64}, num_buckets_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, num_buckets_) - if generate_quantiles !== nothing - desc["generate_quantiles"] = Base.Bool(generate_quantiles) + begin + function boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceFlush") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceFlush") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + begin + num_buckets_ = convert(Tensor{Int64}, num_buckets_) + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, num_buckets_) + end + end + begin + begin + if generate_quantiles !== nothing + desc["generate_quantiles"] = Base.Bool(generate_quantiles) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + num_buckets_ = convert(tf.EagerTensor, num_buckets_) + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, num_buckets_) + end + end + begin + begin + if generate_quantiles !== nothing + desc["generate_quantiles"] = Base.Bool(generate_quantiles) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + else + boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - desc = tf.EagerOp("BoostedTreesQuantileStreamResourceFlush") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - num_buckets_ = convert(tf.EagerTensor, num_buckets_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, num_buckets_) - if generate_quantiles !== nothing - desc["generate_quantiles"] = Base.Bool(generate_quantiles) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_flush, [quantile_stream_resource_handle_, num_buckets_], name=nothing, generate_quantiles=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_flush(quantile_stream_resource_handle_, num_buckets_; name=nothing, generate_quantiles=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_flush_eager(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) - else - boosted_trees_quantile_stream_resource_flush_graph(quantile_stream_resource_handle_, num_buckets_; name=name, generate_quantiles=generate_quantiles) - end - end end @@ -16539,79 +29930,155 @@ end """ begin - function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do - desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - batch_size_ = convert(Tensor{Int64}, batch_size_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + begin + function experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + 
local desc + tf.with_op_name(name, "ExperimentalMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalMapAndBatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + begin + end + end + begin + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalMapAndBatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, 
output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("ExperimentalMapAndBatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) - drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - end - end end @@ -16621,40 +30088,78 @@ end """ begin - function real_div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "RealDiv") do - desc = tf.NodeDescription("RealDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function 
real_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "RealDiv") do + desc = tf.NodeDescription("RealDiv") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function real_div_eager(x_, y_; name=nothing) - desc = tf.EagerOp("RealDiv") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(real_div, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function real_div_eager(x_, y_; name=nothing) + desc = tf.EagerOp("RealDiv") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(real_div, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real_div(x_, y_; name=nothing) - if tf.in_eager_mode() - real_div_eager(x_, y_; name=name) - else - real_div_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real_div(x_, y_; name=nothing) + if tf.in_eager_mode() + real_div_eager(x_, y_; name=name) + else + real_div_graph(x_, y_; name=name) + end end - end + end end @@ -16664,53 +30169,101 @@ end """ begin - function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - local desc - tf.with_op_name(name, "RestoreSlice") do - desc = tf.NodeDescription("RestoreSlice") - file_pattern_ = convert(Tensor{String}, file_pattern_) - tensor_name_ = convert(Tensor{String}, tensor_name_) - shape_and_slice_ = convert(Tensor{String}, shape_and_slice_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - tf.add_input(desc, shape_and_slice_) - if dt !== nothing - desc["dt"] = Base.identity(dt) + begin + function restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "RestoreSlice") do + desc = tf.NodeDescription("RestoreSlice") + begin + begin + file_pattern_ = convert(Tensor{String}, file_pattern_) + begin + end + end + begin + tensor_name_ = convert(Tensor{String}, tensor_name_) + begin + end + end + begin + shape_and_slice_ = convert(Tensor{String}, shape_and_slice_) + begin + end + end + end + begin + begin + tf.add_input(desc, file_pattern_) + end + begin + tf.add_input(desc, tensor_name_) + end + begin + tf.add_input(desc, shape_and_slice_) + end + end + begin + begin + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + end + begin + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, 
preferred_shard=nothing) + desc = tf.EagerOp("RestoreSlice") + file_pattern_ = convert(tf.EagerTensor, file_pattern_) + tensor_name_ = convert(tf.EagerTensor, tensor_name_) + shape_and_slice_ = convert(tf.EagerTensor, shape_and_slice_) + begin + begin + tf.add_input(desc, file_pattern_) + end + begin + tf.add_input(desc, tensor_name_) + end + begin + tf.add_input(desc, shape_and_slice_) + end + end + begin + begin + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + end + begin + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) + if tf.in_eager_mode() + restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + else + restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) + end end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) - end - end - tf.Tensor(tf.Operation(desc)) - end - function restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - desc = tf.EagerOp("RestoreSlice") - file_pattern_ = convert(tf.EagerTensor, file_pattern_) - tensor_name_ = convert(tf.EagerTensor, tensor_name_) - shape_and_slice_ = convert(tf.EagerTensor, shape_and_slice_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - tf.add_input(desc, shape_and_slice_) - if dt !== nothing - desc["dt"] = Base.identity(dt) - end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) - end - res = tf.execute(desc) - node = tf.TapeNode(restore_slice, [file_pattern_, tensor_name_, shape_and_slice_], name=nothing, dt=nothing, preferred_shard=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore_slice(file_pattern_, tensor_name_, shape_and_slice_; name=nothing, dt=nothing, preferred_shard=nothing) - if tf.in_eager_mode() - restore_slice_eager(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) - else - restore_slice_graph(file_pattern_, tensor_name_, shape_and_slice_; name=name, dt=dt, preferred_shard=preferred_shard) - end - end end @@ -16720,39 +30273,67 @@ end """ begin - function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) - local desc - tf.with_op_name(name, "StackPopV2") do - desc = tf.NodeDescription("StackPopV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) + begin + function stack_pop_v2_graph(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPopV2") do + desc = tf.NodeDescription("StackPopV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = 
Base.identity(elem_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) - desc = tf.EagerOp("StackPopV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - res = tf.execute(desc) - node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stack_pop_v2_eager(handle_; name=nothing, elem_type=nothing) + desc = tf.EagerOp("StackPopV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stack_pop_v2, [handle_], name=nothing, elem_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing) - if tf.in_eager_mode() - stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) - else - stack_pop_v2_graph(handle_; name=name, elem_type=elem_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop_v2(handle_; name=nothing, elem_type=nothing) + if tf.in_eager_mode() + stack_pop_v2_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_v2_graph(handle_; name=name, elem_type=elem_type) + end end - end + end end @@ -16762,39 +30343,75 @@ end """ begin - function reverse_graph(tensor_, dims_; name=nothing) - local desc - tf.with_op_name(name, "Reverse") do - desc = tf.NodeDescription("Reverse") - tensor_ = convert(Tensor{Any}, tensor_) - dims_ = convert(Tensor{Bool}, dims_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - tf.add_input(desc, dims_) + begin + function reverse_graph(tensor_, dims_; name=nothing) + local desc + tf.with_op_name(name, "Reverse") do + desc = tf.NodeDescription("Reverse") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + dims_ = convert(Tensor{Bool}, dims_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, dims_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reverse_eager(tensor_, dims_; name=nothing) - desc = tf.EagerOp("Reverse") - tensor_ = convert(tf.EagerTensor, tensor_) - dims_ = convert(tf.EagerTensor, dims_) - tf.add_input(desc, tensor_) - tf.add_input(desc, dims_) - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reverse_eager(tensor_, dims_; name=nothing) + desc = tf.EagerOp("Reverse") + tensor_ = convert(tf.EagerTensor, tensor_) + dims_ = convert(tf.EagerTensor, dims_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, dims_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(reverse, [tensor_, dims_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - 
#= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse(tensor_, dims_; name=nothing) - if tf.in_eager_mode() - reverse_eager(tensor_, dims_; name=name) - else - reverse_graph(tensor_, dims_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse(tensor_, dims_; name=nothing) + if tf.in_eager_mode() + reverse_eager(tensor_, dims_; name=name) + else + reverse_graph(tensor_, dims_; name=name) + end end - end + end end @@ -16804,45 +30421,77 @@ end """ begin - function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DecodePng") do - desc = tf.NodeDescription("DecodePng") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function decode_png_graph(contents_; name=nothing, channels=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DecodePng") do + desc = tf.NodeDescription("DecodePng") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) + desc = tf.EagerOp("DecodePng") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) + if tf.in_eager_mode() + decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) + else + decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function decode_png_eager(contents_; name=nothing, channels=nothing, dtype=nothing) - desc = tf.EagerOp("DecodePng") - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_png, [contents_], name=nothing, channels=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_png(contents_; name=nothing, channels=nothing, dtype=nothing) - if tf.in_eager_mode() - decode_png_eager(contents_; name=name, channels=channels, dtype=dtype) - else - decode_png_graph(contents_; name=name, channels=channels, dtype=dtype) - end - end end @@ -16852,48 +30501,102 @@ end """ begin - function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, 
iou_threshold_; name=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionV2") do - desc = tf.NodeDescription("NonMaxSuppressionV2") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) - (boxes_, scores_) = tf.tf_promote(boxes_, scores_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - end - tf.Tensor(tf.Operation(desc)) - end - function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) - desc = tf.EagerOp("NonMaxSuppressionV2") - boxes_ = convert(tf.EagerTensor, boxes_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_ = convert(tf.EagerTensor, max_output_size_) - iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, iou_threshold_) - desc["T"] = tf.data_type(boxes_) - desc["T"] = tf.data_type(scores_) - res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) - else - non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + begin + function non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionV2") do + desc = tf.NodeDescription("NonMaxSuppressionV2") + begin + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + scores_ = convert(Tensor{Float32}, scores_) + begin + end + end + begin + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + begin + end + end + begin + iou_threshold_ = convert(Tensor{Float32}, iou_threshold_) + begin + end + end + begin + (boxes_, scores_) = tf.tf_promote(boxes_, scores_) + end + end + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + desc = tf.EagerOp("NonMaxSuppressionV2") + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + iou_threshold_ = convert(tf.EagerTensor, iou_threshold_) + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, iou_threshold_) + end + end + begin + end + begin + desc["T"] = tf.data_type(boxes_) + end + begin + desc["T"] = tf.data_type(scores_) + end + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression_v2, [boxes_, scores_, max_output_size_, iou_threshold_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_v2(boxes_, scores_, max_output_size_, iou_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_v2_eager(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + else + non_max_suppression_v2_graph(boxes_, scores_, max_output_size_, iou_threshold_; name=name) + end end - end + end end @@ -16903,40 +30606,78 @@ end """ begin - function igamma_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Igamma") do - desc = tf.NodeDescription("Igamma") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) + begin + function igamma_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igamma") do + desc = tf.NodeDescription("Igamma") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (a_, x_) = tf.tf_promote(a_, x_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function igamma_eager(a_, x_; name=nothing) - desc = tf.EagerOp("Igamma") - a_ = convert(tf.EagerTensor, a_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(igamma, [a_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function igamma_eager(a_, x_; name=nothing) + desc = tf.EagerOp("Igamma") + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(igamma, [a_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma(a_, x_; name=nothing) - if tf.in_eager_mode() - igamma_eager(a_, x_; name=name) - else - igamma_graph(a_, x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma(a_, x_; name=nothing) + if tf.in_eager_mode() + igamma_eager(a_, x_; name=name) + else + igamma_graph(a_, x_; name=name) + end end - end + end end @@ -16946,35 +30687,63 @@ end """ begin - function digamma_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Digamma") do - desc = tf.NodeDescription("Digamma") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function digamma_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Digamma") do + desc = tf.NodeDescription("Digamma") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function digamma_eager(x_; name=nothing) - desc = tf.EagerOp("Digamma") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(digamma, 
[x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function digamma_eager(x_; name=nothing) + desc = tf.EagerOp("Digamma") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(digamma, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function digamma(x_; name=nothing) - if tf.in_eager_mode() - digamma_eager(x_; name=name) - else - digamma_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function digamma(x_; name=nothing) + if tf.in_eager_mode() + digamma_eager(x_; name=name) + else + digamma_graph(x_; name=name) + end end - end + end end @@ -16984,78 +30753,184 @@ end """ begin - function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdaMax") do - desc = tf.NodeDescription("ResourceApplyAdaMax") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAdaMax") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - beta1_power_ = convert(tf.EagerTensor, beta1_power_) - lr_ = convert(tf.EagerTensor, lr_) - beta1_ = convert(tf.EagerTensor, beta1_) - beta2_ = convert(tf.EagerTensor, beta2_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(beta1_power_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(beta1_) - desc["T"] = tf.data_type(beta2_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdaMax") do + desc = tf.NodeDescription("ResourceApplyAdaMax") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + beta1_power_ = convert(Tensor{Any}, beta1_power_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + beta1_ = convert(Tensor{Any}, beta1_) + begin + end + end + begin + beta2_ = convert(Tensor{Any}, beta2_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdaMax") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(beta1_power_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(beta1_) + end + begin + desc["T"] = tf.data_type(beta2_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) 
>= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -17065,47 +30940,83 @@ end """ begin - function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "SpaceToDepth") do - desc = tf.NodeDescription("SpaceToDepth") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) + begin + function space_to_depth_graph(input_; name=nothing, block_size=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "SpaceToDepth") do + desc = tf.NodeDescription("SpaceToDepth") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) + desc = tf.EagerOp("SpaceToDepth") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) + if tf.in_eager_mode() + space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) + else + space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) end - function space_to_depth_eager(input_; name=nothing, block_size=nothing, data_format=nothing) - desc = tf.EagerOp("SpaceToDepth") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(space_to_depth, [input_], name=nothing, block_size=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_depth(input_; name=nothing, block_size=nothing, data_format=nothing) - if tf.in_eager_mode() - space_to_depth_eager(input_; name=name, block_size=block_size, data_format=data_format) - else - space_to_depth_graph(input_; name=name, block_size=block_size, data_format=data_format) - end - end end @@ -17115,40 +31026,78 @@ end """ begin - function sqrt_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "SqrtGrad") do - desc = tf.NodeDescription("SqrtGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function sqrt_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "SqrtGrad") do + desc = tf.NodeDescription("SqrtGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sqrt_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("SqrtGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sqrt_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("SqrtGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(sqrt_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - sqrt_grad_eager(y_, dy_; name=name) - else - sqrt_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + sqrt_grad_eager(y_, dy_; name=name) + else + sqrt_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -17158,67 +31107,119 @@ end """ begin - function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapUnstage") do - desc = tf.NodeDescription("MapUnstage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, 
"MapUnstage") do + desc = tf.NodeDescription("MapUnstage") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapUnstage") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapUnstage") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_unstage, 
[key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -17228,46 +31229,80 @@ end """ begin - function qr_graph(input_; name=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "Qr") do - desc = tf.NodeDescription("Qr") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) + begin + function qr_graph(input_; name=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "Qr") do + desc = tf.NodeDescription("Qr") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function qr_eager(input_; name=nothing, full_matrices=nothing) - desc = tf.EagerOp("Qr") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function qr_eager(input_; name=nothing, full_matrices=nothing) + desc = tf.EagerOp("Qr") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(qr, [input_], name=nothing, full_matrices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing) - if tf.in_eager_mode() - qr_eager(input_; name=name, full_matrices=full_matrices) - else - qr_graph(input_; name=name, full_matrices=full_matrices) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function qr(input_; name=nothing, full_matrices=nothing) + if tf.in_eager_mode() + qr_eager(input_; name=name, full_matrices=full_matrices) + else + qr_graph(input_; name=name, full_matrices=full_matrices) + end end - end + end end @@ -17277,70 +31312,144 @@ end """ begin - function 
boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do - desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") - node_id_range_ = convert(Tensor{Int32}, node_id_range_) - stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_] - l1_ = convert(Tensor{Float32}, l1_) - l2_ = convert(Tensor{Float32}, l2_) - tree_complexity_ = convert(Tensor{Float32}, tree_complexity_) - min_node_weight_ = convert(Tensor{Float32}, min_node_weight_) - tf.add_input(desc, node_id_range_) - tf.add_input(desc, stats_summary_list_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, tree_complexity_) - tf.add_input(desc, min_node_weight_) - if max_splits !== nothing - desc["max_splits"] = Base.Int(max_splits) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCalculateBestGainsPerFeature") do + desc = tf.NodeDescription("BoostedTreesCalculateBestGainsPerFeature") + begin + begin + node_id_range_ = convert(Tensor{Int32}, node_id_range_) + begin + end + end + begin + stats_summary_list_ = [convert(Tensor{Float32}, x) for x = stats_summary_list_] + begin + end + end + begin + l1_ = convert(Tensor{Float32}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Float32}, l2_) + begin + end + end + begin + tree_complexity_ = convert(Tensor{Float32}, tree_complexity_) + begin + end + end + begin + min_node_weight_ = convert(Tensor{Float32}, min_node_weight_) + begin + end + end + end + begin + begin + tf.add_input(desc, node_id_range_) + end + begin + tf.add_input(desc, stats_summary_list_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, tree_complexity_) + end + begin + tf.add_input(desc, min_node_weight_) + end + end + begin + begin + if max_splits !== nothing + desc["max_splits"] = Base.Int(max_splits) + end + end + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") + node_id_range_ = convert(tf.EagerTensor, node_id_range_) + stats_summary_list_ = convert(tf.EagerTensor, stats_summary_list_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + tree_complexity_ = convert(tf.EagerTensor, tree_complexity_) + min_node_weight_ = convert(tf.EagerTensor, min_node_weight_) + begin + begin + tf.add_input(desc, node_id_range_) + end + begin + tf.add_input(desc, stats_summary_list_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, tree_complexity_) + end + begin + tf.add_input(desc, min_node_weight_) + end + end + begin + begin + if max_splits !== 
nothing + desc["max_splits"] = Base.Int(max_splits) + end + end + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + else + boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesCalculateBestGainsPerFeature") - node_id_range_ = convert(tf.EagerTensor, node_id_range_) - stats_summary_list_ = convert(tf.EagerTensor, stats_summary_list_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - tree_complexity_ = convert(tf.EagerTensor, tree_complexity_) - min_node_weight_ = convert(tf.EagerTensor, min_node_weight_) - tf.add_input(desc, node_id_range_) - tf.add_input(desc, stats_summary_list_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, tree_complexity_) - tf.add_input(desc, min_node_weight_) - if max_splits !== nothing - desc["max_splits"] = Base.Int(max_splits) - end - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_calculate_best_gains_per_feature, [node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_], name=nothing, max_splits=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_calculate_best_gains_per_feature(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=nothing, max_splits=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_calculate_best_gains_per_feature_eager(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) - else - boosted_trees_calculate_best_gains_per_feature_graph(node_id_range_, stats_summary_list_, l1_, l2_, tree_complexity_, min_node_weight_; name=name, max_splits=max_splits, num_features=num_features) - end - end end @@ -17350,60 +31459,122 @@ end """ begin - function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, 
"UnbatchGrad") do - desc = tf.NodeDescription("UnbatchGrad") - original_input_ = convert(Tensor{Any}, original_input_) - batch_index_ = convert(Tensor{Int64}, batch_index_) - grad_ = convert(Tensor{Any}, grad_) - id_ = convert(Tensor{Int64}, id_) - (original_input_, grad_) = tf.tf_promote(original_input_, grad_) - tf.add_input(desc, original_input_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, grad_) - tf.add_input(desc, id_) - if container !== nothing - desc["container"] = Base.String(container) + begin + function unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "UnbatchGrad") do + desc = tf.NodeDescription("UnbatchGrad") + begin + begin + original_input_ = convert(Tensor{Any}, original_input_) + begin + end + end + begin + batch_index_ = convert(Tensor{Int64}, batch_index_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + id_ = convert(Tensor{Int64}, id_) + begin + end + end + begin + (original_input_, grad_) = tf.tf_promote(original_input_, grad_) + end + end + begin + begin + tf.add_input(desc, original_input_) + end + begin + tf.add_input(desc, batch_index_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, id_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("UnbatchGrad") + original_input_ = convert(tf.EagerTensor, original_input_) + batch_index_ = convert(tf.EagerTensor, batch_index_) + grad_ = convert(tf.EagerTensor, grad_) + id_ = convert(tf.EagerTensor, id_) + begin + begin + tf.add_input(desc, original_input_) + end + begin + tf.add_input(desc, batch_index_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, id_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(original_input_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + else + unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("UnbatchGrad") - original_input_ = convert(tf.EagerTensor, original_input_) - 
batch_index_ = convert(tf.EagerTensor, batch_index_) - grad_ = convert(tf.EagerTensor, grad_) - id_ = convert(tf.EagerTensor, id_) - tf.add_input(desc, original_input_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, grad_) - tf.add_input(desc, id_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(original_input_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(unbatch_grad, [original_input_, batch_index_, grad_, id_], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch_grad(original_input_, batch_index_, grad_, id_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unbatch_grad_eager(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) - else - unbatch_grad_graph(original_input_, batch_index_, grad_, id_; name=name, container=container, shared_name=shared_name) - end - end end @@ -17413,35 +31584,63 @@ end """ begin - function log_softmax_graph(logits_; name=nothing) - local desc - tf.with_op_name(name, "LogSoftmax") do - desc = tf.NodeDescription("LogSoftmax") - logits_ = convert(Tensor{Any}, logits_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) + begin + function log_softmax_graph(logits_; name=nothing) + local desc + tf.with_op_name(name, "LogSoftmax") do + desc = tf.NodeDescription("LogSoftmax") + begin + begin + logits_ = convert(Tensor{Any}, logits_) + begin + end + end + begin + (logits_,) = tf.tf_promote(logits_) + end + end + begin + begin + tf.add_input(desc, logits_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function log_softmax_eager(logits_; name=nothing) - desc = tf.EagerOp("LogSoftmax") - logits_ = convert(tf.EagerTensor, logits_) - tf.add_input(desc, logits_) - desc["T"] = tf.data_type(logits_) - res = tf.execute(desc) - node = tf.TapeNode(log_softmax, [logits_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function log_softmax_eager(logits_; name=nothing) + desc = tf.EagerOp("LogSoftmax") + logits_ = convert(tf.EagerTensor, logits_) + begin + begin + tf.add_input(desc, logits_) + end + end + begin + end + begin + desc["T"] = tf.data_type(logits_) + end + res = tf.execute(desc) + node = tf.TapeNode(log_softmax, [logits_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_softmax(logits_; name=nothing) - if tf.in_eager_mode() - log_softmax_eager(logits_; name=name) - else - log_softmax_graph(logits_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_softmax(logits_; name=nothing) + if tf.in_eager_mode() + log_softmax_eager(logits_; name=name) + else + log_softmax_graph(logits_; name=name) + end end - end + end end @@ -17451,39 +31650,67 @@ end """ begin - function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) - local desc - tf.with_op_name(name, "ResourceCountUpTo") do - desc = tf.NodeDescription("ResourceCountUpTo") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, 
resource_) - if limit !== nothing - desc["limit"] = Base.Int(limit) + begin + function resource_count_up_to_graph(resource_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "ResourceCountUpTo") do + desc = tf.NodeDescription("ResourceCountUpTo") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) - desc = tf.EagerOp("ResourceCountUpTo") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - if limit !== nothing - desc["limit"] = Base.Int(limit) - end - res = tf.execute(desc) - node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function resource_count_up_to_eager(resource_; name=nothing, limit=nothing) + desc = tf.EagerOp("ResourceCountUpTo") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(resource_count_up_to, [resource_], name=nothing, limit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing) - if tf.in_eager_mode() - resource_count_up_to_eager(resource_; name=name, limit=limit) - else - resource_count_up_to_graph(resource_; name=name, limit=limit) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_count_up_to(resource_; name=nothing, limit=nothing) + if tf.in_eager_mode() + resource_count_up_to_eager(resource_; name=name, limit=limit) + else + resource_count_up_to_graph(resource_; name=name, limit=limit) + end end - end + end end @@ -17493,47 +31720,83 @@ end """ begin - function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) - local desc - tf.with_op_name(name, "AccumulateNV2") do - desc = tf.NodeDescription("AccumulateNV2") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function accumulate_nv2_graph(inputs_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "AccumulateNV2") do + desc = tf.NodeDescription("AccumulateNV2") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) + desc = tf.EagerOp("AccumulateNV2") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + 
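# (Optional-attribute pattern used throughout: every keyword defaults to
# `nothing` and is written into the description only when actually supplied,
# as with `N` and `shape` here in AccumulateNV2; attributes left unset fall
# back to the defaults in the op's registered schema.)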
end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) + if tf.in_eager_mode() + accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) + else + accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function accumulate_nv2_eager(inputs_; name=nothing, N=nothing, shape=nothing) - desc = tf.EagerOp("AccumulateNV2") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(accumulate_nv2, [inputs_], name=nothing, N=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulate_nv2(inputs_; name=nothing, N=nothing, shape=nothing) - if tf.in_eager_mode() - accumulate_nv2_eager(inputs_; name=name, N=N, shape=shape) - else - accumulate_nv2_graph(inputs_; name=name, N=N, shape=shape) - end - end end @@ -17543,83 +31806,151 @@ end """ begin - function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ParallelMapDataset") do - desc = tf.NodeDescription("ParallelMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) + begin + function parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ParallelMapDataset") do + desc = tf.NodeDescription("ParallelMapDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + begin + num_parallel_calls_ = convert(Tensor{Int32}, num_parallel_calls_) + begin + end + end + end + begin + begin + tf.add_input(desc, 
input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ParallelMapDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + else + parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, 
output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) + end end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) - end - function parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("ParallelMapDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(parallel_map_dataset, [input_dataset_, other_arguments_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_map_dataset(input_dataset_, other_arguments_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, sloppy=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - parallel_map_dataset_eager(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) - else - parallel_map_dataset_graph(input_dataset_, other_arguments_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, sloppy=sloppy, preserve_cardinality=preserve_cardinality) - end - end end @@ -17629,53 +31960,93 @@ end """ begin - function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomUniform") do - desc = tf.NodeDescription("RandomUniform") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function random_uniform_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomUniform") do + desc = tf.NodeDescription("RandomUniform") + 
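# (Graph-mode pattern, identical for every op in this file: tf.with_op_name
# scopes the node's name, the NodeDescription is populated with converted and
# tf_promote'd inputs plus any supplied attributes, and the result is wrapped
# as tf.Tensor(tf.Operation(desc)); multi-output ops such as Qr instead
# collect tf.Tensor(op, out_idx) for each declared output.)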
begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("RandomUniform") + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) end - function random_uniform_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - desc = tf.EagerOp("RandomUniform") - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(random_uniform, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_uniform(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - random_uniform_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - random_uniform_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - end - end end @@ -17685,63 +32056,107 @@ end """ begin - function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - local desc - tf.with_op_name(name, "UnicodeTranscode") do - desc = tf.NodeDescription("UnicodeTranscode") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if output_encoding !== nothing - desc["output_encoding"] = Base.String(output_encoding) + begin + function unicode_transcode_graph(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + 
tf.with_op_name(name, "UnicodeTranscode") do + desc = tf.NodeDescription("UnicodeTranscode") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeTranscode") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.in_eager_mode() + unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end - end - tf.Tensor(tf.Operation(desc)) - end - function unicode_transcode_eager(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - desc = tf.EagerOp("UnicodeTranscode") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if output_encoding !== nothing - 
desc["output_encoding"] = Base.String(output_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end - res = tf.execute(desc) - node = tf.TapeNode(unicode_transcode, [input_], name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_transcode(input_; name=nothing, input_encoding=nothing, output_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.in_eager_mode() - unicode_transcode_eager(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - else - unicode_transcode_graph(input_; name=name, input_encoding=input_encoding, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - end - end end @@ -17751,33 +32166,57 @@ end """ begin - function reader_reset_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReset") do - desc = tf.NodeDescription("ReaderReset") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_reset_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReset") do + desc = tf.NodeDescription("ReaderReset") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_reset_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderReset") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_reset_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderReset") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_reset, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_reset_eager(reader_handle_; name=name) - else - reader_reset_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_reset_eager(reader_handle_; name=name) + else + reader_reset_graph(reader_handle_; name=name) + end end - end + end end @@ -17787,47 +32226,83 @@ end Replacement node for NcclBroadcast. 
""" begin - function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclBroadcastSend") do - desc = tf.NodeDescription("_NcclBroadcastSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function _nccl_broadcast_send_graph(input_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastSend") do + desc = tf.NodeDescription("_NcclBroadcastSend") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclBroadcastSend") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name) + else + _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _nccl_broadcast_send_eager(input_; name=nothing, num_devices=nothing, shared_name=nothing) - desc = tf.EagerOp("_NcclBroadcastSend") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_nccl_broadcast_send, [input_], name=nothing, num_devices=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_send(input_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_broadcast_send_eager(input_; name=name, num_devices=num_devices, shared_name=shared_name) - else - _nccl_broadcast_send_graph(input_; name=name, num_devices=num_devices, shared_name=shared_name) - end - end end @@ -17837,35 +32312,63 @@ end """ begin - function batch_matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, 
"BatchMatrixDeterminant") do - desc = tf.NodeDescription("BatchMatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function batch_matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDeterminant") do + desc = tf.NodeDescription("BatchMatrixDeterminant") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_matrix_determinant_eager(input_; name=nothing) - desc = tf.EagerOp("BatchMatrixDeterminant") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_matrix_determinant_eager(input_; name=nothing) + desc = tf.EagerOp("BatchMatrixDeterminant") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_determinant, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - batch_matrix_determinant_eager(input_; name=name) - else - batch_matrix_determinant_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + batch_matrix_determinant_eager(input_; name=name) + else + batch_matrix_determinant_graph(input_; name=name) + end end - end + end end @@ -17875,40 +32378,78 @@ end """ begin - function less_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LessEqual") do - desc = tf.NodeDescription("LessEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function less_equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LessEqual") do + desc = tf.NodeDescription("LessEqual") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function less_equal_eager(x_, y_; name=nothing) - desc = tf.EagerOp("LessEqual") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(less_equal, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function less_equal_eager(x_, y_; name=nothing) + desc = tf.EagerOp("LessEqual") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) 
+ end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(less_equal, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - less_equal_eager(x_, y_; name=name) - else - less_equal_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + less_equal_eager(x_, y_; name=name) + else + less_equal_graph(x_, y_; name=name) + end end - end + end end @@ -17918,51 +32459,103 @@ end """ begin - function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyGradientDescent") do - desc = tf.NodeDescription("ApplyGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - delta_ = convert(Tensor{Any}, delta_) - (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyGradientDescent") do + desc = tf.NodeDescription("ApplyGradientDescent") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + delta_ = convert(Tensor{Any}, delta_) + begin + end + end + begin + (var_, alpha_, delta_) = tf.tf_promote(var_, alpha_, delta_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyGradientDescent") + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + delta_ = convert(tf.EagerTensor, delta_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(delta_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) + else + apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - 
function apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyGradientDescent") - var_ = convert(tf.EagerTensor, var_) - alpha_ = convert(tf.EagerTensor, alpha_) - delta_ = convert(tf.EagerTensor, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(delta_) - res = tf.execute(desc) - node = tf.TapeNode(apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking) - else - apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking) - end - end end @@ -17972,46 +32565,94 @@ end """ begin - function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtN") do - desc = tf.NodeDescription("SparseSegmentSqrtN") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) - desc = tf.EagerOp("SparseSegmentSqrtN") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) + begin + function sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtN") do + desc = tf.NodeDescription("SparseSegmentSqrtN") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + 
tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtN") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sqrt_n, [data_, indices_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_sqrt_n_graph(data_, indices_, segment_ids_; name=name) + end end - end + end end @@ -18021,35 +32662,63 @@ end """ begin - function matrix_logarithm_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixLogarithm") do - desc = tf.NodeDescription("MatrixLogarithm") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function matrix_logarithm_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixLogarithm") do + desc = tf.NodeDescription("MatrixLogarithm") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_logarithm_eager(input_; name=nothing) - desc = tf.EagerOp("MatrixLogarithm") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_logarithm, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_logarithm_eager(input_; name=nothing) + desc = tf.EagerOp("MatrixLogarithm") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_logarithm, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_logarithm(input_; name=nothing) - if tf.in_eager_mode() - matrix_logarithm_eager(input_; name=name) - else - matrix_logarithm_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_logarithm(input_; name=nothing) + if tf.in_eager_mode() + matrix_logarithm_eager(input_; name=name) + else + matrix_logarithm_graph(input_; name=name) + end end - end + end end @@ -18059,53 +32728,107 @@ end """ begin - function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterMul") do - desc = tf.NodeDescription("ScatterMul") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - 
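# The statement below is the generator's index bridge: Julia is 1-based while
# the TensorFlow kernels are 0-based, so index-like inputs are shifted down by
# one before being wired into the graph. A sketch of the effect, with assumed
# values:
#
#     # indices_ == [1, 2, 3] on the Julia side
#     # indices_ - convert(tf.Tensor{eltype(indices_)}, 1) == [0, 1, 2] for TF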
indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_mul_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMul") do + desc = tf.NodeDescription("ScatterMul") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMul") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_mul_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_mul_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterMul") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_mul, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_mul(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_mul_eager(ref_, indices_, updates_; 
name=name, use_locking=use_locking) - else - scatter_mul_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -18115,69 +32838,117 @@ end """ begin - function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - local desc - tf.with_op_name(name, "DecodeJpeg") do - desc = tf.NodeDescription("DecodeJpeg") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) + begin + function decode_jpeg_graph(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeJpeg") do + desc = tf.NodeDescription("DecodeJpeg") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + end + begin + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + end + begin + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + end + begin + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + end + begin + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + desc = tf.EagerOp("DecodeJpeg") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + end + begin + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + end + begin + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + end + begin + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + end + begin + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.in_eager_mode() + decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function decode_jpeg_eager(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - desc = tf.EagerOp("DecodeJpeg") - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, contents_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_jpeg, [contents_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_jpeg(contents_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - if tf.in_eager_mode() - decode_jpeg_eager(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - else - decode_jpeg_graph(contents_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - end - end end @@ -18187,77 +32958,125 @@ end """ begin - function random_shuffle_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "RandomShuffleQueueV2") do - desc = tf.NodeDescription("RandomShuffleQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function random_shuffle_queue_v2_graph(; 
name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueueV2") do + desc = tf.NodeDescription("RandomShuffleQueueV2") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_shuffle_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("RandomShuffleQueueV2") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_shuffle_queue_v2_eager(; name=nothing, component_types=nothing, 
shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("RandomShuffleQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(random_shuffle_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - random_shuffle_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - else - random_shuffle_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - end - end end @@ -18267,49 +33086,89 @@ end """ begin - function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueManyV2") do - desc = tf.NodeDescription("QueueEnqueueManyV2") - handle_ = convert(Tensor{Any}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) + begin + function queue_enqueue_many_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueManyV2") do + desc = tf.NodeDescription("QueueEnqueueManyV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueueManyV2") + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) + begin + 
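# List-valued inputs: "QueueEnqueueManyV2" takes a variable number of component
# tensors, so the graph builder converts each element separately and the
# optional "Tcomponents" attribute, when supplied, records one dtype per
# component. Note that this eager wrapper converts `components_` with a single
# convert(tf.EagerTensor, ...) call rather than mapping over the collection the
# way its graph counterpart does.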
begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_enqueue_many_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueEnqueueManyV2") - handle_ = convert(tf.EagerTensor, handle_) - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue_many_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_many_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_many_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_many_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - end - end end @@ -18319,84 +33178,200 @@ end """ begin - function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do - desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) 
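# Every graph builder in this file reduces to the same five steps; a condensed
# sketch, assuming the tf helper names used throughout:
#
#     desc = tf.NodeDescription("SomeOp")   # 1. describe the op
#     tf.add_input(desc, x)                 # 2. wire inputs, in registry order
#     desc["some_attr"] = value             # 3. set any attributes
#     op = tf.Operation(desc)               # 4. add the node to the graph
#     tf.Tensor(op)                         # 5. return its first output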
- end - function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") - var_ = convert(tf.EagerTensor, var_) - mg_ = convert(tf.EagerTensor, mg_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyCenteredRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + mg_ = convert(Tensor{Any}, mg_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + 
tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyCenteredRMSProp") + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -18406,69 +33381,133 @@ end """ begin - function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "InterleaveDataset") do - desc = tf.NodeDescription("InterleaveDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = 
map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "InterleaveDataset") do + desc = tf.NodeDescription("InterleaveDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + begin + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + begin + end + end + begin + block_length_ = convert(Tensor{Int64}, block_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("InterleaveDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, 
block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("InterleaveDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - cycle_length_ = convert(tf.EagerTensor, cycle_length_) - block_length_ = convert(tf.EagerTensor, block_length_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -18478,39 +33517,67 @@ end """ begin - function stack_pop_graph(handle_; name=nothing, elem_type=nothing) - local desc - tf.with_op_name(name, "StackPop") do - desc = tf.NodeDescription("StackPop") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) + begin + function stack_pop_graph(handle_; name=nothing, elem_type=nothing) + local desc + tf.with_op_name(name, "StackPop") do + desc = tf.NodeDescription("StackPop") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stack_pop_eager(handle_; name=nothing, elem_type=nothing) - desc = tf.EagerOp("StackPop") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - res = tf.execute(desc) - node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stack_pop_eager(handle_; name=nothing, elem_type=nothing) + desc = tf.EagerOp("StackPop") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + 
tf.add_input(desc, handle_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stack_pop, [handle_], name=nothing, elem_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing) - if tf.in_eager_mode() - stack_pop_eager(handle_; name=name, elem_type=elem_type) - else - stack_pop_graph(handle_; name=name, elem_type=elem_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_pop(handle_; name=nothing, elem_type=nothing) + if tf.in_eager_mode() + stack_pop_eager(handle_; name=name, elem_type=elem_type) + else + stack_pop_graph(handle_; name=name, elem_type=elem_type) + end end - end + end end @@ -18520,41 +33587,81 @@ end """ begin - function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do - desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - stamp_token_ = convert(Tensor{Int64}, stamp_token_) - tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - stamp_token_ = convert(tf.EagerTensor, stamp_token_) - tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.in_eager_mode() - boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - else - boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + begin + function boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesDeserializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesDeserializeEnsemble") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + begin + end + end + begin + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, stamp_token_) 
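# Input order is significant here: TensorFlow op inputs are positional, so the
# generator emits one tf.add_input call per declared input, in the order the
# "BoostedTreesDeserializeEnsemble" registry entry lists them (the handle, then
# the stamp token, then the serialized ensemble).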
+ end + begin + tf.add_input(desc, tree_ensemble_serialized_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + desc = tf.EagerOp("BoostedTreesDeserializeEnsemble") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + stamp_token_ = convert(tf.EagerTensor, stamp_token_) + tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, stamp_token_) + end + begin + tf.add_input(desc, tree_ensemble_serialized_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_deserialize_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_deserialize_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.in_eager_mode() + boosted_trees_deserialize_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_deserialize_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end end - end + end end @@ -18564,55 +33671,107 @@ end """ begin - function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolV2") do - desc = tf.NodeDescription("MaxPoolV2") - input_ = convert(Tensor{Float32}, input_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function max_pool_v2_graph(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolV2") do + desc = tf.NodeDescription("MaxPoolV2") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + ksize_ = convert(Tensor{Int32}, ksize_) + begin + end + end + begin + strides_ = convert(Tensor{Int32}, strides_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolV2") + input_ = convert(tf.EagerTensor, input_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if 
data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_v2_eager(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPoolV2") - input_ = convert(tf.EagerTensor, input_) - ksize_ = convert(tf.EagerTensor, ksize_) - strides_ = convert(tf.EagerTensor, strides_) - tf.add_input(desc, input_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_v2, [input_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_v2(input_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_v2_eager(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_v2_graph(input_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - end - end end @@ -18622,67 +33781,135 @@ end """ begin - function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - local desc - tf.with_op_name(name, "LoadAndRemapMatrix") do - desc = tf.NodeDescription("LoadAndRemapMatrix") - ckpt_path_ = convert(Tensor{String}, ckpt_path_) - old_tensor_name_ = convert(Tensor{String}, old_tensor_name_) - row_remapping_ = convert(Tensor{Int64}, row_remapping_) - col_remapping_ = convert(Tensor{Int64}, col_remapping_) - initializing_values_ = convert(Tensor{Float32}, initializing_values_) - tf.add_input(desc, ckpt_path_) - tf.add_input(desc, old_tensor_name_) - tf.add_input(desc, row_remapping_) - tf.add_input(desc, col_remapping_) - tf.add_input(desc, initializing_values_) - if num_rows !== nothing - desc["num_rows"] = Base.Int(num_rows) - end - if num_cols !== nothing - desc["num_cols"] = Base.Int(num_cols) + begin + function load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + local desc + tf.with_op_name(name, "LoadAndRemapMatrix") do + desc = tf.NodeDescription("LoadAndRemapMatrix") + begin + begin + ckpt_path_ = convert(Tensor{String}, ckpt_path_) + begin + end + end + begin + old_tensor_name_ = convert(Tensor{String}, old_tensor_name_) + begin + end + end + begin + row_remapping_ = 
convert(Tensor{Int64}, row_remapping_) + begin + end + end + begin + col_remapping_ = convert(Tensor{Int64}, col_remapping_) + begin + end + end + begin + initializing_values_ = convert(Tensor{Float32}, initializing_values_) + begin + end + end + end + begin + begin + tf.add_input(desc, ckpt_path_) + end + begin + tf.add_input(desc, old_tensor_name_) + end + begin + tf.add_input(desc, row_remapping_) + end + begin + tf.add_input(desc, col_remapping_) + end + begin + tf.add_input(desc, initializing_values_) + end + end + begin + begin + if num_rows !== nothing + desc["num_rows"] = Base.Int(num_rows) + end + end + begin + if num_cols !== nothing + desc["num_cols"] = Base.Int(num_cols) + end + end + begin + if max_rows_in_memory !== nothing + desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + desc = tf.EagerOp("LoadAndRemapMatrix") + ckpt_path_ = convert(tf.EagerTensor, ckpt_path_) + old_tensor_name_ = convert(tf.EagerTensor, old_tensor_name_) + row_remapping_ = convert(tf.EagerTensor, row_remapping_) + col_remapping_ = convert(tf.EagerTensor, col_remapping_) + initializing_values_ = convert(tf.EagerTensor, initializing_values_) + begin + begin + tf.add_input(desc, ckpt_path_) + end + begin + tf.add_input(desc, old_tensor_name_) + end + begin + tf.add_input(desc, row_remapping_) + end + begin + tf.add_input(desc, col_remapping_) + end + begin + tf.add_input(desc, initializing_values_) + end + end + begin + begin + if num_rows !== nothing + desc["num_rows"] = Base.Int(num_rows) + end + end + begin + if num_cols !== nothing + desc["num_cols"] = Base.Int(num_cols) + end + end + begin + if max_rows_in_memory !== nothing + desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) + if tf.in_eager_mode() + load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + else + load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) + end end - if max_rows_in_memory !== nothing - desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - desc = tf.EagerOp("LoadAndRemapMatrix") - ckpt_path_ = convert(tf.EagerTensor, ckpt_path_) - old_tensor_name_ = convert(tf.EagerTensor, 
old_tensor_name_) - row_remapping_ = convert(tf.EagerTensor, row_remapping_) - col_remapping_ = convert(tf.EagerTensor, col_remapping_) - initializing_values_ = convert(tf.EagerTensor, initializing_values_) - tf.add_input(desc, ckpt_path_) - tf.add_input(desc, old_tensor_name_) - tf.add_input(desc, row_remapping_) - tf.add_input(desc, col_remapping_) - tf.add_input(desc, initializing_values_) - if num_rows !== nothing - desc["num_rows"] = Base.Int(num_rows) - end - if num_cols !== nothing - desc["num_cols"] = Base.Int(num_cols) - end - if max_rows_in_memory !== nothing - desc["max_rows_in_memory"] = Base.Int(max_rows_in_memory) - end - res = tf.execute(desc) - node = tf.TapeNode(load_and_remap_matrix, [ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_], name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_and_remap_matrix(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=nothing, num_rows=nothing, num_cols=nothing, max_rows_in_memory=nothing) - if tf.in_eager_mode() - load_and_remap_matrix_eager(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) - else - load_and_remap_matrix_graph(ckpt_path_, old_tensor_name_, row_remapping_, col_remapping_, initializing_values_; name=name, num_rows=num_rows, num_cols=num_cols, max_rows_in_memory=max_rows_in_memory) - end - end end @@ -18692,68 +33919,152 @@ end """ begin - function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyProximalGradientDescent") do - desc = tf.NodeDescription("SparseApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyProximalGradientDescent") - var_ = convert(tf.EagerTensor, var_) - alpha_ = convert(tf.EagerTensor, alpha_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - 
desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + begin + function sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("SparseApplyProximalGradientDescent") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, alpha_, l1_, l2_, grad_) = tf.tf_promote(var_, alpha_, l1_, l2_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyProximalGradientDescent") + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -18763,51 +34074,87 @@ end """ begin - function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "PyFuncStateless") do - desc = tf.NodeDescription("PyFuncStateless") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) + begin + function py_func_stateless_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFuncStateless") do + desc = tf.NodeDescription("PyFuncStateless") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("PyFuncStateless") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - end - tf.Tensor(tf.Operation(desc)) end - function py_func_stateless_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - desc = tf.EagerOp("PyFuncStateless") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - res = tf.execute(desc) - node = tf.TapeNode(py_func_stateless, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func_stateless(input_; name=nothing, token=nothing, Tin=nothing, 
Tout=nothing) - if tf.in_eager_mode() - py_func_stateless_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - py_func_stateless_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) - end - end end @@ -18817,35 +34164,63 @@ end """ begin - function where_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Where") do - desc = tf.NodeDescription("Where") - input_ = convert(Tensor{Bool}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function where_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Where") do + desc = tf.NodeDescription("Where") + begin + begin + input_ = convert(Tensor{Bool}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function where_eager(input_; name=nothing) - desc = tf.EagerOp("Where") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(where, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function where_eager(input_; name=nothing) + desc = tf.EagerOp("Where") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(where, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function where(input_; name=nothing) - if tf.in_eager_mode() - where_eager(input_; name=name) - else - where_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function where(input_; name=nothing) + if tf.in_eager_mode() + where_eager(input_; name=name) + else + where_graph(input_; name=name) + end end - end + end end @@ -18855,61 +34230,109 @@ end """ begin - function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) - local desc - tf.with_op_name(name, "Mfcc") do - desc = tf.NodeDescription("Mfcc") - spectrogram_ = convert(Tensor{Float32}, spectrogram_) - sample_rate_ = convert(Tensor{Int32}, sample_rate_) - tf.add_input(desc, spectrogram_) - tf.add_input(desc, sample_rate_) - if upper_frequency_limit !== nothing - desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) - end - if lower_frequency_limit !== nothing - desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) - end - if filterbank_channel_count !== nothing - desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + begin + function mfcc_graph(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + local desc + tf.with_op_name(name, "Mfcc") do + desc = tf.NodeDescription("Mfcc") + begin + begin + spectrogram_ = convert(Tensor{Float32}, spectrogram_) + begin + end + end + begin + sample_rate_ = convert(Tensor{Int32}, sample_rate_) + begin + end + end + end + begin + begin + tf.add_input(desc, spectrogram_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if 
upper_frequency_limit !== nothing + desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) + end + end + begin + if lower_frequency_limit !== nothing + desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) + end + end + begin + if filterbank_channel_count !== nothing + desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + end + end + begin + if dct_coefficient_count !== nothing + desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + desc = tf.EagerOp("Mfcc") + spectrogram_ = convert(tf.EagerTensor, spectrogram_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) + begin + begin + tf.add_input(desc, spectrogram_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if upper_frequency_limit !== nothing + desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) + end + end + begin + if lower_frequency_limit !== nothing + desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) + end + end + begin + if filterbank_channel_count !== nothing + desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) + end + end + begin + if dct_coefficient_count !== nothing + desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) + if tf.in_eager_mode() + mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + else + mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) + end end - if dct_coefficient_count !== nothing - desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mfcc_eager(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) - desc = tf.EagerOp("Mfcc") - spectrogram_ = convert(tf.EagerTensor, spectrogram_) - sample_rate_ = convert(tf.EagerTensor, sample_rate_) - tf.add_input(desc, spectrogram_) - tf.add_input(desc, sample_rate_) - if upper_frequency_limit !== nothing - desc["upper_frequency_limit"] = Base.identity(upper_frequency_limit) - end - if lower_frequency_limit !== nothing - desc["lower_frequency_limit"] = Base.identity(lower_frequency_limit) - end - if filterbank_channel_count !== nothing - desc["filterbank_channel_count"] = Base.Int(filterbank_channel_count) - end - if 
dct_coefficient_count !== nothing - desc["dct_coefficient_count"] = Base.Int(dct_coefficient_count) - end - res = tf.execute(desc) - node = tf.TapeNode(mfcc, [spectrogram_, sample_rate_], name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mfcc(spectrogram_, sample_rate_; name=nothing, upper_frequency_limit=nothing, lower_frequency_limit=nothing, filterbank_channel_count=nothing, dct_coefficient_count=nothing) - if tf.in_eager_mode() - mfcc_eager(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) - else - mfcc_graph(spectrogram_, sample_rate_; name=name, upper_frequency_limit=upper_frequency_limit, lower_frequency_limit=lower_frequency_limit, filterbank_channel_count=filterbank_channel_count, dct_coefficient_count=dct_coefficient_count) - end - end end @@ -18919,41 +34342,73 @@ end """ begin - function check_numerics_graph(tensor_; name=nothing, message=nothing) - local desc - tf.with_op_name(name, "CheckNumerics") do - desc = tf.NodeDescription("CheckNumerics") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if message !== nothing - desc["message"] = Base.String(message) + begin + function check_numerics_graph(tensor_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "CheckNumerics") do + desc = tf.NodeDescription("CheckNumerics") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function check_numerics_eager(tensor_; name=nothing, message=nothing) - desc = tf.EagerOp("CheckNumerics") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - if message !== nothing - desc["message"] = Base.String(message) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function check_numerics_eager(tensor_; name=nothing, message=nothing) + desc = tf.EagerOp("CheckNumerics") + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(check_numerics, [tensor_], name=nothing, message=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function check_numerics(tensor_; name=nothing, message=nothing) - if tf.in_eager_mode() - check_numerics_eager(tensor_; name=name, message=message) - else - check_numerics_graph(tensor_; name=name, message=message) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function check_numerics(tensor_; name=nothing, message=nothing) + if tf.in_eager_mode() + check_numerics_eager(tensor_; name=name, message=message) + else + check_numerics_graph(tensor_; name=name, message=message) + end end - end + end end @@ -18963,30 +34418,45 @@ end """ begin - function tpu_compilation_result_graph(; name=nothing) - local desc - tf.with_op_name(name, "TPUCompilationResult") do - desc - tf.NodeDescription("TPUCompilationResult") + begin + function tpu_compilation_result_graph(; name=nothing) + local desc + tf.with_op_name(name, "TPUCompilationResult") do + desc = tf.NodeDescription("TPUCompilationResult") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tpu_compilation_result_eager(; name=nothing) - desc = tf.EagerOp("TPUCompilationResult") - res = tf.execute(desc) - node = tf.TapeNode(tpu_compilation_result, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tpu_compilation_result_eager(; name=nothing) + desc = tf.EagerOp("TPUCompilationResult") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_compilation_result, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_compilation_result(; name=nothing) - if tf.in_eager_mode() - tpu_compilation_result_eager(; name=name) - else - tpu_compilation_result_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_compilation_result(; name=nothing) + if tf.in_eager_mode() + tpu_compilation_result_eager(; name=name) + else + tpu_compilation_result_graph(; name=name) + end end - end + end end @@ -18996,53 +34466,85 @@ end Retrieve embedding parameters for a single table. 
""" begin - function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingStochasticGradientDescentParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingStochasticGradientDescentParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingStochasticGradientDescentParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = 
Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_stochastic_gradient_descent_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_stochastic_gradient_descent_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_stochastic_gradient_descent_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_stochastic_gradient_descent_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -19052,50 +34554,106 @@ end """ begin - function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMeanGrad") do - desc = tf.NodeDescription("SparseSegmentMeanGrad") - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - output_dim0_ = convert(Tensor{Int32}, output_dim0_) - (grad_,) = tf.tf_promote(grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - desc = tf.EagerOp("SparseSegmentMeanGrad") - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - output_dim0_ = convert(tf.EagerTensor, output_dim0_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - desc["T"] = tf.data_type(grad_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) - else - sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + begin + function sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanGrad") do + desc = tf.NodeDescription("SparseSegmentMeanGrad") + begin + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + begin + end + end + begin + (grad_,) = 
tf.tf_promote(grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, output_dim0_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + desc = tf.EagerOp("SparseSegmentMeanGrad") + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + output_dim0_ = convert(tf.EagerTensor, output_dim0_) + begin + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, output_dim0_) + end + end + begin + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) + else + sparse_segment_mean_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end end - end + end end @@ -19105,64 +34663,118 @@ end """ begin - function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - local desc - tf.with_op_name(name, "TryRpc") do - desc = tf.NodeDescription("TryRpc") - address_ = convert(Tensor{String}, address_) - method_ = convert(Tensor{String}, method_) - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) + begin + function try_rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "TryRpc") do + desc = tf.NodeDescription("TryRpc") + begin + begin + address_ = convert(Tensor{String}, address_) + begin + end + end + begin + method_ = convert(Tensor{String}, method_) + begin + end + end + begin + request_ = convert(Tensor{String}, request_) + begin + end + end + end + begin + begin + tf.add_input(desc, address_) + end + begin + tf.add_input(desc, method_) + end + begin + tf.add_input(desc, request_) + end + end + begin + begin + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + end + begin + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + end + begin + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + desc = tf.EagerOp("TryRpc") + address_ = convert(tf.EagerTensor, address_) + 
method_ = convert(tf.EagerTensor, method_) + request_ = convert(tf.EagerTensor, request_) + begin + begin + tf.add_input(desc, address_) + end + begin + tf.add_input(desc, method_) + end + begin + tf.add_input(desc, request_) + end + end + begin + begin + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + end + begin + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + end + begin + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.in_eager_mode() + try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function try_rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - desc = tf.EagerOp("TryRpc") - address_ = convert(tf.EagerTensor, address_) - method_ = convert(tf.EagerTensor, method_) - request_ = convert(tf.EagerTensor, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) - end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(try_rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function try_rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - if tf.in_eager_mode() - try_rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - else - try_rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - end - end end @@ -19172,52 +34784,98 @@ end """ begin - function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "BatchMatrixTriangularSolve") do - desc = tf.NodeDescription("BatchMatrixTriangularSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) + begin + function batch_matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixTriangularSolve") 
do + desc = tf.NodeDescription("BatchMatrixTriangularSolve") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + end + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixTriangularSolve") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + end + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + else + batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - desc = tf.EagerOp("BatchMatrixTriangularSolve") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - else - batch_matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - end - end end @@ -19227,41 +34885,73 @@ end A graph node which represents a return value of a function. 
""" begin - function _retval_graph(input_; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_Retval") do - desc = tf.NodeDescription("_Retval") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) + begin + function _retval_graph(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_Retval") do + desc = tf.NodeDescription("_Retval") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _retval_eager(input_; name=nothing, index=nothing) + desc = tf.EagerOp("_Retval") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _retval(input_; name=nothing, index=nothing) + if tf.in_eager_mode() + _retval_eager(input_; name=name, index=index) + else + _retval_graph(input_; name=name, index=index) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _retval_eager(input_; name=nothing, index=nothing) - desc = tf.EagerOp("_Retval") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_retval, [input_], name=nothing, index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _retval(input_; name=nothing, index=nothing) - if tf.in_eager_mode() - _retval_eager(input_; name=name, index=index) - else - _retval_graph(input_; name=name, index=index) - end - end end @@ -19271,46 +34961,80 @@ end """ begin - function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "UniqueWithCounts") do - desc = tf.NodeDescription("UniqueWithCounts") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) + begin + function unique_with_counts_graph(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueWithCounts") do + desc = tf.NodeDescription("UniqueWithCounts") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) - desc = 
tf.EagerOp("UniqueWithCounts") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_with_counts_eager(x_; name=name, out_idx=out_idx) - else - unique_with_counts_graph(x_; name=name, out_idx=out_idx) + begin + function unique_with_counts_eager(x_; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueWithCounts") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end end - end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(unique_with_counts, [x_], name=nothing, out_idx=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts(x_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_with_counts_eager(x_; name=name, out_idx=out_idx) + else + unique_with_counts_graph(x_; name=name, out_idx=out_idx) + end + end + end end @@ -19320,40 +35044,78 @@ end """ begin - function add_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Add") do - desc = tf.NodeDescription("Add") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function add_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Add") do + desc = tf.NodeDescription("Add") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function add_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Add") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(add, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function add_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Add") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(add, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add(x_, y_; name=nothing) - if tf.in_eager_mode() - add_eager(x_, y_; name=name) - else - add_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function add(x_, y_; name=nothing) + if tf.in_eager_mode() + add_eager(x_, y_; name=name) + else + add_graph(x_, y_; name=name) + end end - end + end end @@ -19363,77 +35125,141 @@ end """ begin - function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalScanDataset") do - desc = tf.NodeDescription("ExperimentalScanDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) + begin + function experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalScanDataset") do + desc = tf.NodeDescription("ExperimentalScanDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + initial_state_ = [convert(Tensor{Any}, x) for x = initial_state_] + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, initial_state_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalScanDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + initial_state_ = convert(tf.EagerTensor, initial_state_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, initial_state_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Tstate !== nothing + desc["Tstate"] = map(Base.identity, Tstate) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = 
map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + else + experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("ExperimentalScanDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - initial_state_ = convert(tf.EagerTensor, initial_state_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, initial_state_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Tstate !== nothing - desc["Tstate"] = map(Base.identity, Tstate) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_scan_dataset, [input_dataset_, initial_state_, other_arguments_], name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_scan_dataset(input_dataset_, initial_state_, other_arguments_; name=nothing, f=nothing, Tstate=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - 
experimental_scan_dataset_eager(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_scan_dataset_graph(input_dataset_, initial_state_, other_arguments_; name=name, f=f, Tstate=Tstate, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - end - end end @@ -19443,45 +35269,85 @@ end """ begin - function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignAddVariableOp") do - desc = tf.NodeDescription("AssignAddVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function assign_add_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignAddVariableOp") do + desc = tf.NodeDescription("AssignAddVariableOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignAddVariableOp") + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["dtype"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function assign_add_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AssignAddVariableOp") - resource_ = convert(tf.EagerTensor, resource_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign_add_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - 
assign_add_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_add_variable_op_graph(resource_, value_; name=name, dtype=dtype) - end - end end @@ -19491,57 +35357,111 @@ end """ begin - function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "SplitV") do - desc = tf.NodeDescription("SplitV") - value_ = convert(Tensor{Any}, value_) - size_splits_ = convert(Tensor{Int64}, size_splits_) - split_dim_ = convert(Tensor{Int32}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - (value_,) = tf.tf_promote(value_) - (size_splits_,) = tf.tf_promote(size_splits_) - tf.add_input(desc, value_) - tf.add_input(desc, size_splits_) - tf.add_input(desc, split_dim_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) + begin + function split_v_graph(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SplitV") do + desc = tf.NodeDescription("SplitV") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + size_splits_ = convert(Tensor{Int64}, size_splits_) + begin + end + end + begin + split_dim_ = convert(Tensor{Int32}, split_dim_) + begin + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + end + end + begin + (value_,) = tf.tf_promote(value_) + end + begin + (size_splits_,) = tf.tf_promote(size_splits_) + end + end + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, size_splits_) + end + begin + tf.add_input(desc, split_dim_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_split + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + desc = tf.EagerOp("SplitV") + value_ = convert(tf.EagerTensor, value_) + size_splits_ = convert(tf.EagerTensor, size_splits_) + split_dim_ = convert(tf.EagerTensor, split_dim_) + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, size_splits_) + end + begin + tf.add_input(desc, split_dim_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + begin + desc["Tlen"] = tf.data_type(size_splits_) + end + res = tf.execute(desc) + node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) + else + split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_split - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function split_v_eager(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - desc = tf.EagerOp("SplitV") - value_ = convert(tf.EagerTensor, value_) - size_splits_ = convert(tf.EagerTensor, size_splits_) - split_dim_ = convert(tf.EagerTensor, split_dim_) - tf.add_input(desc, value_) - 
tf.add_input(desc, size_splits_) - tf.add_input(desc, split_dim_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end - desc["T"] = tf.data_type(value_) - desc["Tlen"] = tf.data_type(size_splits_) - res = tf.execute(desc) - node = tf.TapeNode(split_v, [value_, size_splits_, split_dim_], name=nothing, num_split=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function split_v(value_, size_splits_, split_dim_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - split_v_eager(value_, size_splits_, split_dim_; name=name, num_split=num_split) - else - split_v_graph(value_, size_splits_, split_dim_; name=name, num_split=num_split) - end - end end @@ -19551,52 +35471,98 @@ end """ begin - function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "Assign") do - desc = tf.NodeDescription("Assign") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if validate_shape !== nothing - desc["validate_shape"] = Base.Bool(validate_shape) + begin + function assign_graph(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "Assign") do + desc = tf.NodeDescription("Assign") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (ref_, value_) = tf.tf_promote(ref_, value_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if validate_shape !== nothing + desc["validate_shape"] = Base.Bool(validate_shape) + end + end + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + desc = tf.EagerOp("Assign") + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if validate_shape !== nothing + desc["validate_shape"] = Base.Bool(validate_shape) + end + end + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + else + assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) + end end - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function assign_eager(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - desc = 
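Ops with a statically known number of outputs, such as SplitV here, differ from the single-output case only in how results are collected: the graph wrapper wraps each output index of the one tf.Operation in its own tf.Tensor, and the eager wrapper returns the whole res vector rather than res[1]. A condensed sketch of the graph-side collection step, modeled on split_v_graph (hypothetical op name, same assumptions as the sketch above):

function my_split_graph(value_; name=nothing, num_split=nothing)
    local desc
    tf.with_op_name(name, "MySplit") do
        desc = tf.NodeDescription("MySplit")
        value_ = convert(Tensor{Any}, value_)
        tf.add_input(desc, value_)
        if num_split !== nothing
            desc["num_split"] = Base.Int(num_split)
        end
    end
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:num_split          # one tf.Tensor per op output
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end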
tf.EagerOp("Assign") - ref_ = convert(tf.EagerTensor, ref_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if validate_shape !== nothing - desc["validate_shape"] = Base.Bool(validate_shape) - end - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign, [ref_, value_], name=nothing, validate_shape=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign(ref_, value_; name=nothing, validate_shape=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_eager(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) - else - assign_graph(ref_, value_; name=name, validate_shape=validate_shape, use_locking=use_locking) - end - end end @@ -19606,58 +35572,100 @@ end """ begin - function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolWithArgmax") do - desc = tf.NodeDescription("MaxPoolWithArgmax") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function max_pool_with_argmax_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolWithArgmax") do + desc = tf.NodeDescription("MaxPoolWithArgmax") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolWithArgmax") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - if strides !== nothing - 
desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function max_pool_with_argmax_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("MaxPoolWithArgmax") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_with_argmax, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_with_argmax(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_with_argmax_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_with_argmax_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding) - end - end end @@ -19667,58 +35675,116 @@ end """ begin - function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedReluX") do - desc = tf.NodeDescription("QuantizedReluX") - features_ = convert(Tensor{Any}, features_) - max_value_ = convert(Tensor{Float32}, max_value_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, max_value_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedReluX") do + desc = tf.NodeDescription("QuantizedReluX") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + max_value_ = convert(Tensor{Float32}, max_value_) + begin + end + end + begin + min_features_ = convert(Tensor{Float32}, min_features_) + begin + end + end + begin + max_features_ = convert(Tensor{Float32}, max_features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, max_value_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedReluX") + features_ = convert(tf.EagerTensor, features_) + max_value_ = convert(tf.EagerTensor, max_value_) + min_features_ = convert(tf.EagerTensor, min_features_) + max_features_ = convert(tf.EagerTensor, 
max_features_) + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, max_value_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["Tinput"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizedReluX") - features_ = convert(tf.EagerTensor, features_) - max_value_ = convert(tf.EagerTensor, max_value_) - min_features_ = convert(tf.EagerTensor, min_features_) - max_features_ = convert(tf.EagerTensor, max_features_) - tf.add_input(desc, features_) - tf.add_input(desc, max_value_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["Tinput"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_relu_x, [features_, max_value_, min_features_, max_features_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu_x(features_, max_value_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu_x_eager(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu_x_graph(features_, max_value_, min_features_, max_features_; name=name, out_type=out_type) - end - end end @@ -19728,77 +35794,125 @@ end """ begin - function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "RandomShuffleQueue") do - desc = tf.NodeDescription("RandomShuffleQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function random_shuffle_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "RandomShuffleQueue") do + desc = tf.NodeDescription("RandomShuffleQueue") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] 
= map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_shuffle_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("RandomShuffleQueue") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if min_after_dequeue !== nothing + desc["min_after_dequeue"] = Base.Int(min_after_dequeue) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + else + random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) + end end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_shuffle_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("RandomShuffleQueue") - if 
component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if min_after_dequeue !== nothing - desc["min_after_dequeue"] = Base.Int(min_after_dequeue) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(random_shuffle_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, min_after_dequeue=nothing, seed=nothing, seed2=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - random_shuffle_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - else - random_shuffle_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed, seed2=seed2, container=container, shared_name=shared_name) - end - end end @@ -19808,35 +35922,63 @@ end """ begin - function fft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "FFT2D") do - desc = tf.NodeDescription("FFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function fft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT2D") do + desc = tf.NodeDescription("FFT2D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fft2d_eager(input_; name=nothing) - desc = tf.EagerOp("FFT2D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(fft2d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fft2d_eager(input_; name=nothing) + desc = tf.EagerOp("FFT2D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(fft2d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft2d(input_; name=nothing) - if tf.in_eager_mode() - fft2d_eager(input_; name=name) - else - fft2d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft2d(input_; name=nothing) + if tf.in_eager_mode() + 
fft2d_eager(input_; name=name) + else + fft2d_graph(input_; name=name) + end end - end + end end @@ -19846,49 +35988,89 @@ end """ begin - function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalThreadPoolDataset") do - desc = tf.NodeDescription("ExperimentalThreadPoolDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - thread_pool_ = convert(Tensor{Any}, thread_pool_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, thread_pool_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalThreadPoolDataset") do + desc = tf.NodeDescription("ExperimentalThreadPoolDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + thread_pool_ = convert(Tensor{Any}, thread_pool_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, thread_pool_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalThreadPoolDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + thread_pool_ = convert(tf.EagerTensor, thread_pool_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, thread_pool_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalThreadPoolDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - thread_pool_ = convert(tf.EagerTensor, thread_pool_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, thread_pool_) - if output_types !== nothing - 
desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_thread_pool_dataset, [input_dataset_, thread_pool_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_thread_pool_dataset(input_dataset_, thread_pool_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_thread_pool_dataset_eager(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_thread_pool_dataset_graph(input_dataset_, thread_pool_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -19898,67 +36080,119 @@ end """ begin - function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapUnstage") do - desc = tf.NodeDescription("OrderedMapUnstage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function ordered_map_unstage_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstage") do + desc = tf.NodeDescription("OrderedMapUnstage") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapUnstage") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if 
shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function ordered_map_unstage_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapUnstage") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_unstage, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_unstage_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_unstage_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -19968,55 +36202,99 @@ end """ begin - function experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - local desc - tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do - desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") - selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_) - data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_] - tf.add_input(desc, selector_input_dataset_) - tf.add_input(desc, data_input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function 
experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + local desc + tf.with_op_name(name, "ExperimentalDirectedInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalDirectedInterleaveDataset") + begin + begin + selector_input_dataset_ = convert(Tensor{Any}, selector_input_dataset_) + begin + end + end + begin + data_input_datasets_ = [convert(Tensor{Any}, x) for x = data_input_datasets_] + begin + end + end + end + begin + begin + tf.add_input(desc, selector_input_dataset_) + end + begin + tf.add_input(desc, data_input_datasets_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") + selector_input_dataset_ = convert(tf.EagerTensor, selector_input_dataset_) + data_input_datasets_ = convert(tf.EagerTensor, data_input_datasets_) + begin + begin + tf.add_input(desc, selector_input_dataset_) + end + begin + tf.add_input(desc, data_input_datasets_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) + if tf.in_eager_mode() + experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + else + experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) + end end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - desc = tf.EagerOp("ExperimentalDirectedInterleaveDataset") - selector_input_dataset_ = convert(tf.EagerTensor, selector_input_dataset_) - data_input_datasets_ = convert(tf.EagerTensor, data_input_datasets_) - tf.add_input(desc, selector_input_dataset_) - tf.add_input(desc, data_input_datasets_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if N !== nothing - 
desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_directed_interleave_dataset, [selector_input_dataset_, data_input_datasets_], name=nothing, output_types=nothing, output_shapes=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_directed_interleave_dataset(selector_input_dataset_, data_input_datasets_; name=nothing, output_types=nothing, output_shapes=nothing, N=nothing) - if tf.in_eager_mode() - experimental_directed_interleave_dataset_eager(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - else - experimental_directed_interleave_dataset_graph(selector_input_dataset_, data_input_datasets_; name=name, output_types=output_types, output_shapes=output_shapes, N=N) - end - end end @@ -20026,35 +36304,63 @@ end """ begin - function real_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Real") do - desc = tf.NodeDescription("Real") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function real_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Real") do + desc = tf.NodeDescription("Real") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function real_eager(input_; name=nothing) - desc = tf.EagerOp("Real") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(real, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function real_eager(input_; name=nothing) + desc = tf.EagerOp("Real") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(real, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real(input_; name=nothing) - if tf.in_eager_mode() - real_eager(input_; name=name) - else - real_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function real(input_; name=nothing) + if tf.in_eager_mode() + real_eager(input_; name=name) + else + real_graph(input_; name=name) + end end - end + end end @@ -20064,50 +36370,106 @@ end """ begin - function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtNGrad") do - desc = tf.NodeDescription("SparseSegmentSqrtNGrad") - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - output_dim0_ = convert(Tensor{Int32}, output_dim0_) - (grad_,) = tf.tf_promote(grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - 
tf.add_input(desc, output_dim0_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - desc = tf.EagerOp("SparseSegmentSqrtNGrad") - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - output_dim0_ = convert(tf.EagerTensor, output_dim0_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, output_dim0_) - desc["T"] = tf.data_type(grad_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) - else - sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + begin + function sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNGrad") do + desc = tf.NodeDescription("SparseSegmentSqrtNGrad") + begin + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + output_dim0_ = convert(Tensor{Int32}, output_dim0_) + begin + end + end + begin + (grad_,) = tf.tf_promote(grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, output_dim0_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtNGrad") + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + output_dim0_ = convert(tf.EagerTensor, output_dim0_) + begin + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, output_dim0_) + end + end + begin + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sqrt_n_grad, [grad_, indices_, segment_ids_, output_dim0_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_grad(grad_, indices_, segment_ids_, output_dim0_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_grad_eager(grad_, indices_, segment_ids_, output_dim0_; name=name) + else + sparse_segment_sqrt_n_grad_graph(grad_, indices_, segment_ids_, output_dim0_; name=name) + end end - end + end end @@ -20117,37 +36479,69 @@ 
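sparse_segment_sqrt_n_grad_graph above (like split_v_graph earlier) subtracts one from index-like inputs before handing them to TensorFlow: the Julia-facing API is 1-based, while the underlying kernels index from 0. The shift is expressed on the tensor itself, so it works for placeholders as well as constants and preserves the incoming element type:

indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)   # 1-based Julia index -> 0-based TF index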
end """ begin - function rfft2d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT2D") do - desc = tf.NodeDescription("RFFT2D") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function rfft2d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT2D") do + desc = tf.NodeDescription("RFFT2D") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rfft2d_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("RFFT2D") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rfft2d_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("RFFT2D") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(rfft2d, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft2d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - rfft2d_eager(input_, fft_length_; name=name) - else - rfft2d_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft2d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft2d_eager(input_, fft_length_; name=name) + else + rfft2d_graph(input_, fft_length_; name=name) + end end - end + end end @@ -20157,33 +36551,57 @@ end """ begin - function var_is_initialized_op_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "VarIsInitializedOp") do - desc = tf.NodeDescription("VarIsInitializedOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) + begin + function var_is_initialized_op_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "VarIsInitializedOp") do + desc = tf.NodeDescription("VarIsInitializedOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function var_is_initialized_op_eager(resource_; name=nothing) - desc = tf.EagerOp("VarIsInitializedOp") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - res = tf.execute(desc) - node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function var_is_initialized_op_eager(resource_; name=nothing) + desc = tf.EagerOp("VarIsInitializedOp") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, 
resource_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(var_is_initialized_op, [resource_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_is_initialized_op(resource_; name=nothing) - if tf.in_eager_mode() - var_is_initialized_op_eager(resource_; name=name) - else - var_is_initialized_op_graph(resource_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function var_is_initialized_op(resource_; name=nothing) + if tf.in_eager_mode() + var_is_initialized_op_eager(resource_; name=name) + else + var_is_initialized_op_graph(resource_; name=name) + end end - end + end end @@ -20193,41 +36611,65 @@ end """ begin - function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function boosted_trees_quantile_stream_resource_handle_op_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceHandleOp") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceHandleOp") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function boosted_trees_quantile_stream_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function boosted_trees_quantile_stream_resource_handle_op_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceHandleOp") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_handle_op, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) - else - boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) 
+ begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_handle_op(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_handle_op_eager(; name=name, container=container, shared_name=shared_name) + else + boosted_trees_quantile_stream_resource_handle_op_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -20237,40 +36679,78 @@ end """ begin - function atan2_graph(y_, x_; name=nothing) - local desc - tf.with_op_name(name, "Atan2") do - desc = tf.NodeDescription("Atan2") - y_ = convert(Tensor{Any}, y_) - x_ = convert(Tensor{Any}, x_) - (y_, x_) = tf.tf_promote(y_, x_) - tf.add_input(desc, y_) - tf.add_input(desc, x_) + begin + function atan2_graph(y_, x_; name=nothing) + local desc + tf.with_op_name(name, "Atan2") do + desc = tf.NodeDescription("Atan2") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (y_, x_) = tf.tf_promote(y_, x_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function atan2_eager(y_, x_; name=nothing) - desc = tf.EagerOp("Atan2") - y_ = convert(tf.EagerTensor, y_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, y_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(atan2, [y_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function atan2_eager(y_, x_; name=nothing) + desc = tf.EagerOp("Atan2") + y_ = convert(tf.EagerTensor, y_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(atan2, [y_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan2(y_, x_; name=nothing) - if tf.in_eager_mode() - atan2_eager(y_, x_; name=name) - else - atan2_graph(y_, x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atan2(y_, x_; name=nothing) + if tf.in_eager_mode() + atan2_eager(y_, x_; name=name) + else + atan2_graph(y_, x_; name=name) + end end - end + end end @@ -20280,65 +36760,121 @@ end """ begin - function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomPoisson") do - desc = tf.NodeDescription("RandomPoisson") - shape_ = convert(Tensor{Any}, shape_) - rate_ = convert(Tensor{Any}, rate_) - (rate_,) = tf.tf_promote(rate_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) + begin + function random_poisson_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoisson") do + desc = 
tf.NodeDescription("RandomPoisson") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + rate_ = convert(Tensor{Any}, rate_) + begin + end + end + begin + (rate_,) = tf.tf_promote(rate_) + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, rate_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + desc = tf.EagerOp("RandomPoisson") + shape_ = convert(tf.EagerTensor, shape_) + rate_ = convert(tf.EagerTensor, rate_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, rate_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["S"] = tf.data_type(shape_) + end + begin + desc["dtype"] = tf.data_type(rate_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) + if tf.in_eager_mode() + random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + else + random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) + end end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_poisson_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - desc = tf.EagerOp("RandomPoisson") - shape_ = convert(tf.EagerTensor, shape_) - rate_ = convert(tf.EagerTensor, rate_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["S"] = tf.data_type(shape_) - desc["dtype"] = tf.data_type(rate_) - res = tf.execute(desc) - node = tf.TapeNode(random_poisson, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, dtype=nothing) - if tf.in_eager_mode() - random_poisson_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) - else - random_poisson_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, dtype=dtype) - end - end end @@ -20348,53 
+36884,101 @@ end """ begin - function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - local desc - tf.with_op_name(name, "ReverseSequence") do - desc = tf.NodeDescription("ReverseSequence") - input_ = convert(Tensor{Any}, input_) - seq_lengths_ = convert(Tensor{Int64}, seq_lengths_) - (input_,) = tf.tf_promote(input_) - (seq_lengths_,) = tf.tf_promote(seq_lengths_) - tf.add_input(desc, input_) - tf.add_input(desc, seq_lengths_) - if seq_dim !== nothing - desc["seq_dim"] = Base.Int(seq_dim) + begin + function reverse_sequence_graph(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + local desc + tf.with_op_name(name, "ReverseSequence") do + desc = tf.NodeDescription("ReverseSequence") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + seq_lengths_ = convert(Tensor{Int64}, seq_lengths_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (seq_lengths_,) = tf.tf_promote(seq_lengths_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, seq_lengths_) + end + end + begin + begin + if seq_dim !== nothing + desc["seq_dim"] = Base.Int(seq_dim) + end + end + begin + if batch_dim !== nothing + desc["batch_dim"] = Base.Int(batch_dim) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + desc = tf.EagerOp("ReverseSequence") + input_ = convert(tf.EagerTensor, input_) + seq_lengths_ = convert(tf.EagerTensor, seq_lengths_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, seq_lengths_) + end + end + begin + begin + if seq_dim !== nothing + desc["seq_dim"] = Base.Int(seq_dim) + end + end + begin + if batch_dim !== nothing + desc["batch_dim"] = Base.Int(batch_dim) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tlen"] = tf.data_type(seq_lengths_) + end + res = tf.execute(desc) + node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) + if tf.in_eager_mode() + reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + else + reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) + end end - if batch_dim !== nothing - desc["batch_dim"] = Base.Int(batch_dim) - end - end - tf.Tensor(tf.Operation(desc)) end - function reverse_sequence_eager(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - desc = tf.EagerOp("ReverseSequence") - input_ = convert(tf.EagerTensor, input_) - seq_lengths_ = convert(tf.EagerTensor, seq_lengths_) - tf.add_input(desc, input_) - tf.add_input(desc, seq_lengths_) - if seq_dim !== nothing - desc["seq_dim"] = Base.Int(seq_dim) - end - if batch_dim !== nothing - desc["batch_dim"] = Base.Int(batch_dim) - end - desc["T"] = tf.data_type(input_) - desc["Tlen"] = tf.data_type(seq_lengths_) - res = tf.execute(desc) - node = tf.TapeNode(reverse_sequence, [input_, seq_lengths_], name=nothing, seq_dim=nothing, batch_dim=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_sequence(input_, seq_lengths_; name=nothing, seq_dim=nothing, batch_dim=nothing) - if tf.in_eager_mode() - reverse_sequence_eager(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) - else - reverse_sequence_graph(input_, seq_lengths_; name=name, seq_dim=seq_dim, batch_dim=batch_dim) - end - end end @@ -20404,41 +36988,73 @@ end An op which emits a single Tensor value from an XLA computation. """ begin - function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "OutfeedEnqueue") do - desc = tf.NodeDescription("OutfeedEnqueue") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function outfeed_enqueue_graph(input_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueue") do + desc = tf.NodeDescription("OutfeedEnqueue") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) + desc = tf.EagerOp("OutfeedEnqueue") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["dtype"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + outfeed_enqueue_eager(input_; name=name, dtype=dtype) + else + outfeed_enqueue_graph(input_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function outfeed_enqueue_eager(input_; name=nothing, dtype=nothing) - desc = tf.EagerOp("OutfeedEnqueue") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(outfeed_enqueue, [input_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue(input_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - outfeed_enqueue_eager(input_; name=name, dtype=dtype) - else - outfeed_enqueue_graph(input_; name=name, dtype=dtype) - end - end end @@ -20448,40 +37064,78 @@ end """ begin - function sub_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Sub") do - desc = tf.NodeDescription("Sub") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function sub_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Sub") do + desc = tf.NodeDescription("Sub") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + 
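# (The empty begin/end emitted right after each input conversion is a slot
# the generator reserves for per-input post-processing; for example, in
# cumprod_graph below it carries
#     axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1)
# to shift Julia's 1-based axis to TensorFlow's 0-based convention.)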
end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sub_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Sub") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(sub, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sub_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Sub") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(sub, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sub(x_, y_; name=nothing) - if tf.in_eager_mode() - sub_eager(x_, y_; name=name) - else - sub_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sub(x_, y_; name=nothing) + if tf.in_eager_mode() + sub_eager(x_, y_; name=name) + else + sub_graph(x_, y_; name=name) + end end - end + end end @@ -20491,48 +37145,86 @@ end """ begin - function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) - local desc - tf.with_op_name(name, "StringSplit") do - desc = tf.NodeDescription("StringSplit") - input_ = convert(Tensor{String}, input_) - delimiter_ = convert(Tensor{String}, delimiter_) - tf.add_input(desc, input_) - tf.add_input(desc, delimiter_) - if skip_empty !== nothing - desc["skip_empty"] = Base.Bool(skip_empty) + begin + function string_split_graph(input_, delimiter_; name=nothing, skip_empty=nothing) + local desc + tf.with_op_name(name, "StringSplit") do + desc = tf.NodeDescription("StringSplit") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + begin + delimiter_ = convert(Tensor{String}, delimiter_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, delimiter_) + end + end + begin + begin + if skip_empty !== nothing + desc["skip_empty"] = Base.Bool(skip_empty) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) + desc = tf.EagerOp("StringSplit") + input_ = convert(tf.EagerTensor, input_) + delimiter_ = convert(tf.EagerTensor, delimiter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, delimiter_) + end + end + begin + begin + if skip_empty !== nothing + desc["skip_empty"] = Base.Bool(skip_empty) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
string_split(input_, delimiter_; name=nothing, skip_empty=nothing) + if tf.in_eager_mode() + string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) + else + string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function string_split_eager(input_, delimiter_; name=nothing, skip_empty=nothing) - desc = tf.EagerOp("StringSplit") - input_ = convert(tf.EagerTensor, input_) - delimiter_ = convert(tf.EagerTensor, delimiter_) - tf.add_input(desc, input_) - tf.add_input(desc, delimiter_) - if skip_empty !== nothing - desc["skip_empty"] = Base.Bool(skip_empty) - end - res = tf.execute(desc) - node = tf.TapeNode(string_split, [input_, delimiter_], name=nothing, skip_empty=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split(input_, delimiter_; name=nothing, skip_empty=nothing) - if tf.in_eager_mode() - string_split_eager(input_, delimiter_; name=name, skip_empty=skip_empty) - else - string_split_graph(input_, delimiter_; name=name, skip_empty=skip_empty) - end - end end @@ -20542,54 +37234,102 @@ end """ begin - function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - local desc - tf.with_op_name(name, "Cumprod") do - desc = tf.NodeDescription("Cumprod") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) + begin + function cumprod_graph(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + local desc + tf.with_op_name(name, "Cumprod") do + desc = tf.NodeDescription("Cumprod") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + axis_ = convert(Tensor{Int32}, axis_) + begin + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + end + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + desc = tf.EagerOp("Cumprod") + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if exclusive !== nothing + desc["exclusive"] = Base.Bool(exclusive) + end + end + begin + if reverse !== nothing + desc["reverse"] = Base.Bool(reverse) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Tidx"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 
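(source location recorded by the code generator for each tf.@op wrapper)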
=# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) + if tf.in_eager_mode() + cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + else + cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function cumprod_eager(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - desc = tf.EagerOp("Cumprod") - x_ = convert(tf.EagerTensor, x_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if exclusive !== nothing - desc["exclusive"] = Base.Bool(exclusive) - end - if reverse !== nothing - desc["reverse"] = Base.Bool(reverse) - end - desc["T"] = tf.data_type(x_) - desc["Tidx"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(cumprod, [x_, axis_], name=nothing, exclusive=nothing, reverse=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cumprod(x_, axis_; name=nothing, exclusive=nothing, reverse=nothing) - if tf.in_eager_mode() - cumprod_eager(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - else - cumprod_graph(x_, axis_; name=name, exclusive=exclusive, reverse=reverse) - end - end end @@ -20599,58 +37339,116 @@ end """ begin - function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "QuantizedResizeBilinear") do - desc = tf.NodeDescription("QuantizedResizeBilinear") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function quantized_resize_bilinear_graph(images_, size_, min_, max_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "QuantizedResizeBilinear") do + desc = tf.NodeDescription("QuantizedResizeBilinear") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + min_ = convert(Tensor{Float32}, min_) + begin + end + end + begin + max_ = convert(Tensor{Float32}, max_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("QuantizedResizeBilinear") + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin 
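# Optional attributes are written into the op description only when the
# caller actually supplied them; when left as `nothing`, the default
# registered for the kernel applies.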
+ if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) + else + quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_resize_bilinear_eager(images_, size_, min_, max_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("QuantizedResizeBilinear") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - min_ = convert(tf.EagerTensor, min_) - max_ = convert(tf.EagerTensor, max_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_resize_bilinear, [images_, size_, min_, max_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_resize_bilinear(images_, size_, min_, max_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - quantized_resize_bilinear_eager(images_, size_, min_, max_; name=name, align_corners=align_corners) - else - quantized_resize_bilinear_graph(images_, size_, min_, max_; name=name, align_corners=align_corners) - end - end end @@ -20660,78 +37458,136 @@ end """ begin - function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSingleExample") do - desc = tf.NodeDescription("ParseSingleExample") - serialized_ = convert(Tensor{String}, serialized_) - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, dense_defaults_) - if num_sparse !== nothing - desc["num_sparse"] = Base.Int(num_sparse) - end - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) + begin + function parse_single_example_graph(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSingleExample") do + desc = tf.NodeDescription("ParseSingleExample") + begin + begin + serialized_ = convert(Tensor{String}, serialized_) + begin + end + end + begin + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, 
dense_defaults_) + end + end + begin + begin + if num_sparse !== nothing + desc["num_sparse"] = Base.Int(num_sparse) + end + end + begin + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + end + begin + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + desc = tf.EagerOp("ParseSingleExample") + serialized_ = convert(tf.EagerTensor, serialized_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, dense_defaults_) + end + end + begin + begin + if num_sparse !== nothing + desc["num_sparse"] = Base.Int(num_sparse) + end + end + begin + if sparse_keys !== nothing + desc["sparse_keys"] = map(Base.identity, sparse_keys) + end + end + begin + if dense_keys !== nothing + desc["dense_keys"] = map(Base.identity, dense_keys) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + if tf.in_eager_mode() + parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + else + parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + end end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function parse_single_example_eager(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) 
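# Note that parse_single_example_eager converts the list-valued input with
# a single convert(tf.EagerTensor, dense_defaults_), whereas the graph
# builder converts element-wise via
#     [convert(Tensor{Any}, x) for x = dense_defaults_]
# so the eager path relies on that conversion accepting a collection.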
- desc = tf.EagerOp("ParseSingleExample") - serialized_ = convert(tf.EagerTensor, serialized_) - dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) - tf.add_input(desc, serialized_) - tf.add_input(desc, dense_defaults_) - if num_sparse !== nothing - desc["num_sparse"] = Base.Int(num_sparse) - end - if sparse_keys !== nothing - desc["sparse_keys"] = map(Base.identity, sparse_keys) - end - if dense_keys !== nothing - desc["dense_keys"] = map(Base.identity, dense_keys) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(parse_single_example, [serialized_, dense_defaults_], name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_single_example(serialized_, dense_defaults_; name=nothing, num_sparse=nothing, sparse_keys=nothing, dense_keys=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - if tf.in_eager_mode() - parse_single_example_eager(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - else - parse_single_example_graph(serialized_, dense_defaults_; name=name, num_sparse=num_sparse, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - end - end end @@ -20741,41 +37597,73 @@ end """ begin - function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "IsVariableInitialized") do - desc = tf.NodeDescription("IsVariableInitialized") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function is_variable_initialized_graph(ref_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "IsVariableInitialized") do + desc = tf.NodeDescription("IsVariableInitialized") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + (ref_,) = tf.tf_promote(ref_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) - desc = tf.EagerOp("IsVariableInitialized") - ref_ = convert(tf.EagerTensor, ref_) - tf.add_input(desc, ref_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(ref_) - res = tf.execute(desc) - node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_variable_initialized_eager(ref_; name=nothing, dtype=nothing) + desc = tf.EagerOp("IsVariableInitialized") + ref_ = convert(tf.EagerTensor, ref_) + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + 
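# In eager mode the type attribute is filled in from the concrete input
# handle via tf.data_type; this takes the place of the tf.tf_promote pass
# that the graph builder uses to reconcile input types.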
desc["dtype"] = tf.data_type(ref_) + end + res = tf.execute(desc) + node = tf.TapeNode(is_variable_initialized, [ref_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - is_variable_initialized_eager(ref_; name=name, dtype=dtype) - else - is_variable_initialized_graph(ref_; name=name, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_variable_initialized(ref_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + is_variable_initialized_eager(ref_; name=name, dtype=dtype) + else + is_variable_initialized_graph(ref_; name=name, dtype=dtype) + end end - end + end end @@ -20785,52 +37673,104 @@ end """ begin - function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterSub") do - desc = tf.NodeDescription("ResourceScatterSub") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_sub_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterSub") do + desc = tf.NodeDescription("ResourceScatterSub") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterSub") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + 
resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_sub_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterSub") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_sub, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_sub(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_sub_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_sub_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -20840,41 +37780,65 @@ end """ begin - function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do - desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function experimental_stats_aggregator_handle_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "ExperimentalStatsAggregatorHandle") do + desc = tf.NodeDescription("ExperimentalStatsAggregatorHandle") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_stats_aggregator_handle_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("ExperimentalStatsAggregatorHandle") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_stats_aggregator_handle, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) - else - experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_stats_aggregator_handle(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + experimental_stats_aggregator_handle_eager(; name=name, container=container, shared_name=shared_name) + else + experimental_stats_aggregator_handle_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -20884,97 +37848,185 @@ end """ begin - function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "CudnnRNNV2") do - desc = tf.NodeDescription("CudnnRNNV2") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) + begin + function cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "CudnnRNNV2") do + desc = tf.NodeDescription("CudnnRNNV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_h_ = convert(Tensor{Any}, input_h_) + begin + end + end + begin + input_c_ = convert(Tensor{Any}, input_c_) + begin + end + end + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + (input_, input_h_, input_c_, params_) = tf.tf_promote(input_, input_h_, input_c_, params_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + 
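# Every generated *_eager function follows the same sequence: convert the
# inputs to tf.EagerTensor, register them with tf.add_input, copy any
# caller-supplied attributes into the op description, infer the "T"
# attribute from the inputs with tf.data_type, then tf.execute the op and
# record a tf.TapeNode so the result participates in gradient taping.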
desc = tf.EagerOp("CudnnRNNV2") + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_h_) + end + begin + desc["T"] = tf.data_type(input_c_) + end + begin + desc["T"] = tf.data_type(params_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) + if tf.in_eager_mode() + cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + else + cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) + end end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - desc = tf.EagerOp("CudnnRNNV2") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== 
nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnnv2, [input_, input_h_, input_c_, params_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnnv2(input_, input_h_, input_c_, params_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, is_training=nothing) - if tf.in_eager_mode() - cudnn_rnnv2_eager(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - else - cudnn_rnnv2_graph(input_, input_h_, input_c_, params_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2, is_training=is_training) - end - end end @@ -20984,46 +38036,88 @@ end """ begin - function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "AssignAdd") do - desc = tf.NodeDescription("AssignAdd") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function assign_add_graph(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignAdd") do + desc = tf.NodeDescription("AssignAdd") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (ref_, value_) = tf.tf_promote(ref_, value_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("AssignAdd") + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_add_eager(ref_, value_; name=name, 
use_locking=use_locking) + else + assign_add_graph(ref_, value_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) end - function assign_add_eager(ref_, value_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("AssignAdd") - ref_ = convert(tf.EagerTensor, ref_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign_add, [ref_, value_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_add(ref_, value_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_add_eager(ref_, value_; name=name, use_locking=use_locking) - else - assign_add_graph(ref_, value_; name=name, use_locking=use_locking) - end - end end @@ -21033,45 +38127,77 @@ end """ begin - function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TensorDataset") do - desc = tf.NodeDescription("TensorDataset") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function tensor_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorDataset") do + desc = tf.NodeDescription("TensorDataset") + begin + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TensorDataset") + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_dataset_eager(components_; name=nothing, 
Toutput_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("TensorDataset") - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - tensor_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - else - tensor_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - end - end end @@ -21081,41 +38207,73 @@ end """ begin - function bucketize_graph(input_; name=nothing, boundaries=nothing) - local desc - tf.with_op_name(name, "Bucketize") do - desc = tf.NodeDescription("Bucketize") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if boundaries !== nothing - desc["boundaries"] = map(Base.identity, boundaries) + begin + function bucketize_graph(input_; name=nothing, boundaries=nothing) + local desc + tf.with_op_name(name, "Bucketize") do + desc = tf.NodeDescription("Bucketize") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if boundaries !== nothing + desc["boundaries"] = map(Base.identity, boundaries) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bucketize_eager(input_; name=nothing, boundaries=nothing) - desc = tf.EagerOp("Bucketize") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if boundaries !== nothing - desc["boundaries"] = map(Base.identity, boundaries) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bucketize_eager(input_; name=nothing, boundaries=nothing) + desc = tf.EagerOp("Bucketize") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if boundaries !== nothing + desc["boundaries"] = map(Base.identity, boundaries) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(bucketize, [input_], name=nothing, boundaries=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing) - if tf.in_eager_mode() - bucketize_eager(input_; name=name, boundaries=boundaries) - else - bucketize_graph(input_; name=name, boundaries=boundaries) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bucketize(input_; name=nothing, boundaries=nothing) + if tf.in_eager_mode() + bucketize_eager(input_; name=name, boundaries=boundaries) + else + bucketize_graph(input_; 
name=name, boundaries=boundaries) + end end - end + end end @@ -21125,53 +38283,109 @@ end """ begin - function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceMax") do - desc = tf.NodeDescription("SparseReduceMax") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMax") do + desc = tf.NodeDescription("SparseReduceMax") + begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_values_ = convert(Tensor{Any}, input_values_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + begin + end + end + begin + (input_values_,) = tf.tf_promote(input_values_) + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceMax") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, 
keep_dims=nothing) - desc = tf.EagerOp("SparseReduceMax") - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_values_ = convert(tf.EagerTensor, input_values_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_max, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_max_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_max_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - end - end end @@ -21181,52 +38395,98 @@ end """ begin - function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradWithShape") do - desc = tf.NodeDescription("TensorArrayGradWithShape") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - tf.add_input(desc, shape_to_prepend_) - if source !== nothing - desc["source"] = Base.String(source) + begin + function tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradWithShape") do + desc = tf.NodeDescription("TensorArrayGradWithShape") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + shape_to_prepend_ = convert(Tensor{Int32}, shape_to_prepend_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + begin + tf.add_input(desc, shape_to_prepend_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradWithShape") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + begin + tf.add_input(desc, shape_to_prepend_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) + if length(res) >= 1 + 
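# Multi-output ops such as TensorArrayGradWithShape return the whole
# result list `res` (the graph builder likewise returns a vector of
# tf.Tensor outputs), while single-output ops return res[1]; in both
# cases the tape node is attached to the first result.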
tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + else + tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - desc = tf.EagerOp("TensorArrayGradWithShape") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - shape_to_prepend_ = convert(tf.EagerTensor, shape_to_prepend_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - tf.add_input(desc, shape_to_prepend_) - if source !== nothing - desc["source"] = Base.String(source) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_with_shape, [handle_, flow_in_, shape_to_prepend_], name=nothing, source=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_with_shape(handle_, flow_in_, shape_to_prepend_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_with_shape_eager(handle_, flow_in_, shape_to_prepend_; name=name, source=source) - else - tensor_array_grad_with_shape_graph(handle_, flow_in_, shape_to_prepend_; name=name, source=source) - end - end end @@ -21236,58 +38496,92 @@ end Retrieve embedding parameters for a single table. 
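Returns four tensors holding the table's parameters, accumulators, weights, and
benefits for the shard selected by `shard_id` out of `num_shards`. A minimal
call sketch (hypothetical shard values; assumes the wrapper defined below is in
scope):

    params, accum, weights, benefits =
        retrieve_tpu_embedding_mdl_adagrad_light_parameters(num_shards=1, shard_id=0)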
""" begin - function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMDLAdagradLightParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingMDLAdagradLightParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing 
- desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_mdl_adagrad_light_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_mdl_adagrad_light_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_mdl_adagrad_light_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_mdl_adagrad_light_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -21297,33 +38591,57 @@ end """ begin - function tensor_array_close_v3_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayCloseV3") do - desc = tf.NodeDescription("TensorArrayCloseV3") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) + begin + function tensor_array_close_v3_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV3") do + desc = tf.NodeDescription("TensorArrayCloseV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_close_v3_eager(handle_; name=nothing) - desc = tf.EagerOp("TensorArrayCloseV3") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_array_close_v3_eager(handle_; name=nothing) + desc = tf.EagerOp("TensorArrayCloseV3") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close_v3, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v3(handle_; name=nothing) - if tf.in_eager_mode() - tensor_array_close_v3_eager(handle_; name=name) - else - tensor_array_close_v3_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v3(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_v3_eager(handle_; name=name) + else + tensor_array_close_v3_graph(handle_; name=name) + end end - end + end end @@ -21333,49 +38651,105 @@ end """ begin - function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do - desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") - overlaps_ = convert(Tensor{Float32}, overlaps_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_) - 
score_threshold_ = convert(Tensor{Float32}, score_threshold_) - tf.add_input(desc, overlaps_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, overlap_threshold_) - tf.add_input(desc, score_threshold_) - end - tf.Tensor(tf.Operation(desc)) - end - function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") - overlaps_ = convert(tf.EagerTensor, overlaps_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_ = convert(tf.EagerTensor, max_output_size_) - overlap_threshold_ = convert(tf.EagerTensor, overlap_threshold_) - score_threshold_ = convert(tf.EagerTensor, score_threshold_) - tf.add_input(desc, overlaps_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - tf.add_input(desc, overlap_threshold_) - tf.add_input(desc, score_threshold_) - res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) - if tf.in_eager_mode() - non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) - else - non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + begin + function non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppressionWithOverlaps") do + desc = tf.NodeDescription("NonMaxSuppressionWithOverlaps") + begin + begin + overlaps_ = convert(Tensor{Float32}, overlaps_) + begin + end + end + begin + scores_ = convert(Tensor{Float32}, scores_) + begin + end + end + begin + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + begin + end + end + begin + overlap_threshold_ = convert(Tensor{Float32}, overlap_threshold_) + begin + end + end + begin + score_threshold_ = convert(Tensor{Float32}, score_threshold_) + begin + end + end + end + begin + begin + tf.add_input(desc, overlaps_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, overlap_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + desc = tf.EagerOp("NonMaxSuppressionWithOverlaps") + overlaps_ = convert(tf.EagerTensor, overlaps_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + overlap_threshold_ = convert(tf.EagerTensor, overlap_threshold_) + score_threshold_ = convert(tf.EagerTensor, score_threshold_) + begin + begin + tf.add_input(desc, overlaps_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + begin + tf.add_input(desc, overlap_threshold_) + end + begin + tf.add_input(desc, score_threshold_) + end + end + begin + end + res = 
tf.execute(desc) + node = tf.TapeNode(non_max_suppression_with_overlaps, [overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression_with_overlaps(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=nothing) + if tf.in_eager_mode() + non_max_suppression_with_overlaps_eager(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + else + non_max_suppression_with_overlaps_graph(overlaps_, scores_, max_output_size_, overlap_threshold_, score_threshold_; name=name) + end end - end + end end @@ -21385,53 +38759,93 @@ end """ begin - function pack_graph(values_; name=nothing, N=nothing, axis=nothing) - local desc - tf.with_op_name(name, "Pack") do - desc = tf.NodeDescription("Pack") - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) + begin + function pack_graph(values_; name=nothing, N=nothing, axis=nothing) + local desc + tf.with_op_name(name, "Pack") do + desc = tf.NodeDescription("Pack") + begin + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function pack_eager(values_; name=nothing, N=nothing, axis=nothing) + desc = tf.EagerOp("Pack") + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pack(values_; name=nothing, N=nothing, axis=nothing) + if tf.in_eager_mode() + pack_eager(values_; name=name, N=N, axis=axis) + else + pack_graph(values_; name=name, N=N, axis=axis) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function pack_eager(values_; name=nothing, N=nothing, axis=nothing) - desc = tf.EagerOp("Pack") - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(pack, [values_], name=nothing, N=nothing, axis=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function pack(values_; name=nothing, N=nothing, axis=nothing) - if tf.in_eager_mode() - pack_eager(values_; name=name, N=N, axis=axis) - else - pack_graph(values_; name=name, N=N, axis=axis) - end - end end @@ -21441,43 +38855,79 @@ end """ begin - function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradV2") do - desc = tf.NodeDescription("TensorArrayGradV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) + begin + function tensor_array_grad_v2_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradV2") do + desc = tf.NodeDescription("TensorArrayGradV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradV2") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_grad_v2_eager(handle_, flow_in_; name=nothing, source=nothing) - desc = tf.EagerOp("TensorArrayGradV2") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_v2, [handle_, flow_in_], name=nothing, source=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v2(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_v2_eager(handle_, flow_in_; name=name, source=source) - else - tensor_array_grad_v2_graph(handle_, flow_in_; name=name, source=source) - end - end end @@ -21487,45 +38937,85 @@ end """ begin - function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignSubVariableOp") do - desc = tf.NodeDescription("AssignSubVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - 
(value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function assign_sub_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignSubVariableOp") do + desc = tf.NodeDescription("AssignSubVariableOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignSubVariableOp") + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["dtype"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function assign_sub_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AssignSubVariableOp") - resource_ = convert(tf.EagerTensor, resource_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign_sub_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - assign_sub_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_sub_variable_op_graph(resource_, value_; name=name, dtype=dtype) - end - end end @@ -21535,33 +39025,57 @@ end """ begin - function batch_fft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT2D") do - desc = tf.NodeDescription("BatchFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_fft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT2D") do + desc = tf.NodeDescription("BatchFFT2D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function 
batch_fft2d_eager(input_; name=nothing) - desc = tf.EagerOp("BatchFFT2D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_fft2d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_fft2d_eager(input_; name=nothing) + desc = tf.EagerOp("BatchFFT2D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_fft2d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft2d(input_; name=nothing) - if tf.in_eager_mode() - batch_fft2d_eager(input_; name=name) - else - batch_fft2d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft2d(input_; name=nothing) + if tf.in_eager_mode() + batch_fft2d_eager(input_; name=name) + else + batch_fft2d_graph(input_; name=name) + end end - end + end end @@ -21571,33 +39085,57 @@ end """ begin - function close_summary_writer_graph(writer_; name=nothing) - local desc - tf.with_op_name(name, "CloseSummaryWriter") do - desc = tf.NodeDescription("CloseSummaryWriter") - writer_ = convert(Tensor{Any}, writer_) - tf.add_input(desc, writer_) + begin + function close_summary_writer_graph(writer_; name=nothing) + local desc + tf.with_op_name(name, "CloseSummaryWriter") do + desc = tf.NodeDescription("CloseSummaryWriter") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function close_summary_writer_eager(writer_; name=nothing) - desc = tf.EagerOp("CloseSummaryWriter") - writer_ = convert(tf.EagerTensor, writer_) - tf.add_input(desc, writer_) - res = tf.execute(desc) - node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function close_summary_writer_eager(writer_; name=nothing) + desc = tf.EagerOp("CloseSummaryWriter") + writer_ = convert(tf.EagerTensor, writer_) + begin + begin + tf.add_input(desc, writer_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(close_summary_writer, [writer_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function close_summary_writer(writer_; name=nothing) - if tf.in_eager_mode() - close_summary_writer_eager(writer_; name=name) - else - close_summary_writer_graph(writer_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function close_summary_writer(writer_; name=nothing) + if tf.in_eager_mode() + close_summary_writer_eager(writer_; name=name) + else + close_summary_writer_graph(writer_; name=name) + end end - end + end end @@ -21607,35 +39145,63 @@ end """ begin - function rank_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Rank") do - desc = tf.NodeDescription("Rank") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function rank_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Rank") do + 
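# with_op_name scopes the node under `name` (when one is given) while the block
# fills in the NodeDescription that becomes the graph operation.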
desc = tf.NodeDescription("Rank") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rank_eager(input_; name=nothing) - desc = tf.EagerOp("Rank") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(rank, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rank_eager(input_; name=nothing) + desc = tf.EagerOp("Rank") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(rank, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rank(input_; name=nothing) - if tf.in_eager_mode() - rank_eager(input_; name=name) - else - rank_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rank(input_; name=nothing) + if tf.in_eager_mode() + rank_eager(input_; name=name) + else + rank_graph(input_; name=name) + end end - end + end end @@ -21645,35 +39211,63 @@ end """ begin - function fft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "FFT3D") do - desc = tf.NodeDescription("FFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function fft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT3D") do + desc = tf.NodeDescription("FFT3D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fft3d_eager(input_; name=nothing) - desc = tf.EagerOp("FFT3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(fft3d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fft3d_eager(input_; name=nothing) + desc = tf.EagerOp("FFT3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(fft3d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft3d(input_; name=nothing) - if tf.in_eager_mode() - fft3d_eager(input_; name=name) - else - fft3d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft3d(input_; name=nothing) + if tf.in_eager_mode() + fft3d_eager(input_; name=name) + else + fft3d_graph(input_; name=name) + end end - end + end end @@ -21683,76 +39277,178 @@ end """ begin - function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, 
lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyFtrl") do - desc = tf.NodeDescription("ApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyFtrl") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(linear_) - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + begin + function apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrl") do + desc = tf.NodeDescription("ApplyFtrl") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, 
lr_power_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyFtrl") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(linear_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -21762,41 +39458,65 @@ end """ begin - function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) - local desc - tf.with_op_name(name, "Abort") do - desc = tf.NodeDescription("Abort") - if error_msg !== nothing - desc["error_msg"] = Base.String(error_msg) - end - if exit_without_error !== nothing - desc["exit_without_error"] = Base.Bool(exit_without_error) + begin + function abort_graph(; name=nothing, error_msg=nothing, exit_without_error=nothing) + local desc + tf.with_op_name(name, "Abort") do + desc = tf.NodeDescription("Abort") + begin + end + begin + end + begin + begin + if error_msg !== nothing + desc["error_msg"] = Base.String(error_msg) + end + end + begin + if exit_without_error !== nothing + desc["exit_without_error"] = Base.Bool(exit_without_error) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function abort_eager(; name=nothing, error_msg=nothing, 
exit_without_error=nothing) - desc = tf.EagerOp("Abort") - if error_msg !== nothing - desc["error_msg"] = Base.String(error_msg) - end - if exit_without_error !== nothing - desc["exit_without_error"] = Base.Bool(exit_without_error) - end - res = tf.execute(desc) - node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function abort_eager(; name=nothing, error_msg=nothing, exit_without_error=nothing) + desc = tf.EagerOp("Abort") + begin + end + begin + begin + if error_msg !== nothing + desc["error_msg"] = Base.String(error_msg) + end + end + begin + if exit_without_error !== nothing + desc["exit_without_error"] = Base.Bool(exit_without_error) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(abort, [], name=nothing, error_msg=nothing, exit_without_error=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) - if tf.in_eager_mode() - abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) - else - abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function abort(; name=nothing, error_msg=nothing, exit_without_error=nothing) + if tf.in_eager_mode() + abort_eager(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + else + abort_graph(; name=name, error_msg=error_msg, exit_without_error=exit_without_error) + end end - end + end end @@ -21806,51 +39526,87 @@ end """ begin - function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - local desc - tf.with_op_name(name, "AudioSpectrogram") do - desc = tf.NodeDescription("AudioSpectrogram") - input_ = convert(Tensor{Float32}, input_) - tf.add_input(desc, input_) - if window_size !== nothing - desc["window_size"] = Base.Int(window_size) + begin + function audio_spectrogram_graph(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + local desc + tf.with_op_name(name, "AudioSpectrogram") do + desc = tf.NodeDescription("AudioSpectrogram") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + end + begin + if stride !== nothing + desc["stride"] = Base.Int(stride) + end + end + begin + if magnitude_squared !== nothing + desc["magnitude_squared"] = Base.Bool(magnitude_squared) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + desc = tf.EagerOp("AudioSpectrogram") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + end + begin + if stride !== nothing + desc["stride"] = Base.Int(stride) + end + end + begin + if magnitude_squared !== nothing + desc["magnitude_squared"] = Base.Bool(magnitude_squared) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(audio_spectrogram, [input_], 
name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) + if tf.in_eager_mode() + audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + else + audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) + end end - if stride !== nothing - desc["stride"] = Base.Int(stride) - end - if magnitude_squared !== nothing - desc["magnitude_squared"] = Base.Bool(magnitude_squared) - end - end - tf.Tensor(tf.Operation(desc)) - end - function audio_spectrogram_eager(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - desc = tf.EagerOp("AudioSpectrogram") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if window_size !== nothing - desc["window_size"] = Base.Int(window_size) - end - if stride !== nothing - desc["stride"] = Base.Int(stride) - end - if magnitude_squared !== nothing - desc["magnitude_squared"] = Base.Bool(magnitude_squared) - end - res = tf.execute(desc) - node = tf.TapeNode(audio_spectrogram, [input_], name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_spectrogram(input_; name=nothing, window_size=nothing, stride=nothing, magnitude_squared=nothing) - if tf.in_eager_mode() - audio_spectrogram_eager(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) - else - audio_spectrogram_graph(input_; name=name, window_size=window_size, stride=stride, magnitude_squared=magnitude_squared) - end - end end @@ -21860,39 +39616,67 @@ end """ begin - function variable_shape_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "VariableShape") do - desc = tf.NodeDescription("VariableShape") - input_ = convert(Tensor{Any}, input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function variable_shape_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "VariableShape") do + desc = tf.NodeDescription("VariableShape") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function variable_shape_eager(input_; name=nothing, out_type=nothing) - desc = tf.EagerOp("VariableShape") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - res = tf.execute(desc) - node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function variable_shape_eager(input_; name=nothing, out_type=nothing) + desc = tf.EagerOp("VariableShape") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) 
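# Eager path: the input has been converted to an EagerTensor and attached to
# the op descriptor; the optional `out_type` attribute is set next, and
# tf.execute then runs the kernel immediately instead of emitting a graph node.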
+ end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(variable_shape, [input_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - variable_shape_eager(input_; name=name, out_type=out_type) - else - variable_shape_graph(input_; name=name, out_type=out_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable_shape(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + variable_shape_eager(input_; name=name, out_type=out_type) + else + variable_shape_graph(input_; name=name, out_type=out_type) + end end - end + end end @@ -21902,59 +39686,95 @@ end """ begin - function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FIFOQueueV2") do - desc = tf.NodeDescription("FIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function fifo_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueueV2") do + desc = tf.NodeDescription("FIFOQueueV2") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FIFOQueueV2") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_v2_graph(; name=name, 
component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fifo_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("FIFOQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(fifo_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fifo_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - fifo_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -21964,53 +39784,85 @@ end """ begin - function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Variable") do - desc = tf.NodeDescription("Variable") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function variable_graph(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Variable") do + desc = tf.NodeDescription("Variable") + begin + end + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function variable_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Variable") + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = 
tf.execute(desc) + node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + else + variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function variable_eager(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("Variable") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(variable, [], name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function variable(; name=nothing, shape=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - variable_eager(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - else - variable_graph(; name=name, shape=shape, dtype=dtype, container=container, shared_name=shared_name) - end - end end @@ -22020,37 +39872,69 @@ end """ begin - function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestCreateTreeVariable") do - desc = tf.NodeDescription("TensorForestCreateTreeVariable") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tree_config_ = convert(Tensor{String}, tree_config_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, tree_config_) + begin + function tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestCreateTreeVariable") do + desc = tf.NodeDescription("TensorForestCreateTreeVariable") + begin + begin + tree_handle_ = convert(Tensor{Any}, tree_handle_) + begin + end + end + begin + tree_config_ = convert(Tensor{String}, tree_config_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_handle_) + end + begin + tf.add_input(desc, tree_config_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) - desc = tf.EagerOp("TensorForestCreateTreeVariable") - tree_handle_ = convert(tf.EagerTensor, tree_handle_) - tree_config_ = convert(tf.EagerTensor, tree_config_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, tree_config_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function 
tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=nothing) + desc = tf.EagerOp("TensorForestCreateTreeVariable") + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + tree_config_ = convert(tf.EagerTensor, tree_config_) + begin + begin + tf.add_input(desc, tree_handle_) + end + begin + tf.add_input(desc, tree_config_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_create_tree_variable, [tree_handle_, tree_config_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) - if tf.in_eager_mode() - tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) - else - tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_create_tree_variable(tree_handle_, tree_config_; name=nothing) + if tf.in_eager_mode() + tensor_forest_create_tree_variable_eager(tree_handle_, tree_config_; name=name) + else + tensor_forest_create_tree_variable_graph(tree_handle_, tree_config_; name=name) + end end - end + end end @@ -22060,64 +39944,126 @@ end """ begin - function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradWithArgmax") do - desc = tf.NodeDescription("MaxPoolGradWithArgmax") - input_ = convert(Tensor{Any}, input_) - grad_ = convert(Tensor{Any}, grad_) - argmax_ = convert(Tensor{Any}, argmax_) - (argmax_,) = tf.tf_promote(argmax_) - (input_, grad_) = tf.tf_promote(input_, grad_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradWithArgmax") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + argmax_ = convert(Tensor{Any}, argmax_) + begin + end + end + begin + (argmax_,) = tf.tf_promote(argmax_) + end + begin + (input_, grad_) = tf.tf_promote(input_, grad_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, argmax_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolGradWithArgmax") + input_ = convert(tf.EagerTensor, input_) + grad_ = convert(tf.EagerTensor, grad_) + argmax_ = convert(tf.EagerTensor, argmax_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, argmax_) + end + end + begin + begin + if ksize !== nothing + 
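# The pooling attributes (window size, strides, padding) are forwarded
# verbatim; map(Base.identity, ksize) simply copies the caller's list into the
# op description.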
desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Targmax"] = tf.data_type(argmax_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("MaxPoolGradWithArgmax") - input_ = convert(tf.EagerTensor, input_) - grad_ = convert(tf.EagerTensor, grad_) - argmax_ = convert(tf.EagerTensor, argmax_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(grad_) - desc["Targmax"] = tf.data_type(argmax_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - end - end end @@ -22127,44 +40073,82 @@ end """ begin - function ref_switch_graph(data_, pred_; name=nothing) - local desc - tf.with_op_name(name, "RefSwitch") do - desc = tf.NodeDescription("RefSwitch") - data_ = convert(Tensor{Any}, data_) - pred_ = convert(Tensor{Bool}, pred_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ref_switch_eager(data_, pred_; name=nothing) - desc = tf.EagerOp("RefSwitch") - data_ = convert(tf.EagerTensor, data_) - pred_ = convert(tf.EagerTensor, pred_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = 
tf.TapeNode(ref_switch, [data_, pred_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_switch(data_, pred_; name=nothing) - if tf.in_eager_mode() - ref_switch_eager(data_, pred_; name=name) - else - ref_switch_graph(data_, pred_; name=name) + begin + function ref_switch_graph(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "RefSwitch") do + desc = tf.NodeDescription("RefSwitch") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + pred_ = convert(Tensor{Bool}, pred_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, pred_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ref_switch_eager(data_, pred_; name=nothing) + desc = tf.EagerOp("RefSwitch") + data_ = convert(tf.EagerTensor, data_) + pred_ = convert(tf.EagerTensor, pred_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, pred_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_switch, [data_, pred_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_switch(data_, pred_; name=nothing) + if tf.in_eager_mode() + ref_switch_eager(data_, pred_; name=name) + else + ref_switch_graph(data_, pred_; name=name) + end end - end + end end @@ -22174,33 +40158,57 @@ end """ begin - function sdca_fprint_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "SdcaFprint") do - desc = tf.NodeDescription("SdcaFprint") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) + begin + function sdca_fprint_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "SdcaFprint") do + desc = tf.NodeDescription("SdcaFprint") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sdca_fprint_eager(input_; name=nothing) - desc = tf.EagerOp("SdcaFprint") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sdca_fprint_eager(input_; name=nothing) + desc = tf.EagerOp("SdcaFprint") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(sdca_fprint, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_fprint(input_; name=nothing) - if tf.in_eager_mode() - sdca_fprint_eager(input_; name=name) - else - sdca_fprint_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_fprint(input_; name=nothing) + if tf.in_eager_mode() + sdca_fprint_eager(input_; name=name) + else 
+ sdca_fprint_graph(input_; name=name) + end end - end + end end @@ -22210,41 +40218,73 @@ end """ begin - function leaky_relu_graph(features_; name=nothing, alpha=nothing) - local desc - tf.with_op_name(name, "LeakyRelu") do - desc = tf.NodeDescription("LeakyRelu") - features_ = convert(Tensor{Float32}, features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) + begin + function leaky_relu_graph(features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyRelu") do + desc = tf.NodeDescription("LeakyRelu") + begin + begin + features_ = convert(Tensor{Float32}, features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + end + begin + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function leaky_relu_eager(features_; name=nothing, alpha=nothing) - desc = tf.EagerOp("LeakyRelu") - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function leaky_relu_eager(features_; name=nothing, alpha=nothing) + desc = tf.EagerOp("LeakyRelu") + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, features_) + end + end + begin + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(leaky_relu, [features_], name=nothing, alpha=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing) - if tf.in_eager_mode() - leaky_relu_eager(features_; name=name, alpha=alpha) - else - leaky_relu_graph(features_; name=name, alpha=alpha) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu(features_; name=nothing, alpha=nothing) + if tf.in_eager_mode() + leaky_relu_eager(features_; name=name, alpha=alpha) + else + leaky_relu_graph(features_; name=name, alpha=alpha) + end end - end + end end @@ -22254,39 +40294,67 @@ end """ begin - function identity_n_graph(input_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "IdentityN") do - desc = tf.NodeDescription("IdentityN") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) + begin + function identity_n_graph(input_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "IdentityN") do + desc = tf.NodeDescription("IdentityN") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function identity_n_eager(input_; name=nothing, T=nothing) - desc = tf.EagerOp("IdentityN") - input_ = convert(tf.EagerTensor, input_) - 
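
Every op in this generated file follows the same three-part shape: a *_graph builder that fills a tf.NodeDescription inside tf.with_op_name, a *_eager executor that fills a tf.EagerOp and runs it immediately, and a thin tf.@op dispatcher that selects between them with tf.in_eager_mode(). A minimal hand-written sketch of that shape, assuming the tf helpers used throughout this file; the op name "MyOp" and its single input are hypothetical:

    function my_op_graph(x_; name=nothing)
        local desc
        tf.with_op_name(name, "MyOp") do
            desc = tf.NodeDescription("MyOp")
            x_ = convert(Tensor{Any}, x_)   # graph mode: coerce to a graph Tensor
            tf.add_input(desc, x_)
        end
        tf.Tensor(tf.Operation(desc))
    end

    function my_op_eager(x_; name=nothing)
        desc = tf.EagerOp("MyOp")
        x_ = convert(tf.EagerTensor, x_)    # eager mode: coerce to an EagerTensor
        tf.add_input(desc, x_)
        desc["T"] = tf.data_type(x_)        # dtype attrs come from the runtime value
        res = tf.execute(desc)
        return res[1]
    end

    function my_op(x_; name=nothing)
        tf.in_eager_mode() ? my_op_eager(x_; name=name) : my_op_graph(x_; name=name)
    end
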
tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - res = tf.execute(desc) - node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function identity_n_eager(input_; name=nothing, T=nothing) + desc = tf.EagerOp("IdentityN") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(identity_n, [input_], name=nothing, T=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_n(input_; name=nothing, T=nothing) - if tf.in_eager_mode() - identity_n_eager(input_; name=name, T=T) - else - identity_n_graph(input_; name=name, T=T) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity_n(input_; name=nothing, T=nothing) + if tf.in_eager_mode() + identity_n_eager(input_; name=name, T=T) + else + identity_n_graph(input_; name=name, T=T) + end end - end + end end @@ -22296,130 +40364,292 @@ end """ begin - function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNBackpropV2") do - desc = tf.NodeDescription("CudnnRNNBackpropV2") - input_ = convert(Tensor{Any}, input_) - input_h_ = convert(Tensor{Any}, input_h_) - input_c_ = convert(Tensor{Any}, input_c_) - params_ = convert(Tensor{Any}, params_) - output_ = convert(Tensor{Any}, output_) - output_h_ = convert(Tensor{Any}, output_h_) - output_c_ = convert(Tensor{Any}, output_c_) - output_backprop_ = convert(Tensor{Any}, output_backprop_) - output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) - output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) - reserve_space_ = convert(Tensor{Any}, reserve_space_) - host_reserved_ = convert(Tensor{Any}, host_reserved_) - (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - tf.add_input(desc, host_reserved_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - 
push!(out, tf.Tensor(op, out_idx)) - end - out - end - function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNBackpropV2") - input_ = convert(tf.EagerTensor, input_) - input_h_ = convert(tf.EagerTensor, input_h_) - input_c_ = convert(tf.EagerTensor, input_c_) - params_ = convert(tf.EagerTensor, params_) - output_ = convert(tf.EagerTensor, output_) - output_h_ = convert(tf.EagerTensor, output_h_) - output_c_ = convert(tf.EagerTensor, output_c_) - output_backprop_ = convert(tf.EagerTensor, output_backprop_) - output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) - output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) - reserve_space_ = convert(tf.EagerTensor, reserve_space_) - host_reserved_ = convert(tf.EagerTensor, host_reserved_) - tf.add_input(desc, input_) - tf.add_input(desc, input_h_) - tf.add_input(desc, input_c_) - tf.add_input(desc, params_) - tf.add_input(desc, output_) - tf.add_input(desc, output_h_) - tf.add_input(desc, output_c_) - tf.add_input(desc, output_backprop_) - tf.add_input(desc, output_h_backprop_) - tf.add_input(desc, output_c_backprop_) - tf.add_input(desc, reserve_space_) - tf.add_input(desc, host_reserved_) - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_h_) - desc["T"] = tf.data_type(input_c_) - desc["T"] = tf.data_type(params_) - desc["T"] = tf.data_type(output_) - desc["T"] = tf.data_type(output_h_) - desc["T"] = tf.data_type(output_c_) - desc["T"] = tf.data_type(output_backprop_) - desc["T"] = tf.data_type(output_h_backprop_) - desc["T"] = tf.data_type(output_c_backprop_) - desc["T"] = tf.data_type(reserve_space_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, 
output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + begin + function cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNBackpropV2") do + desc = tf.NodeDescription("CudnnRNNBackpropV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_h_ = convert(Tensor{Any}, input_h_) + begin + end + end + begin + input_c_ = convert(Tensor{Any}, input_c_) + begin + end + end + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + output_ = convert(Tensor{Any}, output_) + begin + end + end + begin + output_h_ = convert(Tensor{Any}, output_h_) + begin + end + end + begin + output_c_ = convert(Tensor{Any}, output_c_) + begin + end + end + begin + output_backprop_ = convert(Tensor{Any}, output_backprop_) + begin + end + end + begin + output_h_backprop_ = convert(Tensor{Any}, output_h_backprop_) + begin + end + end + begin + output_c_backprop_ = convert(Tensor{Any}, output_c_backprop_) + begin + end + end + begin + reserve_space_ = convert(Tensor{Any}, reserve_space_) + begin + end + end + begin + host_reserved_ = convert(Tensor{Any}, host_reserved_) + begin + end + end + begin + (input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) = tf.tf_promote(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, output_) + end + begin + tf.add_input(desc, output_h_) + end + begin + tf.add_input(desc, output_c_) + end + begin + tf.add_input(desc, output_backprop_) + end + begin + tf.add_input(desc, output_h_backprop_) + end + begin + tf.add_input(desc, output_c_backprop_) + end + begin + tf.add_input(desc, reserve_space_) + end + begin + tf.add_input(desc, host_reserved_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = 
tf.EagerOp("CudnnRNNBackpropV2") + input_ = convert(tf.EagerTensor, input_) + input_h_ = convert(tf.EagerTensor, input_h_) + input_c_ = convert(tf.EagerTensor, input_c_) + params_ = convert(tf.EagerTensor, params_) + output_ = convert(tf.EagerTensor, output_) + output_h_ = convert(tf.EagerTensor, output_h_) + output_c_ = convert(tf.EagerTensor, output_c_) + output_backprop_ = convert(tf.EagerTensor, output_backprop_) + output_h_backprop_ = convert(tf.EagerTensor, output_h_backprop_) + output_c_backprop_ = convert(tf.EagerTensor, output_c_backprop_) + reserve_space_ = convert(tf.EagerTensor, reserve_space_) + host_reserved_ = convert(tf.EagerTensor, host_reserved_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_h_) + end + begin + tf.add_input(desc, input_c_) + end + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, output_) + end + begin + tf.add_input(desc, output_h_) + end + begin + tf.add_input(desc, output_c_) + end + begin + tf.add_input(desc, output_backprop_) + end + begin + tf.add_input(desc, output_h_backprop_) + end + begin + tf.add_input(desc, output_c_backprop_) + end + begin + tf.add_input(desc, reserve_space_) + end + begin + tf.add_input(desc, host_reserved_) + end + end + begin + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_h_) + end + begin + desc["T"] = tf.data_type(input_c_) + end + begin + desc["T"] = tf.data_type(params_) + end + begin + desc["T"] = tf.data_type(output_) + end + begin + desc["T"] = tf.data_type(output_h_) + end + begin + desc["T"] = tf.data_type(output_c_) + end + begin + desc["T"] = tf.data_type(output_backprop_) + end + begin + desc["T"] = tf.data_type(output_h_backprop_) + end + begin + desc["T"] = tf.data_type(output_c_backprop_) + end + begin + desc["T"] = tf.data_type(reserve_space_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_backprop_v2, [input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_], name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_backprop_v2(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_backprop_v2_eager(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + 
cudnn_rnn_backprop_v2_graph(input_, input_h_, input_c_, params_, output_, output_h_, output_c_, output_backprop_, output_h_backprop_, output_c_backprop_, reserve_space_, host_reserved_; name=name, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - end + end end @@ -22429,48 +40659,94 @@ end """ begin - function requantization_range_graph(input_, input_min_, input_max_; name=nothing) - local desc - tf.with_op_name(name, "RequantizationRange") do - desc = tf.NodeDescription("RequantizationRange") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function requantization_range_eager(input_, input_min_, input_max_; name=nothing) - desc = tf.EagerOp("RequantizationRange") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - desc["Tinput"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantization_range(input_, input_min_, input_max_; name=nothing) - if tf.in_eager_mode() - requantization_range_eager(input_, input_min_, input_max_; name=name) - else - requantization_range_graph(input_, input_min_, input_max_; name=name) + begin + function requantization_range_graph(input_, input_min_, input_max_; name=nothing) + local desc + tf.with_op_name(name, "RequantizationRange") do + desc = tf.NodeDescription("RequantizationRange") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_min_ = convert(Tensor{Float32}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Float32}, input_max_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function requantization_range_eager(input_, input_min_, input_max_; name=nothing) + desc = tf.EagerOp("RequantizationRange") + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + end + begin + desc["Tinput"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(requantization_range, [input_, input_min_, input_max_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantization_range(input_, input_min_, 
input_max_; name=nothing) + if tf.in_eager_mode() + requantization_range_eager(input_, input_min_, input_max_; name=name) + else + requantization_range_graph(input_, input_min_, input_max_; name=name) + end end - end + end end @@ -22480,40 +40756,78 @@ end """ begin - function maximum_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Maximum") do - desc = tf.NodeDescription("Maximum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function maximum_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Maximum") do + desc = tf.NodeDescription("Maximum") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function maximum_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Maximum") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(maximum, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function maximum_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Maximum") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(maximum, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function maximum(x_, y_; name=nothing) - if tf.in_eager_mode() - maximum_eager(x_, y_; name=name) - else - maximum_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function maximum(x_, y_; name=nothing) + if tf.in_eager_mode() + maximum_eager(x_, y_; name=name) + else + maximum_graph(x_, y_; name=name) + end end - end + end end @@ -22523,41 +40837,81 @@ end """ begin - function reshape_graph(tensor_, shape_; name=nothing) - local desc - tf.with_op_name(name, "Reshape") do - desc = tf.NodeDescription("Reshape") - tensor_ = convert(Tensor{Any}, tensor_) - shape_ = convert(Tensor{Int32}, shape_) - (tensor_,) = tf.tf_promote(tensor_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) + begin + function reshape_graph(tensor_, shape_; name=nothing) + local desc + tf.with_op_name(name, "Reshape") do + desc = tf.NodeDescription("Reshape") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reshape_eager(tensor_, shape_; name=nothing) - desc = 
tf.EagerOp("Reshape") - tensor_ = convert(tf.EagerTensor, tensor_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, shape_) - desc["T"] = tf.data_type(tensor_) - desc["Tshape"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reshape_eager(tensor_, shape_; name=nothing) + desc = tf.EagerOp("Reshape") + tensor_ = convert(tf.EagerTensor, tensor_) + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tshape"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(reshape, [tensor_, shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reshape(tensor_, shape_; name=nothing) - if tf.in_eager_mode() - reshape_eager(tensor_, shape_; name=name) - else - reshape_graph(tensor_, shape_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reshape(tensor_, shape_; name=nothing) + if tf.in_eager_mode() + reshape_eager(tensor_, shape_; name=name) + else + reshape_graph(tensor_, shape_; name=name) + end end - end + end end @@ -22567,50 +40921,100 @@ end """ begin - function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - local desc - tf.with_op_name(name, "MatrixSolveLs") do - desc = tf.NodeDescription("MatrixSolveLs") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) + begin + function matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + local desc + tf.with_op_name(name, "MatrixSolveLs") do + desc = tf.NodeDescription("MatrixSolveLs") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + l2_regularizer_ = convert(Tensor{Float64}, l2_regularizer_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + begin + tf.add_input(desc, l2_regularizer_) + end + end + begin + begin + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + desc = tf.EagerOp("MatrixSolveLs") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + begin + tf.add_input(desc, l2_regularizer_) + end + end + begin + begin + if fast !== nothing + desc["fast"] = Base.Bool(fast) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = 
tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) + if tf.in_eager_mode() + matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + else + matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) + end end - end - tf.Tensor(tf.Operation(desc)) end - function matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - desc = tf.EagerOp("MatrixSolveLs") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - l2_regularizer_ = convert(tf.EagerTensor, l2_regularizer_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - tf.add_input(desc, l2_regularizer_) - if fast !== nothing - desc["fast"] = Base.Bool(fast) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_solve_ls, [matrix_, rhs_, l2_regularizer_], name=nothing, fast=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve_ls(matrix_, rhs_, l2_regularizer_; name=nothing, fast=nothing) - if tf.in_eager_mode() - matrix_solve_ls_eager(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - else - matrix_solve_ls_graph(matrix_, rhs_, l2_regularizer_; name=name, fast=fast) - end - end end @@ -22620,41 +41024,81 @@ end """ begin - function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "TFRecordDataset") do - desc = tf.NodeDescription("TFRecordDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) - end - function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) - desc = tf.EagerOp("TFRecordDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - compression_type_ = convert(tf.EagerTensor, compression_type_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - res = tf.execute(desc) - node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) - if tf.in_eager_mode() - tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) - else - tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + begin + function tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "TFRecordDataset") do + desc = tf.NodeDescription("TFRecordDataset") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + begin + compression_type_ = 
convert(Tensor{String}, compression_type_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=nothing) + desc = tf.EagerOp("TFRecordDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tf_record_dataset, [filenames_, compression_type_, buffer_size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_dataset(filenames_, compression_type_, buffer_size_; name=nothing) + if tf.in_eager_mode() + tf_record_dataset_eager(filenames_, compression_type_, buffer_size_; name=name) + else + tf_record_dataset_graph(filenames_, compression_type_, buffer_size_; name=name) + end end - end + end end @@ -22664,49 +41108,89 @@ end """ begin - function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do - desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) + begin + function boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesExampleDebugOutputs") do + desc = tf.NodeDescription("BoostedTreesExampleDebugOutputs") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) 
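
Attribute handling is just as uniform in both modes: every attribute keyword defaults to nothing and is written into the description only when the caller actually supplied it, so TensorFlow's registered defaults apply otherwise. Scalar attributes are coerced through Base.Int / Base.String / Base.Bool, list attributes go through map(Base.identity, ...), and the same code works whether desc is a tf.NodeDescription or a tf.EagerOp. A condensed sketch, borrowing two attribute names from nearby ops:

    if logits_dimension !== nothing
        desc["logits_dimension"] = Base.Int(logits_dimension)     # scalar int attr
    end
    if output_types !== nothing
        desc["output_types"] = map(Base.identity, output_types)  # list-valued attr
    end
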
+ bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - desc = tf.EagerOp("BoostedTreesExampleDebugOutputs") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_example_debug_outputs, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_example_debug_outputs(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_example_debug_outputs_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - else - boosted_trees_example_debug_outputs_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - end - end end @@ -22716,49 +41200,89 @@ end """ begin - function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do - desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - max_intra_op_parallelism_ = convert(Tensor{Int64}, 
max_intra_op_parallelism_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, max_intra_op_parallelism_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalMaxIntraOpParallelismDataset") do + desc = tf.NodeDescription("ExperimentalMaxIntraOpParallelismDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + max_intra_op_parallelism_ = convert(Tensor{Int64}, max_intra_op_parallelism_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, max_intra_op_parallelism_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + max_intra_op_parallelism_ = convert(tf.EagerTensor, max_intra_op_parallelism_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, max_intra_op_parallelism_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalMaxIntraOpParallelismDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - max_intra_op_parallelism_ = convert(tf.EagerTensor, max_intra_op_parallelism_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, max_intra_op_parallelism_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, 
output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_max_intra_op_parallelism_dataset, [input_dataset_, max_intra_op_parallelism_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_max_intra_op_parallelism_dataset(input_dataset_, max_intra_op_parallelism_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_max_intra_op_parallelism_dataset_eager(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_max_intra_op_parallelism_dataset_graph(input_dataset_, max_intra_op_parallelism_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -22768,35 +41292,63 @@ end """ begin - function hsv_to_rgb_graph(images_; name=nothing) - local desc - tf.with_op_name(name, "HSVToRGB") do - desc = tf.NodeDescription("HSVToRGB") - images_ = convert(Tensor{Float32}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) + begin + function hsv_to_rgb_graph(images_; name=nothing) + local desc + tf.with_op_name(name, "HSVToRGB") do + desc = tf.NodeDescription("HSVToRGB") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function hsv_to_rgb_eager(images_; name=nothing) - desc = tf.EagerOp("HSVToRGB") - images_ = convert(tf.EagerTensor, images_) - tf.add_input(desc, images_) - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function hsv_to_rgb_eager(images_; name=nothing) + desc = tf.EagerOp("HSVToRGB") + images_ = convert(tf.EagerTensor, images_) + begin + begin + tf.add_input(desc, images_) + end + end + begin + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(hsv_to_rgb, [images_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hsv_to_rgb(images_; name=nothing) - if tf.in_eager_mode() - hsv_to_rgb_eager(images_; name=name) - else - hsv_to_rgb_graph(images_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hsv_to_rgb(images_; name=nothing) + if tf.in_eager_mode() + hsv_to_rgb_eager(images_; name=name) + else + hsv_to_rgb_graph(images_; name=name) + end end - end + end end @@ -22806,53 +41358,107 @@ end """ begin - function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterDiv") do - desc = tf.NodeDescription("ScatterDiv") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - 
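
One detail worth calling out in scatter_div_graph below: after converting indices_ to a Tensor, the builder subtracts 1 (indices_ - convert(tf.Tensor{eltype(indices_)}, 1)), letting callers pass Julia's 1-based indices while the underlying ScatterDiv kernel receives 0-based ones. A hypothetical call site, assuming ref is a 3-row variable and updates matches the shape of the selected rows:

    # Julia call sites use 1-based rows; the graph builder shifts them down by
    # one, so the ScatterDiv kernel sees rows [0, 2] here.
    scatter_div(ref, [1, 3], updates)
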
desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_div_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterDiv") do + desc = tf.NodeDescription("ScatterDiv") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterDiv") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_div_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterDiv") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_div, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_div(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_div_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_div_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -22862,50 +41468,84 @@ end """ begin - function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - local desc - tf.with_op_name(name, 
"DecodeWav") do - desc = tf.NodeDescription("DecodeWav") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) - if desired_channels !== nothing - desc["desired_channels"] = Base.Int(desired_channels) + begin + function decode_wav_graph(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + local desc + tf.with_op_name(name, "DecodeWav") do + desc = tf.NodeDescription("DecodeWav") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if desired_channels !== nothing + desc["desired_channels"] = Base.Int(desired_channels) + end + end + begin + if desired_samples !== nothing + desc["desired_samples"] = Base.Int(desired_samples) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + desc = tf.EagerOp("DecodeWav") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + begin + if desired_channels !== nothing + desc["desired_channels"] = Base.Int(desired_channels) + end + end + begin + if desired_samples !== nothing + desc["desired_samples"] = Base.Int(desired_samples) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) + if tf.in_eager_mode() + decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + else + decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) + end end - if desired_samples !== nothing - desc["desired_samples"] = Base.Int(desired_samples) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function decode_wav_eager(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - desc = tf.EagerOp("DecodeWav") - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, contents_) - if desired_channels !== nothing - desc["desired_channels"] = Base.Int(desired_channels) - end - if desired_samples !== nothing - desc["desired_samples"] = Base.Int(desired_samples) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_wav, [contents_], name=nothing, desired_channels=nothing, desired_samples=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_wav(contents_; name=nothing, desired_channels=nothing, desired_samples=nothing) - if tf.in_eager_mode() - decode_wav_eager(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) - else - decode_wav_graph(contents_; name=name, desired_channels=desired_channels, desired_samples=desired_samples) - end - end end @@ -22915,35 +41555,63 @@ end """ begin - function log_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Log") do - desc = 
tf.NodeDescription("Log") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function log_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Log") do + desc = tf.NodeDescription("Log") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function log_eager(x_; name=nothing) - desc = tf.EagerOp("Log") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(log, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function log_eager(x_; name=nothing) + desc = tf.EagerOp("Log") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(log, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log(x_; name=nothing) - if tf.in_eager_mode() - log_eager(x_; name=name) - else - log_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log(x_; name=nothing) + if tf.in_eager_mode() + log_eager(x_; name=name) + else + log_graph(x_; name=name) + end end - end + end end @@ -22953,51 +41621,103 @@ end """ begin - function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "SaveV2") do - desc = tf.NodeDescription("SaveV2") - prefix_ = convert(Tensor{String}, prefix_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) - tensors_ = [convert(Tensor{Any}, x) for x = tensors_] - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - tf.add_input(desc, tensors_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "SaveV2") do + desc = tf.NodeDescription("SaveV2") + begin + begin + prefix_ = convert(Tensor{String}, prefix_) + begin + end + end + begin + tensor_names_ = convert(Tensor{String}, tensor_names_) + begin + end + end + begin + shape_and_slices_ = convert(Tensor{String}, shape_and_slices_) + begin + end + end + begin + tensors_ = [convert(Tensor{Any}, x) for x = tensors_] + begin + end + end + end + begin + begin + tf.add_input(desc, prefix_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shape_and_slices_) + end + begin + tf.add_input(desc, tensors_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + desc = tf.EagerOp("SaveV2") + prefix_ = convert(tf.EagerTensor, prefix_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) + tensors_ = convert(tf.EagerTensor, tensors_) + begin + 
begin + tf.add_input(desc, prefix_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shape_and_slices_) + end + begin + tf.add_input(desc, tensors_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + else + save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - desc = tf.EagerOp("SaveV2") - prefix_ = convert(tf.EagerTensor, prefix_) - tensor_names_ = convert(tf.EagerTensor, tensor_names_) - shape_and_slices_ = convert(tf.EagerTensor, shape_and_slices_) - tensors_ = convert(tf.EagerTensor, tensors_) - tf.add_input(desc, prefix_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shape_and_slices_) - tf.add_input(desc, tensors_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - res = tf.execute(desc) - node = tf.TapeNode(save_v2, [prefix_, tensor_names_, shape_and_slices_, tensors_], name=nothing, dtypes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_v2(prefix_, tensor_names_, shape_and_slices_, tensors_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - save_v2_eager(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) - else - save_v2_graph(prefix_, tensor_names_, shape_and_slices_, tensors_; name=name, dtypes=dtypes) - end - end end @@ -23007,35 +41727,63 @@ end """ begin - function deep_copy_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "DeepCopy") do - desc = tf.NodeDescription("DeepCopy") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function deep_copy_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "DeepCopy") do + desc = tf.NodeDescription("DeepCopy") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function deep_copy_eager(x_; name=nothing) - desc = tf.EagerOp("DeepCopy") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(deep_copy, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function deep_copy_eager(x_; name=nothing) + desc = tf.EagerOp("DeepCopy") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(deep_copy, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deep_copy(x_; name=nothing) - if tf.in_eager_mode() - deep_copy_eager(x_; name=name) - else - deep_copy_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deep_copy(x_; name=nothing) + if tf.in_eager_mode() + deep_copy_eager(x_; name=name) + else + deep_copy_graph(x_; name=name) + end end - end + end end @@ -23045,45 +41793,77 @@ end """ begin - function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ModelDataset") do - desc = tf.NodeDescription("ModelDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function model_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ModelDataset") do + desc = tf.NodeDescription("ModelDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ModelDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function model_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ModelDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(model_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function model_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - model_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - model_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -23093,136 +41873,238 @@ end """ begin - function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseSequenceExample") do - desc = tf.NodeDescription("ParseSequenceExample") - serialized_ = convert(Tensor{String}, serialized_) - debug_name_ = convert(Tensor{String}, debug_name_) - context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, debug_name_) - tf.add_input(desc, context_dense_defaults_) - if feature_list_dense_missing_assumed_empty !== nothing - desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) - end - if context_sparse_keys !== nothing - desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) - end - if context_dense_keys !== nothing - desc["context_dense_keys"] = map(Base.identity, context_dense_keys) - end - if feature_list_sparse_keys !== nothing - desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) - end - if feature_list_dense_keys !== nothing - desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) - end - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + begin + function parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseSequenceExample") do + desc = tf.NodeDescription("ParseSequenceExample") + begin + begin + serialized_ = convert(Tensor{String}, serialized_) + begin + end + end + begin + debug_name_ = convert(Tensor{String}, debug_name_) + begin + end + end + begin + context_dense_defaults_ = [convert(Tensor{Any}, x) for x = context_dense_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, debug_name_) + end + begin + tf.add_input(desc, context_dense_defaults_) + end + end + begin + begin + if feature_list_dense_missing_assumed_empty !== nothing + desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, 
feature_list_dense_missing_assumed_empty) + end + end + begin + if context_sparse_keys !== nothing + desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) + end + end + begin + if context_dense_keys !== nothing + desc["context_dense_keys"] = map(Base.identity, context_dense_keys) + end + end + begin + if feature_list_sparse_keys !== nothing + desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) + end + end + begin + if feature_list_dense_keys !== nothing + desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) + end + end + begin + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + end + begin + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + end + begin + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + end + begin + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + end + begin + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + end + begin + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + end + begin + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + end + begin + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + end + begin + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + end + begin + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:9 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + desc = tf.EagerOp("ParseSequenceExample") + serialized_ = convert(tf.EagerTensor, serialized_) + debug_name_ = convert(tf.EagerTensor, debug_name_) + context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, debug_name_) + end + begin + tf.add_input(desc, context_dense_defaults_) + end + end + begin + begin + if feature_list_dense_missing_assumed_empty !== nothing + desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) + end + end + begin + if context_sparse_keys !== nothing + desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) + end + end + begin + if context_dense_keys !== nothing + desc["context_dense_keys"] = map(Base.identity, context_dense_keys) + end + end + begin + if feature_list_sparse_keys !== nothing + desc["feature_list_sparse_keys"] = 
map(Base.identity, feature_list_sparse_keys) + end + end + begin + if feature_list_dense_keys !== nothing + desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) + end + end + begin + if Ncontext_sparse !== nothing + desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) + end + end + begin + if Ncontext_dense !== nothing + desc["Ncontext_dense"] = Base.Int(Ncontext_dense) + end + end + begin + if Nfeature_list_sparse !== nothing + desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) + end + end + begin + if Nfeature_list_dense !== nothing + desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) + end + end + begin + if context_sparse_types !== nothing + desc["context_sparse_types"] = map(Base.identity, context_sparse_types) + end + end + begin + if Tcontext_dense !== nothing + desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) + end + end + begin + if feature_list_dense_types !== nothing + desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) + end + end + begin + if context_dense_shapes !== nothing + desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) + end + end + begin + if feature_list_sparse_types !== nothing + desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) + end + end + begin + if feature_list_dense_shapes !== nothing + desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) + if tf.in_eager_mode() + parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, 
feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + else + parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) + end end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) - end - if Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:9 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - desc = tf.EagerOp("ParseSequenceExample") - serialized_ = convert(tf.EagerTensor, serialized_) - debug_name_ = convert(tf.EagerTensor, debug_name_) - context_dense_defaults_ = convert(tf.EagerTensor, context_dense_defaults_) - tf.add_input(desc, serialized_) - tf.add_input(desc, debug_name_) - tf.add_input(desc, context_dense_defaults_) - if feature_list_dense_missing_assumed_empty !== nothing - desc["feature_list_dense_missing_assumed_empty"] = map(Base.identity, feature_list_dense_missing_assumed_empty) - end - if context_sparse_keys !== nothing - desc["context_sparse_keys"] = map(Base.identity, context_sparse_keys) - end - if context_dense_keys !== nothing - desc["context_dense_keys"] = map(Base.identity, context_dense_keys) - end - if feature_list_sparse_keys !== nothing - desc["feature_list_sparse_keys"] = map(Base.identity, feature_list_sparse_keys) - end - if feature_list_dense_keys !== nothing 
- desc["feature_list_dense_keys"] = map(Base.identity, feature_list_dense_keys) - end - if Ncontext_sparse !== nothing - desc["Ncontext_sparse"] = Base.Int(Ncontext_sparse) - end - if Ncontext_dense !== nothing - desc["Ncontext_dense"] = Base.Int(Ncontext_dense) - end - if Nfeature_list_sparse !== nothing - desc["Nfeature_list_sparse"] = Base.Int(Nfeature_list_sparse) - end - if Nfeature_list_dense !== nothing - desc["Nfeature_list_dense"] = Base.Int(Nfeature_list_dense) - end - if context_sparse_types !== nothing - desc["context_sparse_types"] = map(Base.identity, context_sparse_types) - end - if Tcontext_dense !== nothing - desc["Tcontext_dense"] = map(Base.identity, Tcontext_dense) - end - if feature_list_dense_types !== nothing - desc["feature_list_dense_types"] = map(Base.identity, feature_list_dense_types) - end - if context_dense_shapes !== nothing - desc["context_dense_shapes"] = map(Base.identity, context_dense_shapes) - end - if feature_list_sparse_types !== nothing - desc["feature_list_sparse_types"] = map(Base.identity, feature_list_sparse_types) - end - if feature_list_dense_shapes !== nothing - desc["feature_list_dense_shapes"] = map(Base.identity, feature_list_dense_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(parse_sequence_example, [serialized_, debug_name_, context_dense_defaults_], name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_sequence_example(serialized_, debug_name_, context_dense_defaults_; name=nothing, feature_list_dense_missing_assumed_empty=nothing, context_sparse_keys=nothing, context_dense_keys=nothing, feature_list_sparse_keys=nothing, feature_list_dense_keys=nothing, Ncontext_sparse=nothing, Ncontext_dense=nothing, Nfeature_list_sparse=nothing, Nfeature_list_dense=nothing, context_sparse_types=nothing, Tcontext_dense=nothing, feature_list_dense_types=nothing, context_dense_shapes=nothing, feature_list_sparse_types=nothing, feature_list_dense_shapes=nothing) - if tf.in_eager_mode() - parse_sequence_example_eager(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - else - parse_sequence_example_graph(serialized_, debug_name_, context_dense_defaults_; name=name, feature_list_dense_missing_assumed_empty=feature_list_dense_missing_assumed_empty, 
context_sparse_keys=context_sparse_keys, context_dense_keys=context_dense_keys, feature_list_sparse_keys=feature_list_sparse_keys, feature_list_dense_keys=feature_list_dense_keys, Ncontext_sparse=Ncontext_sparse, Ncontext_dense=Ncontext_dense, Nfeature_list_sparse=Nfeature_list_sparse, Nfeature_list_dense=Nfeature_list_dense, context_sparse_types=context_sparse_types, Tcontext_dense=Tcontext_dense, feature_list_dense_types=feature_list_dense_types, context_dense_shapes=context_dense_shapes, feature_list_sparse_types=feature_list_sparse_types, feature_list_dense_shapes=feature_list_dense_shapes) - end - end end @@ -23232,35 +42114,63 @@ end """ begin - function sinh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sinh") do - desc = tf.NodeDescription("Sinh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function sinh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sinh") do + desc = tf.NodeDescription("Sinh") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sinh_eager(x_; name=nothing) - desc = tf.EagerOp("Sinh") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(sinh, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sinh_eager(x_; name=nothing) + desc = tf.EagerOp("Sinh") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(sinh, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sinh(x_; name=nothing) - if tf.in_eager_mode() - sinh_eager(x_; name=name) - else - sinh_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sinh(x_; name=nothing) + if tf.in_eager_mode() + sinh_eager(x_; name=name) + else + sinh_graph(x_; name=name) + end end - end + end end @@ -23270,53 +42180,85 @@ end """ begin - function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorV2") do - desc = tf.NodeDescription("IteratorV2") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function iterator_v2_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorV2") do + desc = tf.NodeDescription("IteratorV2") + begin + end + begin + end + begin + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end 
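# IteratorV2, whose builder is being rewritten here, is unusual in taking no
# tensor inputs at all: the body is nothing but attribute guards. A keyword
# left as `nothing` is never written into the description, so TensorFlow
# falls back to the op's registered default (or rejects the op if the
# attribute is required). An illustrative call, with hypothetical names and
# shapes:
#
#     it = iterator_v2(shared_name="train_iterator",
#                      output_types=[Float32],
#                      output_shapes=[[-1, 28, 28]])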
+ begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_v2_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorV2") + begin + end + begin + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_v2_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorV2") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_v2, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_v2(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_v2_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - iterator_v2_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -23326,47 +42268,99 @@ end """ begin - function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWriteV2") do - desc = tf.NodeDescription("TensorArrayWriteV2") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_write_v2_eager(handle_, index_, value_, 
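# TensorArrayWriteV2's builders show how dtype attributes stay implicit: the
# graph path runs (value_,) = tf.tf_promote(value_) to settle a common
# numeric type, and the eager path mirrors that by reading the type back off
# the converted handle. A condensed sketch of the eager half, assuming the
# helpers used throughout:
#
#     value = convert(tf.EagerTensor, value)
#     tf.add_input(desc, value)
#     desc["T"] = tf.data_type(value)  # infer the "T" attr from the input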
flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayWriteV2") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) + begin + function tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV2") do + desc = tf.NodeDescription("TensorArrayWriteV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayWriteV2") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write_v2, [handle_, index_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v2(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_v2_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v2_graph(handle_, index_, value_, flow_in_; name=name) + end end - end + end end @@ -23376,39 +42370,67 @@ end """ begin - function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListElementShape") do - desc = tf.NodeDescription("TensorListElementShape") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) + begin + function tensor_list_element_shape_graph(input_handle_; name=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListElementShape") do + desc = tf.NodeDescription("TensorListElementShape") + 
begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListElementShape") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_list_element_shape_eager(input_handle_; name=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListElementShape") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_element_shape, [input_handle_], name=nothing, shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) - else - tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_element_shape(input_handle_; name=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_element_shape_eager(input_handle_; name=name, shape_type=shape_type) + else + tensor_list_element_shape_graph(input_handle_; name=name, shape_type=shape_type) + end end - end + end end @@ -23418,33 +42440,57 @@ end """ begin - function queue_size_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueSizeV2") do - desc = tf.NodeDescription("QueueSizeV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) + begin + function queue_size_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueSizeV2") do + desc = tf.NodeDescription("QueueSizeV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function queue_size_v2_eager(handle_; name=nothing) - desc = tf.EagerOp("QueueSizeV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function queue_size_v2_eager(handle_; name=nothing) + desc = tf.EagerOp("QueueSizeV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(queue_size_v2, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end 
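# Return conventions differ by arity across these wrappers: single-output
# ops such as QueueSizeV2 above unwrap with `return res[1]`, while
# multi-output ops (DecodeWav with two outputs, ParseSequenceExample with
# nine) hand back the whole `res` vector, mirroring the `for out_idx = 1:n`
# collection loop in their graph builders. Sketch, with a hypothetical queue
# handle and WAV byte string:
#
#     n = queue_size_v2(queue_handle)             # one output, a single tensor
#     audio, sample_rate = decode_wav(wav_bytes)  # two outputs, indexable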
end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size_v2(handle_; name=nothing) - if tf.in_eager_mode() - queue_size_v2_eager(handle_; name=name) - else - queue_size_v2_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size_v2(handle_; name=nothing) + if tf.in_eager_mode() + queue_size_v2_eager(handle_; name=name) + else + queue_size_v2_graph(handle_; name=name) + end end - end + end end @@ -23454,35 +42500,63 @@ end """ begin - function expm1_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Expm1") do - desc = tf.NodeDescription("Expm1") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function expm1_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Expm1") do + desc = tf.NodeDescription("Expm1") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function expm1_eager(x_; name=nothing) - desc = tf.EagerOp("Expm1") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(expm1, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function expm1_eager(x_; name=nothing) + desc = tf.EagerOp("Expm1") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(expm1, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expm1(x_; name=nothing) - if tf.in_eager_mode() - expm1_eager(x_; name=name) - else - expm1_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expm1(x_; name=nothing) + if tf.in_eager_mode() + expm1_eager(x_; name=name) + else + expm1_graph(x_; name=name) + end end - end + end end @@ -23492,43 +42566,87 @@ end """ begin - function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixBandPart") do - desc = tf.NodeDescription("BatchMatrixBandPart") - input_ = convert(Tensor{Any}, input_) - num_lower_ = convert(Tensor{Int64}, num_lower_) - num_upper_ = convert(Tensor{Int64}, num_upper_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - end - tf.Tensor(tf.Operation(desc)) - end - function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) - desc = tf.EagerOp("BatchMatrixBandPart") - input_ = convert(tf.EagerTensor, input_) - num_lower_ = convert(tf.EagerTensor, num_lower_) - num_upper_ = convert(tf.EagerTensor, num_upper_) - tf.add_input(desc, input_) - tf.add_input(desc, num_lower_) - tf.add_input(desc, num_upper_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) - if tf.in_eager_mode() - batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) - else - batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + begin + function batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixBandPart") do + desc = tf.NodeDescription("BatchMatrixBandPart") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + num_lower_ = convert(Tensor{Int64}, num_lower_) + begin + end + end + begin + num_upper_ = convert(Tensor{Int64}, num_upper_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, num_lower_) + end + begin + tf.add_input(desc, num_upper_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=nothing) + desc = tf.EagerOp("BatchMatrixBandPart") + input_ = convert(tf.EagerTensor, input_) + num_lower_ = convert(tf.EagerTensor, num_lower_) + num_upper_ = convert(tf.EagerTensor, num_upper_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, num_lower_) + end + begin + tf.add_input(desc, num_upper_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_band_part, [input_, num_lower_, num_upper_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_band_part(input_, num_lower_, num_upper_; name=nothing) + if tf.in_eager_mode() + batch_matrix_band_part_eager(input_, num_lower_, num_upper_; name=name) + else + batch_matrix_band_part_graph(input_, num_lower_, num_upper_; name=name) + end end - end + end end @@ -23538,49 +42656,89 @@ end """ begin - function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ConcatenateDataset") do - desc = tf.NodeDescription("ConcatenateDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - another_dataset_ = convert(Tensor{Any}, another_dataset_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, another_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function concatenate_dataset_graph(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ConcatenateDataset") do + desc = tf.NodeDescription("ConcatenateDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + another_dataset_ = convert(Tensor{Any}, another_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, another_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, 
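# Besides executing, every eager builder records the call for automatic
# differentiation: tf.TapeNode captures the public function, its inputs, and
# the results, and tf.add_node attaches that record to the first output so
# the tape can replay the op when gradients are requested. Condensed from
# the ConcatenateDataset builder being rewritten here, with the keyword
# arguments elided:
#
#     res = tf.execute(desc)
#     node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], res)
#     tf.add_node(res[1], node)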
output_shapes=nothing) + desc = tf.EagerOp("ConcatenateDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + another_dataset_ = convert(tf.EagerTensor, another_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, another_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function concatenate_dataset_eager(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ConcatenateDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - another_dataset_ = convert(tf.EagerTensor, another_dataset_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, another_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(concatenate_dataset, [input_dataset_, another_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concatenate_dataset(input_dataset_, another_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - concatenate_dataset_eager(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - concatenate_dataset_graph(input_dataset_, another_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -23590,33 +42748,57 @@ end """ begin - function decode_gif_graph(contents_; name=nothing) - local desc - tf.with_op_name(name, "DecodeGif") do - desc = tf.NodeDescription("DecodeGif") - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, contents_) + begin + function decode_gif_graph(contents_; name=nothing) + local desc + tf.with_op_name(name, "DecodeGif") do + desc = tf.NodeDescription("DecodeGif") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function decode_gif_eager(contents_; name=nothing) - desc = tf.EagerOp("DecodeGif") - contents_ = convert(tf.EagerTensor, contents_) - 
tf.add_input(desc, contents_) - res = tf.execute(desc) - node = tf.TapeNode(decode_gif, [contents_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function decode_gif_eager(contents_; name=nothing) + desc = tf.EagerOp("DecodeGif") + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, contents_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(decode_gif, [contents_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_gif(contents_; name=nothing) - if tf.in_eager_mode() - decode_gif_eager(contents_; name=name) - else - decode_gif_graph(contents_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_gif(contents_; name=nothing) + if tf.in_eager_mode() + decode_gif_eager(contents_; name=name) + else + decode_gif_graph(contents_; name=name) + end end - end + end end @@ -23626,117 +42808,213 @@ end Runs replicated computations on a distributed TPU system. """ begin - function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) - local desc - tf.with_op_name(name, "TPUReplicate") do - desc = tf.NodeDescription("TPUReplicate") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_] - variables_ = [convert(Tensor{Any}, x) for x = variables_] - guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_] - tf.add_input(desc, inputs_) - tf.add_input(desc, broadcast_inputs_) - tf.add_input(desc, variables_) - tf.add_input(desc, guaranteed_constants_) - if computation !== nothing - desc["computation"] = Base.identity(computation) - end - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + begin + function tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) + local desc + tf.with_op_name(name, "TPUReplicate") do + desc = tf.NodeDescription("TPUReplicate") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + broadcast_inputs_ = [convert(Tensor{Any}, x) for x = broadcast_inputs_] + begin + end + end + begin + variables_ = [convert(Tensor{Any}, x) for x = variables_] + begin + end + end + begin + guaranteed_constants_ = [convert(Tensor{Any}, x) for x = guaranteed_constants_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, broadcast_inputs_) + end + begin + tf.add_input(desc, variables_) + end + begin + tf.add_input(desc, guaranteed_constants_) + end + end + begin + begin + if computation !== nothing + desc["computation"] = 
Base.identity(computation) + end + end + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + begin + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + end + begin + if topology !== nothing + desc["topology"] = Base.String(topology) + end + end + begin + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + end + begin + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + end + begin + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + end + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if Tbroadcast_inputs !== nothing + desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) + end + end + begin + if NumVariables !== nothing + desc["NumVariables"] = Base.Int(NumVariables) + end + end + begin + if Tguaranteed_constants !== nothing + desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) + desc = tf.EagerOp("TPUReplicate") + inputs_ = convert(tf.EagerTensor, inputs_) + broadcast_inputs_ = convert(tf.EagerTensor, broadcast_inputs_) + variables_ = convert(tf.EagerTensor, variables_) + guaranteed_constants_ = convert(tf.EagerTensor, guaranteed_constants_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, broadcast_inputs_) + end + begin + tf.add_input(desc, variables_) + end + begin + tf.add_input(desc, guaranteed_constants_) + end + end + begin + begin + if computation !== nothing + desc["computation"] = Base.identity(computation) + end + end + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + begin + if num_cores_per_replica !== nothing + desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) + end + end + begin + if topology !== nothing + desc["topology"] = Base.String(topology) + end + end + begin + if use_tpu !== nothing + desc["use_tpu"] = Base.Bool(use_tpu) + end + end + begin + if device_assignment !== nothing + desc["device_assignment"] = map(Base.identity, device_assignment) + end + end + begin + if host_compute_core !== nothing + desc["host_compute_core"] = map(Base.identity, host_compute_core) + end + end + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if Tbroadcast_inputs !== nothing + desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) + end + end + begin + if NumVariables !== nothing + desc["NumVariables"] = Base.Int(NumVariables) + end + end + begin + if Tguaranteed_constants !== nothing + desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicate, 
[inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) + if tf.in_eager_mode() + tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) + else + tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) + end end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - desc["device_assignment"] = map(Base.identity, device_assignment) - end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if Tbroadcast_inputs !== nothing - desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) - end - if NumVariables !== nothing - desc["NumVariables"] = Base.Int(NumVariables) - end - if Tguaranteed_constants !== nothing - desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) - desc = tf.EagerOp("TPUReplicate") - inputs_ = convert(tf.EagerTensor, inputs_) - broadcast_inputs_ = convert(tf.EagerTensor, broadcast_inputs_) - variables_ = convert(tf.EagerTensor, variables_) - guaranteed_constants_ = convert(tf.EagerTensor, guaranteed_constants_) - tf.add_input(desc, inputs_) - tf.add_input(desc, broadcast_inputs_) - tf.add_input(desc, variables_) - tf.add_input(desc, 
guaranteed_constants_) - if computation !== nothing - desc["computation"] = Base.identity(computation) - end - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - if num_cores_per_replica !== nothing - desc["num_cores_per_replica"] = Base.Int(num_cores_per_replica) - end - if topology !== nothing - desc["topology"] = Base.String(topology) - end - if use_tpu !== nothing - desc["use_tpu"] = Base.Bool(use_tpu) - end - if device_assignment !== nothing - desc["device_assignment"] = map(Base.identity, device_assignment) - end - if host_compute_core !== nothing - desc["host_compute_core"] = map(Base.identity, host_compute_core) - end - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if Tbroadcast_inputs !== nothing - desc["Tbroadcast_inputs"] = map(Base.identity, Tbroadcast_inputs) - end - if NumVariables !== nothing - desc["NumVariables"] = Base.Int(NumVariables) - end - if Tguaranteed_constants !== nothing - desc["Tguaranteed_constants"] = map(Base.identity, Tguaranteed_constants) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - res = tf.execute(desc) - node = tf.TapeNode(tpu_replicate, [inputs_, broadcast_inputs_, variables_, guaranteed_constants_], name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicate(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=nothing, computation=nothing, num_replicas=nothing, num_cores_per_replica=nothing, topology=nothing, use_tpu=nothing, device_assignment=nothing, host_compute_core=nothing, Tinputs=nothing, Tbroadcast_inputs=nothing, NumVariables=nothing, Tguaranteed_constants=nothing, output_types=nothing) - if tf.in_eager_mode() - tpu_replicate_eager(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) - else - tpu_replicate_graph(inputs_, broadcast_inputs_, variables_, guaranteed_constants_; name=name, computation=computation, num_replicas=num_replicas, num_cores_per_replica=num_cores_per_replica, topology=topology, use_tpu=use_tpu, device_assignment=device_assignment, host_compute_core=host_compute_core, Tinputs=Tinputs, Tbroadcast_inputs=Tbroadcast_inputs, NumVariables=NumVariables, Tguaranteed_constants=Tguaranteed_constants, output_types=output_types) - end - end end @@ -23746,46 +43024,80 @@ end """ begin - function batch_self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) - local desc - tf.with_op_name(name, "BatchSelfAdjointEigV2") do - desc = tf.NodeDescription("BatchSelfAdjointEigV2") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) + begin + function batch_self_adjoint_eig_v2_graph(input_; 
name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "BatchSelfAdjointEigV2") do + desc = tf.NodeDescription("BatchSelfAdjointEigV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) - desc = tf.EagerOp("BatchSelfAdjointEigV2") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function batch_self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) + desc = tf.EagerOp("BatchSelfAdjointEigV2") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.in_eager_mode() - batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) - else - batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.in_eager_mode() + batch_self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + batch_self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end end - end + end end @@ -23795,41 +43107,73 @@ end """ begin - function shape_graph(input_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Shape") do - desc = tf.NodeDescription("Shape") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function shape_graph(input_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Shape") do + desc = tf.NodeDescription("Shape") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function shape_eager(input_; name=nothing, out_type=nothing) - desc = tf.EagerOp("Shape") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if 
out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function shape_eager(input_; name=nothing, out_type=nothing) + desc = tf.EagerOp("Shape") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(shape, [input_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape(input_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - shape_eager(input_; name=name, out_type=out_type) - else - shape_graph(input_; name=name, out_type=out_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shape(input_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + shape_eager(input_; name=name, out_type=out_type) + else + shape_graph(input_; name=name, out_type=out_type) + end end - end + end end @@ -23839,49 +43183,89 @@ end """ begin - function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "RepeatDataset") do - desc = tf.NodeDescription("RepeatDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - count_ = convert(Tensor{Int64}, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function repeat_dataset_graph(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RepeatDataset") do + desc = tf.NodeDescription("RepeatDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + count_ = convert(Tensor{Int64}, count_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("RepeatDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + count_ = convert(tf.EagerTensor, count_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, count_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + else + repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function repeat_dataset_eager(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("RepeatDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - count_ = convert(tf.EagerTensor, count_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, count_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(repeat_dataset, [input_dataset_, count_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function repeat_dataset(input_dataset_, count_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - repeat_dataset_eager(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - else - repeat_dataset_graph(input_dataset_, count_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -23891,40 +43275,78 @@ end """ begin - function reciprocal_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "ReciprocalGrad") do - desc = tf.NodeDescription("ReciprocalGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function reciprocal_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "ReciprocalGrad") do + desc = tf.NodeDescription("ReciprocalGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reciprocal_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("ReciprocalGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reciprocal_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("ReciprocalGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = 
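+            # Tape note: recording ReciprocalGrad itself makes higher-order
+            # gradients possible; mathematically, for y = 1/x this op returns
+            # -y^2 * dy.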
tf.TapeNode(reciprocal_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - reciprocal_grad_eager(y_, dy_; name=name) - else - reciprocal_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + reciprocal_grad_eager(y_, dy_; name=name) + else + reciprocal_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -23934,53 +43356,109 @@ end """ begin - function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - local desc - tf.with_op_name(name, "CropAndResizeGradBoxes") do - desc = tf.NodeDescription("CropAndResizeGradBoxes") - grads_ = convert(Tensor{Float32}, grads_) - image_ = convert(Tensor{Any}, image_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, grads_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - if method !== nothing - desc["method"] = Base.String(method) + begin + function crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradBoxes") do + desc = tf.NodeDescription("CropAndResizeGradBoxes") + begin + begin + grads_ = convert(Tensor{Float32}, grads_) + begin + end + end + begin + image_ = convert(Tensor{Any}, image_) + begin + end + end + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + box_ind_ = convert(Tensor{Int32}, box_ind_) + begin + end + end + begin + (image_,) = tf.tf_promote(image_) + end + end + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + desc = tf.EagerOp("CropAndResizeGradBoxes") + grads_ = convert(tf.EagerTensor, grads_) + image_ = convert(tf.EagerTensor, image_) + boxes_ = convert(tf.EagerTensor, boxes_) + box_ind_ = convert(tf.EagerTensor, box_ind_) + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + end + begin + desc["T"] = tf.data_type(image_) + end + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) + if tf.in_eager_mode() + crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) + else + crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, 
method=method) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - desc = tf.EagerOp("CropAndResizeGradBoxes") - grads_ = convert(tf.EagerTensor, grads_) - image_ = convert(tf.EagerTensor, image_) - boxes_ = convert(tf.EagerTensor, boxes_) - box_ind_ = convert(tf.EagerTensor, box_ind_) - tf.add_input(desc, grads_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - if method !== nothing - desc["method"] = Base.String(method) - end - desc["T"] = tf.data_type(image_) - res = tf.execute(desc) - node = tf.TapeNode(crop_and_resize_grad_boxes, [grads_, image_, boxes_, box_ind_], name=nothing, method=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_boxes(grads_, image_, boxes_, box_ind_; name=nothing, method=nothing) - if tf.in_eager_mode() - crop_and_resize_grad_boxes_eager(grads_, image_, boxes_, box_ind_; name=name, method=method) - else - crop_and_resize_grad_boxes_graph(grads_, image_, boxes_, box_ind_; name=name, method=method) - end - end end @@ -23990,46 +43468,88 @@ end """ begin - function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "BatchMatrixSolve") do - desc = tf.NodeDescription("BatchMatrixSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) + begin + function batch_matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSolve") do + desc = tf.NodeDescription("BatchMatrixSolve") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) + desc = tf.EagerOp("BatchMatrixSolve") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_matrix_solve_eager(matrix_, rhs_; 
name=nothing, adjoint=nothing) - desc = tf.EagerOp("BatchMatrixSolve") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - batch_matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) - else - batch_matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) - end - end end @@ -24039,59 +43559,95 @@ end """ begin - function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "MutableHashTableV2") do - desc = tf.NodeDescription("MutableHashTableV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + begin + function mutable_hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "MutableHashTableV2") do + desc = tf.NodeDescription("MutableHashTableV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("MutableHashTableV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_v2(; name=nothing, 
container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - end - tf.Tensor(tf.Operation(desc)) end - function mutable_hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - desc = tf.EagerOp("MutableHashTableV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - mutable_hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - mutable_hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - end - end end @@ -24101,35 +43657,63 @@ end """ begin - function exit_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "Exit") do - desc = tf.NodeDescription("Exit") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) + begin + function exit_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "Exit") do + desc = tf.NodeDescription("Exit") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function exit_eager(data_; name=nothing) - desc = tf.EagerOp("Exit") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(exit, [data_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function exit_eager(data_; name=nothing) + desc = tf.EagerOp("Exit") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) 
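+            # Shared eager scaffolding: tf.execute(desc) runs the op against the
+            # eager runtime and returns a Vector of result tensors; the TapeNode
+            # built next records the public op, its inputs, and res, and
+            # tf.add_node attaches that record to the primary output so the
+            # gradient tape can replay the op later.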
+ node = tf.TapeNode(exit, [data_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exit(data_; name=nothing) - if tf.in_eager_mode() - exit_eager(data_; name=name) - else - exit_graph(data_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exit(data_; name=nothing) + if tf.in_eager_mode() + exit_eager(data_; name=name) + else + exit_graph(data_; name=name) + end end - end + end end @@ -24139,59 +43723,103 @@ end """ begin - function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - local desc - tf.with_op_name(name, "LRN") do - desc = tf.NodeDescription("LRN") - input_ = convert(Tensor{Float32}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) + begin + function lrn_graph(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRN") do + desc = tf.NodeDescription("LRN") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + end + begin + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + end + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + begin + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + desc = tf.EagerOp("LRN") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + end + begin + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + end + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + begin + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.in_eager_mode() + lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - end - tf.Tensor(tf.Operation(desc)) - end - function lrn_eager(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - desc = tf.EagerOp("LRN") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if depth_radius !== 
nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) - end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(lrn, [input_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn(input_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - if tf.in_eager_mode() - lrn_eager(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - else - lrn_graph(input_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - end - end end @@ -24201,63 +43829,115 @@ end """ begin - function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - local desc - tf.with_op_name(name, "StatelessIf") do - desc = tf.NodeDescription("StatelessIf") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) + begin + function stateless_if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "StatelessIf") do + desc = tf.NodeDescription("StatelessIf") + begin + begin + cond_ = convert(Tensor{Any}, cond_) + begin + end + end + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (cond_,) = tf.tf_promote(cond_) + end + end + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + desc = tf.EagerOp("StatelessIf") + cond_ = convert(tf.EagerTensor, cond_) + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + end + begin + desc["Tcond"] = tf.data_type(cond_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.in_eager_mode() + stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - desc = tf.EagerOp("StatelessIf") - cond_ = convert(tf.EagerTensor, cond_) - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - desc["Tcond"] = tf.data_type(cond_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - if tf.in_eager_mode() - stateless_if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - else - stateless_if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - end - end end @@ -24267,49 +43947,97 @@ end """ begin - function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListSetItem") do - desc = tf.NodeDescription("TensorListSetItem") - input_handle_ = convert(Tensor{Any}, input_handle_) - index_ = convert(Tensor{Int32}, index_) - item_ = convert(Tensor{Any}, item_) - (item_,) = tf.tf_promote(item_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - tf.add_input(desc, item_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_set_item_graph(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListSetItem") do + desc = tf.NodeDescription("TensorListSetItem") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + item_ = convert(Tensor{Any}, item_) + begin + end + end + begin + (item_,) = tf.tf_promote(item_) + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, item_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_set_item_eager(input_handle_, index_, 
item_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListSetItem") + input_handle_ = convert(tf.EagerTensor, input_handle_) + index_ = convert(tf.EagerTensor, index_) + item_ = convert(tf.EagerTensor, item_) + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, item_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + begin + desc["element_dtype"] = tf.data_type(item_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + else + tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_set_item_eager(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListSetItem") - input_handle_ = convert(tf.EagerTensor, input_handle_) - index_ = convert(tf.EagerTensor, index_) - item_ = convert(tf.EagerTensor, item_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - tf.add_input(desc, item_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - desc["element_dtype"] = tf.data_type(item_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_set_item, [input_handle_, index_, item_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_set_item(input_handle_, index_, item_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_set_item_eager(input_handle_, index_, item_; name=name, element_dtype=element_dtype) - else - tensor_list_set_item_graph(input_handle_, index_, item_; name=name, element_dtype=element_dtype) - end - end end @@ -24319,35 +44047,63 @@ end """ begin - function rsqrt_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Rsqrt") do - desc = tf.NodeDescription("Rsqrt") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function rsqrt_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Rsqrt") do + desc = tf.NodeDescription("Rsqrt") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rsqrt_eager(x_; name=nothing) - desc = tf.EagerOp("Rsqrt") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(rsqrt, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rsqrt_eager(x_; name=nothing) + desc = tf.EagerOp("Rsqrt") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + 
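+            # Dtype note: in eager mode the "T" attribute is read off the live
+            # input via tf.data_type, whereas the graph builder above relies on
+            # tf.tf_promote at construction time. Commented usage sketch
+            # (assumes an active eager context; illustrative only):
+            #   rsqrt([4.0, 16.0])  # ≈ [0.5, 0.25]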
desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(rsqrt, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt(x_; name=nothing) - if tf.in_eager_mode() - rsqrt_eager(x_; name=name) - else - rsqrt_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rsqrt(x_; name=nothing) + if tf.in_eager_mode() + rsqrt_eager(x_; name=name) + else + rsqrt_graph(x_; name=name) + end end - end + end end @@ -24357,33 +44113,57 @@ end """ begin - function delete_session_tensor_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "DeleteSessionTensor") do - desc = tf.NodeDescription("DeleteSessionTensor") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function delete_session_tensor_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "DeleteSessionTensor") do + desc = tf.NodeDescription("DeleteSessionTensor") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function delete_session_tensor_eager(handle_; name=nothing) - desc = tf.EagerOp("DeleteSessionTensor") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function delete_session_tensor_eager(handle_; name=nothing) + desc = tf.EagerOp("DeleteSessionTensor") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(delete_session_tensor, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function delete_session_tensor(handle_; name=nothing) - if tf.in_eager_mode() - delete_session_tensor_eager(handle_; name=name) - else - delete_session_tensor_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function delete_session_tensor(handle_; name=nothing) + if tf.in_eager_mode() + delete_session_tensor_eager(handle_; name=name) + else + delete_session_tensor_graph(handle_; name=name) + end end - end + end end @@ -24393,63 +44173,129 @@ end """ begin - function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - local desc - tf.with_op_name(name, "OneHot") do - desc = tf.NodeDescription("OneHot") - indices_ = convert(Tensor{Int64}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - depth_ = convert(Tensor{Int32}, depth_) - on_value_ = convert(Tensor{Any}, on_value_) - off_value_ = convert(Tensor{Any}, off_value_) - (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, indices_) - tf.add_input(desc, depth_) - tf.add_input(desc, on_value_) - tf.add_input(desc, off_value_) - if axis !== nothing - axis = Base.Int(axis) - 1 + begin + function one_hot_graph(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + local desc + tf.with_op_name(name, 
"OneHot") do + desc = tf.NodeDescription("OneHot") + begin + begin + indices_ = convert(Tensor{Int64}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + depth_ = convert(Tensor{Int32}, depth_) + begin + end + end + begin + on_value_ = convert(Tensor{Any}, on_value_) + begin + end + end + begin + off_value_ = convert(Tensor{Any}, off_value_) + begin + end + end + begin + (on_value_, off_value_) = tf.tf_promote(on_value_, off_value_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, depth_) + end + begin + tf.add_input(desc, on_value_) + end + begin + tf.add_input(desc, off_value_) + end + end + begin + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + desc = tf.EagerOp("OneHot") + indices_ = convert(tf.EagerTensor, indices_) + depth_ = convert(tf.EagerTensor, depth_) + on_value_ = convert(tf.EagerTensor, on_value_) + off_value_ = convert(tf.EagerTensor, off_value_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, depth_) + end + begin + tf.add_input(desc, on_value_) + end + begin + tf.add_input(desc, off_value_) + end + end + begin + begin + if axis !== nothing + axis = Base.Int(axis) - 1 + end + end + begin + if axis !== nothing + desc["axis"] = Base.Int(axis) + end + end + end + begin + desc["TI"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(on_value_) + end + begin + desc["T"] = tf.data_type(off_value_) + end + res = tf.execute(desc) + node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) + if tf.in_eager_mode() + one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + else + one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) + end end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - end - tf.Tensor(tf.Operation(desc)) end - function one_hot_eager(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - desc = tf.EagerOp("OneHot") - indices_ = convert(tf.EagerTensor, indices_) - depth_ = convert(tf.EagerTensor, depth_) - on_value_ = convert(tf.EagerTensor, on_value_) - off_value_ = convert(tf.EagerTensor, off_value_) - tf.add_input(desc, indices_) - tf.add_input(desc, depth_) - tf.add_input(desc, on_value_) - tf.add_input(desc, off_value_) - if axis !== nothing - axis = Base.Int(axis) - 1 - end - if axis !== nothing - desc["axis"] = Base.Int(axis) - end - desc["TI"] = tf.data_type(indices_) - desc["T"] = tf.data_type(on_value_) - desc["T"] = tf.data_type(off_value_) - res = tf.execute(desc) - node = tf.TapeNode(one_hot, [indices_, depth_, on_value_, off_value_], name=nothing, axis=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_hot(indices_, depth_, on_value_, off_value_; name=nothing, axis=nothing) - if tf.in_eager_mode() - 
one_hot_eager(indices_, depth_, on_value_, off_value_; name=name, axis=axis) - else - one_hot_graph(indices_, depth_, on_value_, off_value_; name=name, axis=axis) - end - end end @@ -24459,73 +44305,169 @@ end """ begin - function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyFtrl") do - desc = tf.NodeDescription("ResourceApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyFtrl") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + begin + function resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyFtrl") do + desc = tf.NodeDescription("ResourceApplyFtrl") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = 
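+              # Conversion note: each scalar input is first coerced to a graph
+              # Tensor of its declared base type; tf.tf_promote below then
+              # unifies the numeric dtypes of grad/lr/l1/l2/lr_power so a single
+              # "T" attribute describes them all.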
convert(Tensor{Any}, l2_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyFtrl") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ftrl, [var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ftrl_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + resource_apply_ftrl_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -24535,128 +44477,262 @@ end """ begin - function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - local desc - tf.with_op_name(name, "SdcaOptimizerV2") do - desc = tf.NodeDescription("SdcaOptimizerV2") - sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] - sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] - sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = 
sparse_feature_values_] - dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - example_labels_ = convert(Tensor{Float32}, example_labels_) - sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] - sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] - dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] - example_state_data_ = convert(Tensor{Float32}, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) + begin + function sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizerV2") do + desc = tf.NodeDescription("SdcaOptimizerV2") + begin + begin + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + begin + end + end + begin + sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + begin + end + end + begin + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + begin + end + end + begin + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + begin + end + end + begin + example_weights_ = convert(Tensor{Float32}, example_weights_) + begin + end + end + begin + example_labels_ = convert(Tensor{Float32}, example_labels_) + begin + end + end + begin + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + begin + end + end + begin + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + begin + end + end + begin + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + begin + end + end + begin + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + begin + end + end + end + begin + begin + tf.add_input(desc, sparse_example_indices_) + end + begin + tf.add_input(desc, sparse_feature_indices_) + end + begin + tf.add_input(desc, sparse_feature_values_) + end + begin + tf.add_input(desc, dense_features_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, example_labels_) + end + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_weights_) + end + begin + tf.add_input(desc, dense_weights_) + end + begin + tf.add_input(desc, example_state_data_) + end + end + begin + begin + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + end + begin + if adaptive !== nothing + desc["adaptive"] = Base.Bool(adaptive) + end + end + begin + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + end + begin + if num_sparse_features_with_values !== nothing + 
desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + end + begin + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + begin + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + end + begin + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + desc = tf.EagerOp("SdcaOptimizerV2") + sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) + sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_) + sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_) + dense_features_ = convert(tf.EagerTensor, dense_features_) + example_weights_ = convert(tf.EagerTensor, example_weights_) + example_labels_ = convert(tf.EagerTensor, example_labels_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_weights_ = convert(tf.EagerTensor, sparse_weights_) + dense_weights_ = convert(tf.EagerTensor, dense_weights_) + example_state_data_ = convert(tf.EagerTensor, example_state_data_) + begin + begin + tf.add_input(desc, sparse_example_indices_) + end + begin + tf.add_input(desc, sparse_feature_indices_) + end + begin + tf.add_input(desc, sparse_feature_values_) + end + begin + tf.add_input(desc, dense_features_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, example_labels_) + end + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_weights_) + end + begin + tf.add_input(desc, dense_weights_) + end + begin + tf.add_input(desc, example_state_data_) + end + end + begin + begin + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + end + begin + if adaptive !== nothing + desc["adaptive"] = Base.Bool(adaptive) + end + end + begin + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + end + begin + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + end + begin + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + begin + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + end + begin + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, 
sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.in_eager_mode() + sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end end - if adaptive !== nothing - desc["adaptive"] = Base.Bool(adaptive) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - desc = tf.EagerOp("SdcaOptimizerV2") - sparse_example_indices_ = convert(tf.EagerTensor, 
sparse_example_indices_) - sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_) - sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_) - dense_features_ = convert(tf.EagerTensor, dense_features_) - example_weights_ = convert(tf.EagerTensor, example_weights_) - example_labels_ = convert(tf.EagerTensor, example_labels_) - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_weights_ = convert(tf.EagerTensor, sparse_weights_) - dense_weights_ = convert(tf.EagerTensor, dense_weights_) - example_state_data_ = convert(tf.EagerTensor, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) - end - if adaptive !== nothing - desc["adaptive"] = Base.Bool(adaptive) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - res = tf.execute(desc) - node = tf.TapeNode(sdca_optimizer_v2, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer_v2(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptive=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.in_eager_mode() - sdca_optimizer_v2_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - else - 
sdca_optimizer_v2_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptive=adaptive, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - end - end end @@ -24666,49 +44742,89 @@ end """ begin - function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueue") do - desc = tf.NodeDescription("QueueEnqueue") - handle_ = convert(Tensor{String}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) + begin + function queue_enqueue_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueue") do + desc = tf.NodeDescription("QueueEnqueue") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueue") + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_enqueue_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueEnqueue") - handle_ = convert(tf.EagerTensor, handle_) - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if 
timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - end - end end @@ -24718,60 +44834,106 @@ end """ begin - function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - local desc - tf.with_op_name(name, "CTCBeamSearchDecoder") do - desc = tf.NodeDescription("CTCBeamSearchDecoder") - inputs_ = convert(Tensor{Float32}, inputs_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, sequence_length_) - if beam_width !== nothing - desc["beam_width"] = Base.Int(beam_width) + begin + function ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCBeamSearchDecoder") do + desc = tf.NodeDescription("CTCBeamSearchDecoder") + begin + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, sequence_length_) + end + end + begin + begin + if beam_width !== nothing + desc["beam_width"] = Base.Int(beam_width) + end + end + begin + if top_paths !== nothing + desc["top_paths"] = Base.Int(top_paths) + end + end + begin + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + desc = tf.EagerOp("CTCBeamSearchDecoder") + inputs_ = convert(tf.EagerTensor, inputs_) + sequence_length_ = convert(tf.EagerTensor, sequence_length_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, sequence_length_) + end + end + begin + begin + if beam_width !== nothing + desc["beam_width"] = Base.Int(beam_width) + end + end + begin + if top_paths !== nothing + desc["top_paths"] = Base.Int(top_paths) + end + end + begin + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) + if tf.in_eager_mode() + ctc_beam_search_decoder_eager(inputs_, 
sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + else + ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) + end end - if top_paths !== nothing - desc["top_paths"] = Base.Int(top_paths) - end - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - desc = tf.EagerOp("CTCBeamSearchDecoder") - inputs_ = convert(tf.EagerTensor, inputs_) - sequence_length_ = convert(tf.EagerTensor, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, sequence_length_) - if beam_width !== nothing - desc["beam_width"] = Base.Int(beam_width) - end - if top_paths !== nothing - desc["top_paths"] = Base.Int(top_paths) - end - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) - end - res = tf.execute(desc) - node = tf.TapeNode(ctc_beam_search_decoder, [inputs_, sequence_length_], name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_beam_search_decoder(inputs_, sequence_length_; name=nothing, beam_width=nothing, top_paths=nothing, merge_repeated=nothing) - if tf.in_eager_mode() - ctc_beam_search_decoder_eager(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) - else - ctc_beam_search_decoder_graph(inputs_, sequence_length_; name=name, beam_width=beam_width, top_paths=top_paths, merge_repeated=merge_repeated) - end - end end @@ -24781,59 +44943,95 @@ end """ begin - function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - local desc - tf.with_op_name(name, "ConditionalAccumulator") do - desc = tf.NodeDescription("ConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "ConditionalAccumulator") do + desc = tf.NodeDescription("ConditionalAccumulator") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + desc = tf.EagerOp("ConditionalAccumulator") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end 
+ end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.in_eager_mode() + conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - desc = tf.EagerOp("ConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - res = tf.execute(desc) - node = tf.TapeNode(conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.in_eager_mode() - conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - else - conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - end - end end @@ -24843,41 +45041,65 @@ end """ begin - function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "WholeFileReader") do - desc = tf.NodeDescription("WholeFileReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function whole_file_reader_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReader") do + desc = tf.NodeDescription("WholeFileReader") + begin 
+ end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function whole_file_reader_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("WholeFileReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function whole_file_reader_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("WholeFileReader") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(whole_file_reader, [], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) - else - whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + whole_file_reader_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -24887,76 +45109,178 @@ end """ begin - function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyRMSProp") do - desc = tf.NodeDescription("ApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyRMSProp") - var_ = convert(tf.EagerTensor, var_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, 
momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(ms_) - desc["T"] = tf.data_type(mom_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyRMSProp") do + desc = tf.NodeDescription("ApplyRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyRMSProp") + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + 
begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(ms_) + end + begin + desc["T"] = tf.data_type(mom_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -24966,37 +45290,69 @@ end """ begin - function adjust_saturation_graph(images_, scale_; name=nothing) - local desc - tf.with_op_name(name, "AdjustSaturation") do - desc = tf.NodeDescription("AdjustSaturation") - images_ = convert(Tensor{Float32}, images_) - scale_ = convert(Tensor{Float32}, scale_) - tf.add_input(desc, images_) - tf.add_input(desc, scale_) + begin + function adjust_saturation_graph(images_, scale_; name=nothing) + local desc + tf.with_op_name(name, "AdjustSaturation") do + desc = tf.NodeDescription("AdjustSaturation") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + scale_ = convert(Tensor{Float32}, scale_) + begin + end + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, scale_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function adjust_saturation_eager(images_, scale_; name=nothing) - desc = tf.EagerOp("AdjustSaturation") - images_ = convert(tf.EagerTensor, images_) - scale_ = convert(tf.EagerTensor, scale_) - tf.add_input(desc, images_) - tf.add_input(desc, scale_) - res = tf.execute(desc) - node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function adjust_saturation_eager(images_, scale_; name=nothing) + desc = tf.EagerOp("AdjustSaturation") + images_ = convert(tf.EagerTensor, images_) + scale_ = convert(tf.EagerTensor, scale_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, scale_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(adjust_saturation, [images_, scale_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_saturation(images_, scale_; name=nothing) - if tf.in_eager_mode() - adjust_saturation_eager(images_, scale_; name=name) - else - adjust_saturation_graph(images_, scale_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_saturation(images_, scale_; 
name=nothing) + if tf.in_eager_mode() + adjust_saturation_eager(images_, scale_; name=name) + else + adjust_saturation_graph(images_, scale_; name=name) + end end - end + end end @@ -25006,39 +45362,75 @@ end """ begin - function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableRemoveV2") do - desc = tf.NodeDescription("LookupTableRemoveV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - (keys_,) = tf.tf_promote(keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) + begin + function lookup_table_remove_v2_graph(table_handle_, keys_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableRemoveV2") do + desc = tf.NodeDescription("LookupTableRemoveV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing) - desc = tf.EagerOp("LookupTableRemoveV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - desc["Tin"] = tf.data_type(keys_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function lookup_table_remove_v2_eager(table_handle_, keys_; name=nothing) + desc = tf.EagerOp("LookupTableRemoveV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_remove_v2, [table_handle_, keys_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing) - if tf.in_eager_mode() - lookup_table_remove_v2_eager(table_handle_, keys_; name=name) - else - lookup_table_remove_v2_graph(table_handle_, keys_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_remove_v2(table_handle_, keys_; name=nothing) + if tf.in_eager_mode() + lookup_table_remove_v2_eager(table_handle_, keys_; name=name) + else + lookup_table_remove_v2_graph(table_handle_, keys_; name=name) + end end - end + end end @@ -25048,39 +45440,67 @@ end """ begin - function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "QueueClose") do - desc = tf.NodeDescription("QueueClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + begin + function queue_close_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueClose") do + desc = tf.NodeDescription("QueueClose") + begin + begin + 
handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) - desc = tf.EagerOp("QueueClose") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function queue_close_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("QueueClose") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_close, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + queue_close_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end + end end @@ -25090,49 +45510,89 @@ end """ begin - function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "PrefetchDataset") do - desc = tf.NodeDescription("PrefetchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function prefetch_dataset_graph(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "PrefetchDataset") do + desc = tf.NodeDescription("PrefetchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin 
+ function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("PrefetchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + else + prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function prefetch_dataset_eager(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("PrefetchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(prefetch_dataset, [input_dataset_, buffer_size_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prefetch_dataset(input_dataset_, buffer_size_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - prefetch_dataset_eager(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) - else - prefetch_dataset_graph(input_dataset_, buffer_size_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -25142,73 +45602,129 @@ end """ begin - function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "MapDataset") do - desc = tf.NodeDescription("MapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin 
+ function map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "MapDataset") do + desc = tf.NodeDescription("MapDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("MapDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if use_inter_op_parallelism !== nothing + desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + else + map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, 
output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) end - function map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("MapDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if use_inter_op_parallelism !== nothing - desc["use_inter_op_parallelism"] = Base.Bool(use_inter_op_parallelism) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, use_inter_op_parallelism=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - else - map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, use_inter_op_parallelism=use_inter_op_parallelism, preserve_cardinality=preserve_cardinality) - end - end end @@ -25218,47 +45734,91 @@ end """ begin - function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayReadV3") do - desc = tf.NodeDescription("TensorArrayReadV3") - handle_ = convert(Tensor{Any}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_read_v3_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayReadV3") do + desc = tf.NodeDescription("TensorArrayReadV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + 
end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) + desc = tf.EagerOp("TensorArrayReadV3") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_read_v3_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) - desc = tf.EagerOp("TensorArrayReadV3") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_read_v3, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read_v3(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_v3_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_v3_graph(handle_, index_, flow_in_; name=name, dtype=dtype) - end - end end @@ -25268,35 +45828,63 @@ end """ begin - function identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Identity") do - desc = tf.NodeDescription("Identity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Identity") do + desc = tf.NodeDescription("Identity") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function identity_eager(input_; name=nothing) - desc = tf.EagerOp("Identity") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(identity, [input_], name=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] + begin + function identity_eager(input_; name=nothing) + desc = tf.EagerOp("Identity") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(identity, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity(input_; name=nothing) - if tf.in_eager_mode() - identity_eager(input_; name=name) - else - identity_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function identity(input_; name=nothing) + if tf.in_eager_mode() + identity_eager(input_; name=name) + else + identity_graph(input_; name=name) + end end - end + end end @@ -25306,63 +45894,115 @@ end """ begin - function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - local desc - tf.with_op_name(name, "Print") do - desc = tf.NodeDescription("Print") - input_ = convert(Tensor{Any}, input_) - data_ = [convert(Tensor{Any}, x) for x = data_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, data_) - if U !== nothing - desc["U"] = map(Base.identity, U) - end - if message !== nothing - desc["message"] = Base.String(message) + begin + function print_graph(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + local desc + tf.with_op_name(name, "Print") do + desc = tf.NodeDescription("Print") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if U !== nothing + desc["U"] = map(Base.identity, U) + end + end + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + begin + if first_n !== nothing + desc["first_n"] = Base.Int(first_n) + end + end + begin + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) + desc = tf.EagerOp("Print") + input_ = convert(tf.EagerTensor, input_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if U !== nothing + desc["U"] = map(Base.identity, U) + end + end + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + begin + if first_n !== nothing + desc["first_n"] = Base.Int(first_n) + end + end + begin + if summarize !== nothing + desc["summarize"] = Base.Int(summarize) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, 
summarize=nothing) + if tf.in_eager_mode() + print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + else + print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) + end end - if first_n !== nothing - desc["first_n"] = Base.Int(first_n) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - end - tf.Tensor(tf.Operation(desc)) - end - function print_eager(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - desc = tf.EagerOp("Print") - input_ = convert(tf.EagerTensor, input_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, input_) - tf.add_input(desc, data_) - if U !== nothing - desc["U"] = map(Base.identity, U) - end - if message !== nothing - desc["message"] = Base.String(message) - end - if first_n !== nothing - desc["first_n"] = Base.Int(first_n) - end - if summarize !== nothing - desc["summarize"] = Base.Int(summarize) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(print, [input_, data_], name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print(input_, data_; name=nothing, U=nothing, message=nothing, first_n=nothing, summarize=nothing) - if tf.in_eager_mode() - print_eager(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) - else - print_graph(input_, data_; name=name, U=U, message=message, first_n=first_n, summarize=summarize) - end - end end @@ -25372,59 +46012,103 @@ end """ begin - function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - local desc - tf.with_op_name(name, "CollectiveBcastSend") do - desc = tf.NodeDescription("CollectiveBcastSend") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) + begin + function collective_bcast_send_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + local desc + tf.with_op_name(name, "CollectiveBcastSend") do + desc = tf.NodeDescription("CollectiveBcastSend") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + desc = tf.EagerOp("CollectiveBcastSend") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + 
desc["group_key"] = Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) + if tf.in_eager_mode() + collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + else + collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) + end end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function collective_bcast_send_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - desc = tf.EagerOp("CollectiveBcastSend") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(collective_bcast_send, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_bcast_send(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, shape=nothing) - if tf.in_eager_mode() - collective_bcast_send_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - else - collective_bcast_send_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape) - end - end end @@ -25434,50 +46118,84 @@ end Converts a list of tensors to an array of tensors. 
""" begin - function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ListToArray") do - desc = tf.NodeDescription("_ListToArray") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if N !== nothing - desc["N"] = Base.Int(N) + begin + function _list_to_array_graph(input_; name=nothing, Tin=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ListToArray") do + desc = tf.NodeDescription("_ListToArray") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing) + desc = tf.EagerOp("_ListToArray") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) + if tf.in_eager_mode() + _list_to_array_eager(input_; name=name, Tin=Tin, N=N) + else + _list_to_array_graph(input_; name=name, Tin=Tin, N=N) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _list_to_array_eager(input_; name=nothing, Tin=nothing, N=nothing) - desc = tf.EagerOp("_ListToArray") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(_list_to_array, [input_], name=nothing, Tin=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _list_to_array(input_; name=nothing, Tin=nothing, N=nothing) - if tf.in_eager_mode() - _list_to_array_eager(input_; name=name, Tin=Tin, N=N) - else - _list_to_array_graph(input_; name=name, Tin=Tin, N=N) - end - end end @@ -25487,61 +46205,125 @@ end """ begin - function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) - local desc - tf.with_op_name(name, "NegTrain") do - desc = tf.NodeDescription("NegTrain") - w_in_ = convert(Tensor{Float32}, w_in_) - w_out_ = convert(Tensor{Float32}, w_out_) - examples_ = convert(Tensor{Int32}, examples_) - labels_ = convert(Tensor{Int32}, labels_) - lr_ = convert(Tensor{Float32}, lr_) - tf.add_input(desc, w_in_) - tf.add_input(desc, w_out_) - tf.add_input(desc, examples_) - tf.add_input(desc, labels_) - tf.add_input(desc, lr_) - if vocab_count !== nothing - desc["vocab_count"] = map(Base.identity, vocab_count) + 
begin + function neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + local desc + tf.with_op_name(name, "NegTrain") do + desc = tf.NodeDescription("NegTrain") + begin + begin + w_in_ = convert(Tensor{Float32}, w_in_) + begin + end + end + begin + w_out_ = convert(Tensor{Float32}, w_out_) + begin + end + end + begin + examples_ = convert(Tensor{Int32}, examples_) + begin + end + end + begin + labels_ = convert(Tensor{Int32}, labels_) + begin + end + end + begin + lr_ = convert(Tensor{Float32}, lr_) + begin + end + end + end + begin + begin + tf.add_input(desc, w_in_) + end + begin + tf.add_input(desc, w_out_) + end + begin + tf.add_input(desc, examples_) + end + begin + tf.add_input(desc, labels_) + end + begin + tf.add_input(desc, lr_) + end + end + begin + begin + if vocab_count !== nothing + desc["vocab_count"] = map(Base.identity, vocab_count) + end + end + begin + if num_negative_samples !== nothing + desc["num_negative_samples"] = Base.Int(num_negative_samples) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + desc = tf.EagerOp("NegTrain") + w_in_ = convert(tf.EagerTensor, w_in_) + w_out_ = convert(tf.EagerTensor, w_out_) + examples_ = convert(tf.EagerTensor, examples_) + labels_ = convert(tf.EagerTensor, labels_) + lr_ = convert(tf.EagerTensor, lr_) + begin + begin + tf.add_input(desc, w_in_) + end + begin + tf.add_input(desc, w_out_) + end + begin + tf.add_input(desc, examples_) + end + begin + tf.add_input(desc, labels_) + end + begin + tf.add_input(desc, lr_) + end + end + begin + begin + if vocab_count !== nothing + desc["vocab_count"] = map(Base.identity, vocab_count) + end + end + begin + if num_negative_samples !== nothing + desc["num_negative_samples"] = Base.Int(num_negative_samples) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) + if tf.in_eager_mode() + neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + else + neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) + end end - if num_negative_samples !== nothing - desc["num_negative_samples"] = Base.Int(num_negative_samples) - end - end - tf.Tensor(tf.Operation(desc)) - end - function neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) - desc = tf.EagerOp("NegTrain") - w_in_ = convert(tf.EagerTensor, w_in_) - w_out_ = convert(tf.EagerTensor, w_out_) - examples_ = convert(tf.EagerTensor, examples_) - labels_ = convert(tf.EagerTensor, labels_) - lr_ = convert(tf.EagerTensor, lr_) - tf.add_input(desc, w_in_) - tf.add_input(desc, w_out_) - tf.add_input(desc, examples_) - tf.add_input(desc, labels_) - tf.add_input(desc, lr_) - if vocab_count !== nothing - desc["vocab_count"] = map(Base.identity, vocab_count) - end - if num_negative_samples !== 
nothing - desc["num_negative_samples"] = Base.Int(num_negative_samples) - end - res = tf.execute(desc) - node = tf.TapeNode(neg_train, [w_in_, w_out_, examples_, labels_, lr_], name=nothing, vocab_count=nothing, num_negative_samples=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function neg_train(w_in_, w_out_, examples_, labels_, lr_; name=nothing, vocab_count=nothing, num_negative_samples=nothing) - if tf.in_eager_mode() - neg_train_eager(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) - else - neg_train_graph(w_in_, w_out_, examples_, labels_, lr_; name=name, vocab_count=vocab_count, num_negative_samples=num_negative_samples) - end - end end @@ -25551,43 +46333,79 @@ end """ begin - function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) - local desc - tf.with_op_name(name, "MergeV2Checkpoints") do - desc = tf.NodeDescription("MergeV2Checkpoints") - checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_) - destination_prefix_ = convert(Tensor{String}, destination_prefix_) - tf.add_input(desc, checkpoint_prefixes_) - tf.add_input(desc, destination_prefix_) - if delete_old_dirs !== nothing - desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) + begin + function merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + local desc + tf.with_op_name(name, "MergeV2Checkpoints") do + desc = tf.NodeDescription("MergeV2Checkpoints") + begin + begin + checkpoint_prefixes_ = convert(Tensor{String}, checkpoint_prefixes_) + begin + end + end + begin + destination_prefix_ = convert(Tensor{String}, destination_prefix_) + begin + end + end + end + begin + begin + tf.add_input(desc, checkpoint_prefixes_) + end + begin + tf.add_input(desc, destination_prefix_) + end + end + begin + begin + if delete_old_dirs !== nothing + desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + desc = tf.EagerOp("MergeV2Checkpoints") + checkpoint_prefixes_ = convert(tf.EagerTensor, checkpoint_prefixes_) + destination_prefix_ = convert(tf.EagerTensor, destination_prefix_) + begin + begin + tf.add_input(desc, checkpoint_prefixes_) + end + begin + tf.add_input(desc, destination_prefix_) + end + end + begin + begin + if delete_old_dirs !== nothing + desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) + if tf.in_eager_mode() + merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + else + merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function merge_v2checkpoints_eager(checkpoint_prefixes_, 
destination_prefix_; name=nothing, delete_old_dirs=nothing) - desc = tf.EagerOp("MergeV2Checkpoints") - checkpoint_prefixes_ = convert(tf.EagerTensor, checkpoint_prefixes_) - destination_prefix_ = convert(tf.EagerTensor, destination_prefix_) - tf.add_input(desc, checkpoint_prefixes_) - tf.add_input(desc, destination_prefix_) - if delete_old_dirs !== nothing - desc["delete_old_dirs"] = Base.Bool(delete_old_dirs) - end - res = tf.execute(desc) - node = tf.TapeNode(merge_v2checkpoints, [checkpoint_prefixes_, destination_prefix_], name=nothing, delete_old_dirs=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_v2checkpoints(checkpoint_prefixes_, destination_prefix_; name=nothing, delete_old_dirs=nothing) - if tf.in_eager_mode() - merge_v2checkpoints_eager(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) - else - merge_v2checkpoints_graph(checkpoint_prefixes_, destination_prefix_; name=name, delete_old_dirs=delete_old_dirs) - end - end end @@ -25597,33 +46415,57 @@ end Worker heartbeat op. """ begin - function worker_heartbeat_graph(request_; name=nothing) - local desc - tf.with_op_name(name, "WorkerHeartbeat") do - desc = tf.NodeDescription("WorkerHeartbeat") - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, request_) + begin + function worker_heartbeat_graph(request_; name=nothing) + local desc + tf.with_op_name(name, "WorkerHeartbeat") do + desc = tf.NodeDescription("WorkerHeartbeat") + begin + begin + request_ = convert(Tensor{String}, request_) + begin + end + end + end + begin + begin + tf.add_input(desc, request_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function worker_heartbeat_eager(request_; name=nothing) - desc = tf.EagerOp("WorkerHeartbeat") - request_ = convert(tf.EagerTensor, request_) - tf.add_input(desc, request_) - res = tf.execute(desc) - node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function worker_heartbeat_eager(request_; name=nothing) + desc = tf.EagerOp("WorkerHeartbeat") + request_ = convert(tf.EagerTensor, request_) + begin + begin + tf.add_input(desc, request_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(worker_heartbeat, [request_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function worker_heartbeat(request_; name=nothing) - if tf.in_eager_mode() - worker_heartbeat_eager(request_; name=name) - else - worker_heartbeat_graph(request_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function worker_heartbeat(request_; name=nothing) + if tf.in_eager_mode() + worker_heartbeat_eager(request_; name=name) + else + worker_heartbeat_graph(request_; name=name) + end end - end + end end @@ -25633,39 +46475,75 @@ end An Op to permute tensors across replicated TPU instances. 
Each instance """ begin - function collective_permute_graph(input_, source_target_pairs_; name=nothing) - local desc - tf.with_op_name(name, "CollectivePermute") do - desc = tf.NodeDescription("CollectivePermute") - input_ = convert(Tensor{Any}, input_) - source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, source_target_pairs_) - end - tf.Tensor(tf.Operation(desc)) - end - function collective_permute_eager(input_, source_target_pairs_; name=nothing) - desc = tf.EagerOp("CollectivePermute") - input_ = convert(tf.EagerTensor, input_) - source_target_pairs_ = convert(tf.EagerTensor, source_target_pairs_) - tf.add_input(desc, input_) - tf.add_input(desc, source_target_pairs_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing) - if tf.in_eager_mode() - collective_permute_eager(input_, source_target_pairs_; name=name) - else - collective_permute_graph(input_, source_target_pairs_; name=name) + begin + function collective_permute_graph(input_, source_target_pairs_; name=nothing) + local desc + tf.with_op_name(name, "CollectivePermute") do + desc = tf.NodeDescription("CollectivePermute") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + source_target_pairs_ = convert(Tensor{Int32}, source_target_pairs_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, source_target_pairs_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function collective_permute_eager(input_, source_target_pairs_; name=nothing) + desc = tf.EagerOp("CollectivePermute") + input_ = convert(tf.EagerTensor, input_) + source_target_pairs_ = convert(tf.EagerTensor, source_target_pairs_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, source_target_pairs_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(collective_permute, [input_, source_target_pairs_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_permute(input_, source_target_pairs_; name=nothing) + if tf.in_eager_mode() + collective_permute_eager(input_, source_target_pairs_; name=name) + else + collective_permute_graph(input_, source_target_pairs_; name=name) + end end - end + end end @@ -25675,61 +46553,125 @@ end """ begin - function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantizeV3") do - desc = tf.NodeDescription("QuantizeAndDequantizeV3") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Any}, input_min_) - input_max_ = convert(Tensor{Any}, input_max_) - num_bits_ = convert(Tensor{Int32}, num_bits_) - (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - 
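# [editor's note] Element-type attrs are resolved differently on the two
# paths. Graph builders run tf.tf_promote so mixed numeric inputs agree
# on one dtype before the node is created; eager executors skip promotion
# and instead stamp the concrete dtype of each converted input into the
# description, e.g. for QuantizeAndDequantizeV3 below:
#
#   desc["T"] = tf.data_type(input_)
#   desc["T"] = tf.data_type(input_min_)
#   desc["T"] = tf.data_type(input_max_)
#
# All three writes target the same attr, so the last one wins; that is
# only well-defined when the caller passes inputs of matching dtype,
# which the graph path would have guaranteed via promotion.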
tf.add_input(desc, input_max_) - tf.add_input(desc, num_bits_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) + begin + function quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV3") do + desc = tf.NodeDescription("QuantizeAndDequantizeV3") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_min_ = convert(Tensor{Any}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Any}, input_max_) + begin + end + end + begin + num_bits_ = convert(Tensor{Int32}, num_bits_) + begin + end + end + begin + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + begin + tf.add_input(desc, num_bits_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + desc = tf.EagerOp("QuantizeAndDequantizeV3") + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + num_bits_ = convert(tf.EagerTensor, num_bits_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + begin + tf.add_input(desc, num_bits_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_min_) + end + begin + desc["T"] = tf.data_type(input_max_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + else + quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) + end end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - end - tf.Tensor(tf.Operation(desc)) - end - function quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) - desc = tf.EagerOp("QuantizeAndDequantizeV3") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - num_bits_ = convert(tf.EagerTensor, 
num_bits_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, num_bits_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_min_) - desc["T"] = tf.data_type(input_max_) - res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize_v3, [input_, input_min_, input_max_, num_bits_], name=nothing, signed_input=nothing, range_given=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v3(input_, input_min_, input_max_, num_bits_; name=nothing, signed_input=nothing, range_given=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_v3_eager(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) - else - quantize_and_dequantize_v3_graph(input_, input_min_, input_max_, num_bits_; name=name, signed_input=signed_input, range_given=range_given) - end - end end @@ -25739,59 +46681,95 @@ end """ begin - function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "HashTable") do - desc = tf.NodeDescription("HashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + begin + function hash_table_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTable") do + desc = tf.NodeDescription("HashTable") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("HashTable") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, 
value_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - end - tf.Tensor(tf.Operation(desc)) end - function hash_table_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - desc = tf.EagerOp("HashTable") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(hash_table, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - hash_table_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - hash_table_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - end - end end @@ -25801,40 +46779,78 @@ end """ begin - function softplus_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "SoftplusGrad") do - desc = tf.NodeDescription("SoftplusGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) + begin + function softplus_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftplusGrad") do + desc = tf.NodeDescription("SoftplusGrad") + begin + begin + gradients_ = convert(Tensor{Any}, gradients_) + begin + end + end + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (gradients_, features_) = tf.tf_promote(gradients_, features_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function softplus_grad_eager(gradients_, features_; 
name=nothing) - desc = tf.EagerOp("SoftplusGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function softplus_grad_eager(gradients_, features_; name=nothing) + desc = tf.EagerOp("SoftplusGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(softplus_grad, [gradients_, features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - softplus_grad_eager(gradients_, features_; name=name) - else - softplus_grad_graph(gradients_, features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softplus_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + softplus_grad_eager(gradients_, features_; name=name) + else + softplus_grad_graph(gradients_, features_; name=name) + end end - end + end end @@ -25844,65 +46860,105 @@ end """ begin - function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordReader") do - desc = tf.NodeDescription("FixedLengthRecordReader") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) + begin + function fixed_length_record_reader_graph(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordReader") do + desc = tf.NodeDescription("FixedLengthRecordReader") + begin + end + begin + end + begin + begin + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + end + begin + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + end + begin + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + end + begin + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fixed_length_record_reader_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FixedLengthRecordReader") + begin + end + begin + begin + if header_bytes !== nothing + desc["header_bytes"] = Base.Int(header_bytes) + end + end + begin + if record_bytes !== nothing + desc["record_bytes"] = Base.Int(record_bytes) + end + end + 
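# [editor's note] Source ops such as FixedLengthRecordReader, TFRecordReader
# and HashTable take no tensor inputs at all: their wrappers only copy
# keyword arguments into the description, coercing each through Base.Int,
# Base.Bool or Base.String and guarding with `!== nothing` so omitted
# attrs fall back to the kernel defaults. A hypothetical call
#
#   reader = fixed_length_record_reader(record_bytes=8, container="pool")
#
# would write exactly those two attrs into `desc`; header_bytes,
# footer_bytes, hop_bytes and shared_name are never set.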
begin + if footer_bytes !== nothing + desc["footer_bytes"] = Base.Int(footer_bytes) + end + end + begin + if hop_bytes !== nothing + desc["hop_bytes"] = Base.Int(hop_bytes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + else + fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) + end end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) - end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fixed_length_record_reader_eager(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("FixedLengthRecordReader") - if header_bytes !== nothing - desc["header_bytes"] = Base.Int(header_bytes) - end - if record_bytes !== nothing - desc["record_bytes"] = Base.Int(record_bytes) - end - if footer_bytes !== nothing - desc["footer_bytes"] = Base.Int(footer_bytes) - end - if hop_bytes !== nothing - desc["hop_bytes"] = Base.Int(hop_bytes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_reader, [], name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_reader(; name=nothing, header_bytes=nothing, record_bytes=nothing, footer_bytes=nothing, hop_bytes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fixed_length_record_reader_eager(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) - else - fixed_length_record_reader_graph(; name=name, header_bytes=header_bytes, record_bytes=record_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, container=container, shared_name=shared_name) - end - end end @@ 
-25912,47 +46968,99 @@ end """ begin - function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatterV2") do - desc = tf.NodeDescription("TensorArrayScatterV2") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayScatterV2") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=name) - else - tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name) + begin + function tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV2") do + desc = tf.NodeDescription("TensorArrayScatterV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayScatterV2") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter_v2, [handle_, indices_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v2(handle_, indices_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_scatter_v2_eager(handle_, indices_, value_, flow_in_; 
name=name) + else + tensor_array_scatter_v2_graph(handle_, indices_, value_, flow_in_; name=name) + end end - end + end end @@ -25962,33 +47070,57 @@ end """ begin - function decode_json_example_graph(json_examples_; name=nothing) - local desc - tf.with_op_name(name, "DecodeJSONExample") do - desc = tf.NodeDescription("DecodeJSONExample") - json_examples_ = convert(Tensor{String}, json_examples_) - tf.add_input(desc, json_examples_) + begin + function decode_json_example_graph(json_examples_; name=nothing) + local desc + tf.with_op_name(name, "DecodeJSONExample") do + desc = tf.NodeDescription("DecodeJSONExample") + begin + begin + json_examples_ = convert(Tensor{String}, json_examples_) + begin + end + end + end + begin + begin + tf.add_input(desc, json_examples_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function decode_json_example_eager(json_examples_; name=nothing) - desc = tf.EagerOp("DecodeJSONExample") - json_examples_ = convert(tf.EagerTensor, json_examples_) - tf.add_input(desc, json_examples_) - res = tf.execute(desc) - node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function decode_json_example_eager(json_examples_; name=nothing) + desc = tf.EagerOp("DecodeJSONExample") + json_examples_ = convert(tf.EagerTensor, json_examples_) + begin + begin + tf.add_input(desc, json_examples_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(decode_json_example, [json_examples_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_json_example(json_examples_; name=nothing) - if tf.in_eager_mode() - decode_json_example_eager(json_examples_; name=name) - else - decode_json_example_graph(json_examples_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_json_example(json_examples_; name=nothing) + if tf.in_eager_mode() + decode_json_example_eager(json_examples_; name=name) + else + decode_json_example_graph(json_examples_; name=name) + end end - end + end end @@ -25998,84 +47130,170 @@ end """ begin - function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNormGradV2") do - desc = tf.NodeDescription("FusedBatchNormGradV2") - y_backprop_ = convert(Tensor{Any}, y_backprop_) - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Float32}, scale_) - reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) - reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) - (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_) - (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_) - tf.add_input(desc, y_backprop_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, reserve_space_1_) - tf.add_input(desc, reserve_space_2_) - if U !== nothing - desc["U"] = Base.identity(U) + begin + function fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormGradV2") do + desc = tf.NodeDescription("FusedBatchNormGradV2") + begin 
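# [editor's note] Ops with several outputs get a small epilogue in the
# graph builder: rather than wrapping the Operation in one Tensor, the
# generated code indexes every output port. For the five outputs of
# FusedBatchNormGradV2 below:
#
#   out = tf.Tensor[]
#   op = tf.Operation(desc)
#   for out_idx = 1:5
#       push!(out, tf.Tensor(op, out_idx))
#   end
#   out
#
# The eager twin mirrors this by returning the whole `res` vector from
# tf.execute instead of res[1].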
+ begin + y_backprop_ = convert(Tensor{Any}, y_backprop_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + scale_ = convert(Tensor{Float32}, scale_) + begin + end + end + begin + reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) + begin + end + end + begin + reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) + begin + end + end + begin + (reserve_space_1_, reserve_space_2_) = tf.tf_promote(reserve_space_1_, reserve_space_2_) + end + begin + (y_backprop_, x_) = tf.tf_promote(y_backprop_, x_) + end + end + begin + begin + tf.add_input(desc, y_backprop_) + end + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, reserve_space_1_) + end + begin + tf.add_input(desc, reserve_space_2_) + end + end + begin + begin + if U !== nothing + desc["U"] = Base.identity(U) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormGradV2") + y_backprop_ = convert(tf.EagerTensor, y_backprop_) + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_) + reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_) + begin + begin + tf.add_input(desc, y_backprop_) + end + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, reserve_space_1_) + end + begin + tf.add_input(desc, reserve_space_2_) + end + end + begin + begin + if U !== nothing + desc["U"] = Base.identity(U) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(y_backprop_) + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["U"] = tf.data_type(reserve_space_1_) + end + begin + desc["U"] = tf.data_type(reserve_space_2_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, 
epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - desc = tf.EagerOp("FusedBatchNormGradV2") - y_backprop_ = convert(tf.EagerTensor, y_backprop_) - x_ = convert(tf.EagerTensor, x_) - scale_ = convert(tf.EagerTensor, scale_) - reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_) - reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_) - tf.add_input(desc, y_backprop_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, reserve_space_1_) - tf.add_input(desc, reserve_space_2_) - if U !== nothing - desc["U"] = Base.identity(U) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(y_backprop_) - desc["T"] = tf.data_type(x_) - desc["U"] = tf.data_type(reserve_space_1_) - desc["U"] = tf.data_type(reserve_space_2_) - res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_grad_v2, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad_v2(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, U=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_grad_v2_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_grad_v2_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, U=U, epsilon=epsilon, data_format=data_format, is_training=is_training) - end - end end @@ -26085,53 +47303,93 @@ end Cast x of type SrcT to y of DstT. 
""" begin - function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - local desc - tf.with_op_name(name, "_HostCast") do - desc = tf.NodeDescription("_HostCast") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) + begin + function _host_cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "_HostCast") do + desc = tf.NodeDescription("_HostCast") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + end + begin + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + end + begin + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + desc = tf.EagerOp("_HostCast") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + end + begin + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + end + begin + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + end + begin + desc["SrcT"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.in_eager_mode() + _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _host_cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - desc = tf.EagerOp("_HostCast") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - desc["SrcT"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(_host_cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - if tf.in_eager_mode() - _host_cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - else - _host_cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - end - end end @@ -26141,47 +47399,75 @@ end """ begin - function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "TFRecordReader") do - desc = tf.NodeDescription("TFRecordReader") - if 
container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function tf_record_reader_graph(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "TFRecordReader") do + desc = tf.NodeDescription("TFRecordReader") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tf_record_reader_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + desc = tf.EagerOp("TFRecordReader") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) + if tf.in_eager_mode() + tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + else + tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) + end end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tf_record_reader_eager(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - desc = tf.EagerOp("TFRecordReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - res = tf.execute(desc) - node = tf.TapeNode(tf_record_reader, [], name=nothing, container=nothing, shared_name=nothing, compression_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tf_record_reader(; name=nothing, container=nothing, shared_name=nothing, compression_type=nothing) - if tf.in_eager_mode() - tf_record_reader_eager(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) - else - tf_record_reader_graph(; name=name, container=container, shared_name=shared_name, compression_type=compression_type) - end - end end @@ -26191,57 +47477,97 @@ end """ begin - function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "While") do - desc = tf.NodeDescription("While") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) 
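# Editor's note (an illustrative distillation of the *_graph wrappers in this
# file, not part of the generated diff; "ExampleOp" and `attr` are placeholder
# names): every graph-mode wrapper builds a NodeDescription under
# tf.with_op_name, adds inputs in order, sets only the attrs the caller passed,
# and wraps the finished Operation in one or more Tensors:
#
#     function example_op_graph(x_; name=nothing, attr=nothing)
#         local desc
#         tf.with_op_name(name, "ExampleOp") do       # scope the op's name
#             desc = tf.NodeDescription("ExampleOp")
#             tf.add_input(desc, x_)                  # positional inputs
#             if attr !== nothing                     # optional attr guard
#                 desc["attr"] = Base.identity(attr)
#             end
#         end
#         tf.Tensor(tf.Operation(desc))               # single-output case;
#     end                                             # multi-output ops loop
#                                                     # over tf.Tensor(op, i)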
- if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function while__graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "While") do + desc = tf.NodeDescription("While") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) + desc = tf.EagerOp("While") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) + if tf.in_eager_mode() + while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) + else + while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function while__eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) - desc = tf.EagerOp("While") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(while_, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function while_(input_; name=nothing, T=nothing, cond=nothing, body=nothing, output_shapes=nothing) - if tf.in_eager_mode() - while__eager(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) - else - while__graph(input_; name=name, T=T, cond=cond, body=body, output_shapes=output_shapes) - end - end end @@ -26251,51 +47577,103 @@ end """ begin - function stateless_multinomial_graph(logits_, 
num_samples_, seed_; name=nothing, output_dtype=nothing) - local desc - tf.with_op_name(name, "StatelessMultinomial") do - desc = tf.NodeDescription("StatelessMultinomial") - logits_ = convert(Tensor{Any}, logits_) - num_samples_ = convert(Tensor{Int32}, num_samples_) - seed_ = convert(Tensor{Int64}, seed_) - (logits_,) = tf.tf_promote(logits_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - tf.add_input(desc, seed_) - if output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) + begin + function stateless_multinomial_graph(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + local desc + tf.with_op_name(name, "StatelessMultinomial") do + desc = tf.NodeDescription("StatelessMultinomial") + begin + begin + logits_ = convert(Tensor{Any}, logits_) + begin + end + end + begin + num_samples_ = convert(Tensor{Int32}, num_samples_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + (logits_,) = tf.tf_promote(logits_) + end + begin + (seed_,) = tf.tf_promote(seed_) + end + end + begin + begin + tf.add_input(desc, logits_) + end + begin + tf.add_input(desc, num_samples_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + desc = tf.EagerOp("StatelessMultinomial") + logits_ = convert(tf.EagerTensor, logits_) + num_samples_ = convert(tf.EagerTensor, num_samples_) + seed_ = convert(tf.EagerTensor, seed_) + begin + begin + tf.add_input(desc, logits_) + end + begin + tf.add_input(desc, num_samples_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + end + end + begin + desc["T"] = tf.data_type(logits_) + end + begin + desc["Tseed"] = tf.data_type(seed_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) + if tf.in_eager_mode() + stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + else + stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_multinomial_eager(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) - desc = tf.EagerOp("StatelessMultinomial") - logits_ = convert(tf.EagerTensor, logits_) - num_samples_ = convert(tf.EagerTensor, num_samples_) - seed_ = convert(tf.EagerTensor, seed_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - tf.add_input(desc, seed_) - if output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) - end - desc["T"] = tf.data_type(logits_) - desc["Tseed"] = tf.data_type(seed_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_multinomial, [logits_, num_samples_, seed_], name=nothing, output_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end 
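# Editor's note (a sketch of the recurring pattern in the *_eager wrappers
# around this hunk; op_fn, inputs, and attrs... are placeholders): each eager
# wrapper executes the op immediately, then records a TapeNode so reverse-mode
# gradients can be replayed through the tape later:
#
#     res = tf.execute(desc)                  # run the op, collect its outputs
#     node = tf.TapeNode(op_fn, inputs,       # remember the op, its inputs,
#                        attrs..., res)       # its attrs, and its outputs
#     tf.add_node(res[1], node)               # tape entry keyed on first output
#     return res[1]                           # single-output ops unwrap res;
#                                             # multi-output ops return res whole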
end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_multinomial(logits_, num_samples_, seed_; name=nothing, output_dtype=nothing) - if tf.in_eager_mode() - stateless_multinomial_eager(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) - else - stateless_multinomial_graph(logits_, num_samples_, seed_; name=name, output_dtype=output_dtype) - end - end end @@ -26305,53 +47683,107 @@ end """ begin - function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterAdd") do - desc = tf.NodeDescription("ScatterAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterAdd") do + desc = tf.NodeDescription("ScatterAdd") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterAdd") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) end - function scatter_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterAdd") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, 
updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -26361,35 +47793,63 @@ end """ begin - function conj_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Conj") do - desc = tf.NodeDescription("Conj") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function conj_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Conj") do + desc = tf.NodeDescription("Conj") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function conj_eager(input_; name=nothing) - desc = tf.EagerOp("Conj") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(conj, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function conj_eager(input_; name=nothing) + desc = tf.EagerOp("Conj") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(conj, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conj(input_; name=nothing) - if tf.in_eager_mode() - conj_eager(input_; name=name) - else - conj_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conj(input_; name=nothing) + if tf.in_eager_mode() + conj_eager(input_; name=name) + else + conj_graph(input_; name=name) + end end - end + end end @@ -26399,45 +47859,85 @@ end """ begin - function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ParallelDynamicStitch") do - desc = tf.NodeDescription("ParallelDynamicStitch") - indices_ = [convert(Tensor{Int32}, x) for x = indices_] - data_ = [convert(Tensor{Any}, x) for x = data_] - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function parallel_dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ParallelDynamicStitch") do + desc = tf.NodeDescription("ParallelDynamicStitch") + begin + begin + indices_ = [convert(Tensor{Int32}, x) for x = indices_] + begin + 
end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) + desc = tf.EagerOp("ParallelDynamicStitch") + indices_ = convert(tf.EagerTensor, indices_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) + if tf.in_eager_mode() + parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N) + else + parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function parallel_dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) - desc = tf.EagerOp("ParallelDynamicStitch") - indices_ = convert(tf.EagerTensor, indices_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(parallel_dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_dynamic_stitch(indices_, data_; name=nothing, N=nothing) - if tf.in_eager_mode() - parallel_dynamic_stitch_eager(indices_, data_; name=name, N=N) - else - parallel_dynamic_stitch_graph(indices_, data_; name=name, N=N) - end - end end @@ -26447,37 +47947,69 @@ end """ begin - function make_iterator_graph(dataset_, iterator_; name=nothing) - local desc - tf.with_op_name(name, "MakeIterator") do - desc = tf.NodeDescription("MakeIterator") - dataset_ = convert(Tensor{Any}, dataset_) - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, dataset_) - tf.add_input(desc, iterator_) + begin + function make_iterator_graph(dataset_, iterator_; name=nothing) + local desc + tf.with_op_name(name, "MakeIterator") do + desc = tf.NodeDescription("MakeIterator") + begin + begin + dataset_ = convert(Tensor{Any}, dataset_) + begin + end + end + begin + iterator_ = convert(Tensor{Any}, iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, iterator_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function make_iterator_eager(dataset_, iterator_; name=nothing) - desc = tf.EagerOp("MakeIterator") - dataset_ = convert(tf.EagerTensor, dataset_) - iterator_ = convert(tf.EagerTensor, iterator_) - tf.add_input(desc, dataset_) - tf.add_input(desc, iterator_) - res = tf.execute(desc) - node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing, res) - if 
length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function make_iterator_eager(dataset_, iterator_; name=nothing) + desc = tf.EagerOp("MakeIterator") + dataset_ = convert(tf.EagerTensor, dataset_) + iterator_ = convert(tf.EagerTensor, iterator_) + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, iterator_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(make_iterator, [dataset_, iterator_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing) - if tf.in_eager_mode() - make_iterator_eager(dataset_, iterator_; name=name) - else - make_iterator_graph(dataset_, iterator_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function make_iterator(dataset_, iterator_; name=nothing) + if tf.in_eager_mode() + make_iterator_eager(dataset_, iterator_; name=name) + else + make_iterator_graph(dataset_, iterator_; name=name) + end end - end + end end @@ -26487,37 +48019,69 @@ end """ begin - function rfft3d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT3D") do - desc = tf.NodeDescription("RFFT3D") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function rfft3d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT3D") do + desc = tf.NodeDescription("RFFT3D") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rfft3d_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("RFFT3D") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rfft3d_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("RFFT3D") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(rfft3d, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft3d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - rfft3d_eager(input_, fft_length_; name=name) - else - rfft3d_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft3d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft3d_eager(input_, fft_length_; name=name) + else + rfft3d_graph(input_, fft_length_; name=name) + end end - end + end end @@ -26527,58 +48091,116 @@ end """ begin - function 
sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceSumSparse") do - desc = tf.NodeDescription("SparseReduceSumSparse") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceSumSparse") do + desc = tf.NodeDescription("SparseReduceSumSparse") + begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_values_ = convert(Tensor{Any}, input_values_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + begin + end + end + begin + (input_values_,) = tf.tf_promote(input_values_) + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceSumSparse") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, 
out_idx)) - end - out end - function sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("SparseReduceSumSparse") - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_values_ = convert(tf.EagerTensor, input_values_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_sum_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_sum_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_sum_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_sum_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - end - end end @@ -26588,59 +48210,95 @@ end Allocates a mutable tensor that becomes available to appropriately annotated """ begin - function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocator") do - desc = tf.NodeDescription("_ScopedAllocator") - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) + begin + function _scoped_allocator_graph(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocator") do + desc = tf.NodeDescription("_ScopedAllocator") + begin + end + begin + end + begin + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if expected_call_count !== nothing + desc["expected_call_count"] = Base.Int(expected_call_count) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _scoped_allocator_eager(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + desc = tf.EagerOp("_ScopedAllocator") + begin + end + begin + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if expected_call_count !== nothing + desc["expected_call_count"] = Base.Int(expected_call_count) + end + end + end + res = 
tf.execute(desc) + node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) + if tf.in_eager_mode() + _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + else + _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) + end end - if expected_call_count !== nothing - desc["expected_call_count"] = Base.Int(expected_call_count) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _scoped_allocator_eager(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) - desc = tf.EagerOp("_ScopedAllocator") - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if expected_call_count !== nothing - desc["expected_call_count"] = Base.Int(expected_call_count) - end - res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator, [], name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator(; name=nothing, shapes=nothing, shape=nothing, sa_name=nothing, id=nothing, expected_call_count=nothing) - if tf.in_eager_mode() - _scoped_allocator_eager(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) - else - _scoped_allocator_graph(; name=name, shapes=shapes, shape=shape, sa_name=sa_name, id=id, expected_call_count=expected_call_count) - end - end end @@ -26650,65 +48308,121 @@ end Load embedding parameters for a single table. 
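A hypothetical call sketch (editor's illustration; the shard and table values
are made up, and calling the wrapper directly rather than through a
higher-level TPU embedding API is an assumption):

    # Push the Adadelta optimizer state for shard 0 of a single-shard embedding
    # table to the TPU. All three arguments are coerced to Tensor{Float32} by
    # the generated wrapper below.
    load_tpu_embedding_adadelta_parameters(parameters, accumulators, updates;
        table_id=0, num_shards=1, shard_id=0)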
""" begin - function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - updates_ = convert(Tensor{Float32}, updates_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + updates_ = convert(Tensor{Float32}, updates_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, 
table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adadelta_parameters, [parameters_, accumulators_, updates_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters(parameters_, accumulators_, updates_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adadelta_parameters_eager(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adadelta_parameters_graph(parameters_, accumulators_, updates_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -26718,67 +48432,151 @@ end """ begin - function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) - local desc - tf.with_op_name(name, "SparseAdd") do - desc = tf.NodeDescription("SparseAdd") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - thresh_ = convert(Tensor{Any}, thresh_) - (thresh_,) = tf.tf_promote(thresh_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - tf.add_input(desc, thresh_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) - desc = tf.EagerOp("SparseAdd") - a_indices_ = convert(tf.EagerTensor, a_indices_) - a_values_ = convert(tf.EagerTensor, a_values_) - a_shape_ = convert(tf.EagerTensor, 
a_shape_) - b_indices_ = convert(tf.EagerTensor, b_indices_) - b_values_ = convert(tf.EagerTensor, b_values_) - b_shape_ = convert(tf.EagerTensor, b_shape_) - thresh_ = convert(tf.EagerTensor, thresh_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - tf.add_input(desc, thresh_) - desc["T"] = tf.data_type(a_values_) - desc["T"] = tf.data_type(b_values_) - desc["Treal"] = tf.data_type(thresh_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) - if tf.in_eager_mode() - sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) - else - sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + begin + function sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + local desc + tf.with_op_name(name, "SparseAdd") do + desc = tf.NodeDescription("SparseAdd") + begin + begin + a_indices_ = convert(Tensor{Int64}, a_indices_) + begin + end + end + begin + a_values_ = convert(Tensor{Any}, a_values_) + begin + end + end + begin + a_shape_ = convert(Tensor{Int64}, a_shape_) + begin + end + end + begin + b_indices_ = convert(Tensor{Int64}, b_indices_) + begin + end + end + begin + b_values_ = convert(Tensor{Any}, b_values_) + begin + end + end + begin + b_shape_ = convert(Tensor{Int64}, b_shape_) + begin + end + end + begin + thresh_ = convert(Tensor{Any}, thresh_) + begin + end + end + begin + (thresh_,) = tf.tf_promote(thresh_) + end + begin + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + end + end + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + begin + tf.add_input(desc, thresh_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + desc = tf.EagerOp("SparseAdd") + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + b_values_ = convert(tf.EagerTensor, b_values_) + b_shape_ = convert(tf.EagerTensor, b_shape_) + thresh_ = convert(tf.EagerTensor, thresh_) + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + begin + tf.add_input(desc, thresh_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_values_) + end + begin + desc["T"] = tf.data_type(b_values_) + end + begin + desc["Treal"] = 
tf.data_type(thresh_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_add, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=nothing) + if tf.in_eager_mode() + sparse_add_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + else + sparse_add_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_, thresh_; name=name) + end end - end + end end @@ -26788,48 +48586,86 @@ end """ begin - function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) - local desc - tf.with_op_name(name, "CTCGreedyDecoder") do - desc = tf.NodeDescription("CTCGreedyDecoder") - inputs_ = convert(Tensor{Float32}, inputs_) - sequence_length_ = convert(Tensor{Int32}, sequence_length_) - tf.add_input(desc, inputs_) - tf.add_input(desc, sequence_length_) - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) + begin + function ctc_greedy_decoder_graph(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + local desc + tf.with_op_name(name, "CTCGreedyDecoder") do + desc = tf.NodeDescription("CTCGreedyDecoder") + begin + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + sequence_length_ = convert(Tensor{Int32}, sequence_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, sequence_length_) + end + end + begin + begin + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + desc = tf.EagerOp("CTCGreedyDecoder") + inputs_ = convert(tf.EagerTensor, inputs_) + sequence_length_ = convert(tf.EagerTensor, sequence_length_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, sequence_length_) + end + end + begin + begin + if merge_repeated !== nothing + desc["merge_repeated"] = Base.Bool(merge_repeated) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) + if tf.in_eager_mode() + ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + else + ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ctc_greedy_decoder_eager(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) - desc = tf.EagerOp("CTCGreedyDecoder") - inputs_ = convert(tf.EagerTensor, inputs_) - sequence_length_ = convert(tf.EagerTensor, sequence_length_) - tf.add_input(desc, inputs_) - 
tf.add_input(desc, sequence_length_) - if merge_repeated !== nothing - desc["merge_repeated"] = Base.Bool(merge_repeated) - end - res = tf.execute(desc) - node = tf.TapeNode(ctc_greedy_decoder, [inputs_, sequence_length_], name=nothing, merge_repeated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ctc_greedy_decoder(inputs_, sequence_length_; name=nothing, merge_repeated=nothing) - if tf.in_eager_mode() - ctc_greedy_decoder_eager(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) - else - ctc_greedy_decoder_graph(inputs_, sequence_length_; name=name, merge_repeated=merge_repeated) - end - end end @@ -26839,47 +48675,75 @@ end """ begin - function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) - local desc - tf.with_op_name(name, "ImmutableConst") do - desc = tf.NodeDescription("ImmutableConst") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function immutable_const_graph(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + local desc + tf.with_op_name(name, "ImmutableConst") do + desc = tf.NodeDescription("ImmutableConst") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if memory_region_name !== nothing + desc["memory_region_name"] = Base.String(memory_region_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function immutable_const_eager(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + desc = tf.EagerOp("ImmutableConst") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if memory_region_name !== nothing + desc["memory_region_name"] = Base.String(memory_region_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) + if tf.in_eager_mode() + immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + else + immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) + end end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if memory_region_name !== nothing - desc["memory_region_name"] = Base.String(memory_region_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function immutable_const_eager(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) - desc = tf.EagerOp("ImmutableConst") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if memory_region_name !== nothing - desc["memory_region_name"] = Base.String(memory_region_name) - end - res = tf.execute(desc) - node = tf.TapeNode(immutable_const, [], name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function immutable_const(; name=nothing, dtype=nothing, shape=nothing, memory_region_name=nothing) - if tf.in_eager_mode() - immutable_const_eager(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) - else - immutable_const_graph(; name=name, dtype=dtype, shape=shape, memory_region_name=memory_region_name) - end - end end @@ -26889,33 +48753,57 @@ end """ begin - function consume_mutex_lock_graph(mutex_lock_; name=nothing) - local desc - tf.with_op_name(name, "ConsumeMutexLock") do - desc = tf.NodeDescription("ConsumeMutexLock") - mutex_lock_ = convert(Tensor{Any}, mutex_lock_) - tf.add_input(desc, mutex_lock_) + begin + function consume_mutex_lock_graph(mutex_lock_; name=nothing) + local desc + tf.with_op_name(name, "ConsumeMutexLock") do + desc = tf.NodeDescription("ConsumeMutexLock") + begin + begin + mutex_lock_ = convert(Tensor{Any}, mutex_lock_) + begin + end + end + end + begin + begin + tf.add_input(desc, mutex_lock_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function consume_mutex_lock_eager(mutex_lock_; name=nothing) - desc = tf.EagerOp("ConsumeMutexLock") - mutex_lock_ = convert(tf.EagerTensor, mutex_lock_) - tf.add_input(desc, mutex_lock_) - res = tf.execute(desc) - node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function consume_mutex_lock_eager(mutex_lock_; name=nothing) + desc = tf.EagerOp("ConsumeMutexLock") + mutex_lock_ = convert(tf.EagerTensor, mutex_lock_) + begin + begin + tf.add_input(desc, mutex_lock_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(consume_mutex_lock, [mutex_lock_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing) - if tf.in_eager_mode() - consume_mutex_lock_eager(mutex_lock_; name=name) - else - consume_mutex_lock_graph(mutex_lock_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function consume_mutex_lock(mutex_lock_; name=nothing) + if tf.in_eager_mode() + consume_mutex_lock_eager(mutex_lock_; name=name) + else + consume_mutex_lock_graph(mutex_lock_; name=name) + end end - end + end end @@ -26925,40 +48813,78 @@ end """ begin - function greater_equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "GreaterEqual") do - desc = tf.NodeDescription("GreaterEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function greater_equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "GreaterEqual") do + desc = tf.NodeDescription("GreaterEqual") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function greater_equal_eager(x_, y_; name=nothing) - desc = tf.EagerOp("GreaterEqual") - x_ = convert(tf.EagerTensor, x_) - y_ = 
convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(greater_equal, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function greater_equal_eager(x_, y_; name=nothing) + desc = tf.EagerOp("GreaterEqual") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(greater_equal, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater_equal(x_, y_; name=nothing) - if tf.in_eager_mode() - greater_equal_eager(x_, y_; name=name) - else - greater_equal_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function greater_equal(x_, y_; name=nothing) + if tf.in_eager_mode() + greater_equal_eager(x_, y_; name=name) + else + greater_equal_graph(x_, y_; name=name) + end end - end + end end @@ -26968,61 +48894,109 @@ end """ begin - function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - local desc - tf.with_op_name(name, "InitializeTableFromTextFileV2") do - desc = tf.NodeDescription("InitializeTableFromTextFileV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, filename_) - if key_index !== nothing - desc["key_index"] = Base.Int(key_index) + begin + function initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + local desc + tf.with_op_name(name, "InitializeTableFromTextFileV2") do + desc = tf.NodeDescription("InitializeTableFromTextFileV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, filename_) + end + end + begin + begin + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + end + begin + if value_index !== nothing + desc["value_index"] = Base.Int(value_index) + end + end + begin + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + end + begin + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + desc = tf.EagerOp("InitializeTableFromTextFileV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + filename_ = convert(tf.EagerTensor, filename_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, filename_) + end + end + begin + begin + if key_index !== nothing + desc["key_index"] = Base.Int(key_index) + end + end + begin + if value_index !== nothing + 
desc["value_index"] = Base.Int(value_index) + end + end + begin + if vocab_size !== nothing + desc["vocab_size"] = Base.Int(vocab_size) + end + end + begin + if delimiter !== nothing + desc["delimiter"] = Base.String(delimiter) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) + if tf.in_eager_mode() + initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + else + initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) + end end - if value_index !== nothing - desc["value_index"] = Base.Int(value_index) - end - if vocab_size !== nothing - desc["vocab_size"] = Base.Int(vocab_size) - end - if delimiter !== nothing - desc["delimiter"] = Base.String(delimiter) - end - end - tf.Tensor(tf.Operation(desc)) - end - function initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - desc = tf.EagerOp("InitializeTableFromTextFileV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - filename_ = convert(tf.EagerTensor, filename_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, filename_) - if key_index !== nothing - desc["key_index"] = Base.Int(key_index) - end - if value_index !== nothing - desc["value_index"] = Base.Int(value_index) - end - if vocab_size !== nothing - desc["vocab_size"] = Base.Int(vocab_size) - end - if delimiter !== nothing - desc["delimiter"] = Base.String(delimiter) - end - res = tf.execute(desc) - node = tf.TapeNode(initialize_table_from_text_file_v2, [table_handle_, filename_], name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function initialize_table_from_text_file_v2(table_handle_, filename_; name=nothing, key_index=nothing, value_index=nothing, vocab_size=nothing, delimiter=nothing) - if tf.in_eager_mode() - initialize_table_from_text_file_v2_eager(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) - else - initialize_table_from_text_file_v2_graph(table_handle_, filename_; name=name, key_index=key_index, value_index=value_index, vocab_size=vocab_size, delimiter=delimiter) - end - end end @@ -27032,45 +49006,77 @@ end """ begin - function queue_dequeue_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeue") do - desc = tf.NodeDescription("QueueDequeue") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function queue_dequeue_graph(handle_; name=nothing, 
component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeue") do + desc = tf.NodeDescription("QueueDequeue") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeue") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_dequeue_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeue") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -27080,40 +49086,78 @@ end """ begin - function equal_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Equal") do - desc = tf.NodeDescription("Equal") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function equal_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Equal") do + desc = tf.NodeDescription("Equal") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end 
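 # The op above follows the three-method layout used for every generated op in
 # this file: `equal_graph` builds a `tf.NodeDescription` under `tf.with_op_name`
 # (the empty begin/end block right above is its attribute section -- `Equal`
 # takes no explicit attributes in graph mode, where "T" comes from the promoted
 # inputs), while `equal_eager` below builds a `tf.EagerOp` and runs it
 # immediately with `tf.execute`; the `tf.@op` wrapper picks one via
 # `tf.in_eager_mode()`. A minimal usage sketch (hypothetical values; assumes
 # an eager context is active):
 #
 #     x = constant([1, 2, 3])
 #     y = constant([1, 0, 3])
 #     mask = equal(x, y)   # eager: executes now and returns an EagerTensor;
 #                          # graph: returns a tf.Tensor node in the default graph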
+ end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function equal_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Equal") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(equal, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function equal_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Equal") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(equal, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function equal(x_, y_; name=nothing) - if tf.in_eager_mode() - equal_eager(x_, y_; name=name) - else - equal_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function equal(x_, y_; name=nothing) + if tf.in_eager_mode() + equal_eager(x_, y_; name=name) + else + equal_graph(x_, y_; name=name) + end end - end + end end @@ -27123,45 +49167,77 @@ end """ begin - function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorFromStringHandle") do - desc = tf.NodeDescription("IteratorFromStringHandle") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function iterator_from_string_handle_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandle") do + desc = tf.NodeDescription("IteratorFromStringHandle") + begin + begin + string_handle_ = convert(Tensor{String}, string_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorFromStringHandle") + string_handle_ = convert(tf.EagerTensor, string_handle_) + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, 
output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_from_string_handle_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorFromStringHandle") - string_handle_ = convert(tf.EagerTensor, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_from_string_handle, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_from_string_handle_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_from_string_handle_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -27171,57 +49247,113 @@ end """ begin - function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListSplit") do - desc = tf.NodeDescription("TensorListSplit") - tensor_ = convert(Tensor{Any}, tensor_) - element_shape_ = convert(Tensor{Any}, element_shape_) - lengths_ = convert(Tensor{Int64}, lengths_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, lengths_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_split_graph(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListSplit") do + desc = tf.NodeDescription("TensorListSplit") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + element_shape_ = convert(Tensor{Any}, element_shape_) + begin + end + end + begin + lengths_ = convert(Tensor{Int64}, lengths_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (element_shape_,) = tf.tf_promote(element_shape_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, lengths_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListSplit") + tensor_ = convert(tf.EagerTensor, 
tensor_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + lengths_ = convert(tf.EagerTensor, lengths_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, lengths_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + begin + desc["element_dtype"] = tf.data_type(tensor_) + end + begin + desc["shape_type"] = tf.data_type(element_shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_split_eager(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListSplit") - tensor_ = convert(tf.EagerTensor, tensor_) - element_shape_ = convert(tf.EagerTensor, element_shape_) - lengths_ = convert(tf.EagerTensor, lengths_) - tf.add_input(desc, tensor_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, lengths_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["element_dtype"] = tf.data_type(tensor_) - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_split, [tensor_, element_shape_, lengths_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_split(tensor_, element_shape_, lengths_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_split_eager(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_split_graph(tensor_, element_shape_, lengths_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end end @@ -27231,76 +49363,130 @@ end """ begin - function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FractionalMaxPool") do - desc = tf.NodeDescription("FractionalMaxPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== 
nothing - desc["overlapping"] = Base.Bool(overlapping) - end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function fractional_max_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalMaxPool") do + desc = tf.NodeDescription("FractionalMaxPool") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + end + begin + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + end + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + begin + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FractionalMaxPool") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + end + begin + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + end + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + begin + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + else + fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function 
fractional_max_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("FractionalMaxPool") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(fractional_max_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - fractional_max_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) - else - fractional_max_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) - end - end end @@ -27310,47 +49496,97 @@ end """ begin - function scatter_nd_graph(indices_, updates_, shape_; name=nothing) - local desc - tf.with_op_name(name, "ScatterNd") do - desc = tf.NodeDescription("ScatterNd") - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - shape_ = convert(Tensor{Any}, shape_) - (updates_,) = tf.tf_promote(updates_) - (indices_, shape_) = tf.tf_promote(indices_, shape_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - tf.add_input(desc, shape_) - end - tf.Tensor(tf.Operation(desc)) + begin + function scatter_nd_graph(indices_, updates_, shape_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNd") do + desc = tf.NodeDescription("ScatterNd") + begin + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_, shape_) = tf.tf_promote(indices_, shape_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_nd_eager(indices_, updates_, shape_; name=nothing) + desc = tf.EagerOp("ScatterNd") + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + begin + tf.add_input(desc, shape_) + end + end 
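 # The begin blocks that follow fill in the op's dtype attributes explicitly.
 # The eager path has no graph-construction type inference, so the generated
 # code derives "Tindices" and "T" from the concrete inputs with tf.data_type
 # (the counterpart of TFE_OpSetAttrType in the C API). A hedged hand-written
 # equivalent (hypothetical tensors, not produced by the generator):
 #
 #     desc = tf.EagerOp("ScatterNd")
 #     tf.add_input(desc, indices); tf.add_input(desc, updates); tf.add_input(desc, shape)
 #     desc["Tindices"] = tf.data_type(indices)   # e.g. Int64
 #     desc["T"] = tf.data_type(updates)          # element type of updates/output
 #     res = tf.execute(desc)                     # vector of output handles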
+ begin + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + begin + desc["Tindices"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing) + if tf.in_eager_mode() + scatter_nd_eager(indices_, updates_, shape_; name=name) + else + scatter_nd_graph(indices_, updates_, shape_; name=name) + end + end end - function scatter_nd_eager(indices_, updates_, shape_; name=nothing) - desc = tf.EagerOp("ScatterNd") - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - tf.add_input(desc, shape_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - desc["Tindices"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_nd, [indices_, updates_, shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd(indices_, updates_, shape_; name=nothing) - if tf.in_eager_mode() - scatter_nd_eager(indices_, updates_, shape_; name=name) - else - scatter_nd_graph(indices_, updates_, shape_; name=name) - end - end end @@ -27360,44 +49596,90 @@ end """ begin - function select_graph(condition_, t_, e_; name=nothing) - local desc - tf.with_op_name(name, "Select") do - desc = tf.NodeDescription("Select") - condition_ = convert(Tensor{Bool}, condition_) - t_ = convert(Tensor{Any}, t_) - e_ = convert(Tensor{Any}, e_) - (t_, e_) = tf.tf_promote(t_, e_) - tf.add_input(desc, condition_) - tf.add_input(desc, t_) - tf.add_input(desc, e_) - end - tf.Tensor(tf.Operation(desc)) - end - function select_eager(condition_, t_, e_; name=nothing) - desc = tf.EagerOp("Select") - condition_ = convert(tf.EagerTensor, condition_) - t_ = convert(tf.EagerTensor, t_) - e_ = convert(tf.EagerTensor, e_) - tf.add_input(desc, condition_) - tf.add_input(desc, t_) - tf.add_input(desc, e_) - desc["T"] = tf.data_type(t_) - desc["T"] = tf.data_type(e_) - res = tf.execute(desc) - node = tf.TapeNode(select, [condition_, t_, e_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function select(condition_, t_, e_; name=nothing) - if tf.in_eager_mode() - select_eager(condition_, t_, e_; name=name) - else - select_graph(condition_, t_, e_; name=name) + begin + function select_graph(condition_, t_, e_; name=nothing) + local desc + tf.with_op_name(name, "Select") do + desc = tf.NodeDescription("Select") + begin + begin + condition_ = convert(Tensor{Bool}, condition_) + begin + end + end + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + e_ = convert(Tensor{Any}, e_) + begin + end + end + begin + (t_, e_) = tf.tf_promote(t_, e_) + end + end + begin + begin + tf.add_input(desc, condition_) + end + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, e_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function select_eager(condition_, t_, e_; name=nothing) + 
desc = tf.EagerOp("Select") + condition_ = convert(tf.EagerTensor, condition_) + t_ = convert(tf.EagerTensor, t_) + e_ = convert(tf.EagerTensor, e_) + begin + begin + tf.add_input(desc, condition_) + end + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, e_) + end + end + begin + end + begin + desc["T"] = tf.data_type(t_) + end + begin + desc["T"] = tf.data_type(e_) + end + res = tf.execute(desc) + node = tf.TapeNode(select, [condition_, t_, e_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function select(condition_, t_, e_; name=nothing) + if tf.in_eager_mode() + select_eager(condition_, t_, e_; name=name) + else + select_graph(condition_, t_, e_; name=name) + end end - end + end end @@ -27407,48 +49689,92 @@ end """ begin - function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Min") do - desc = tf.NodeDescription("Min") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function min_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Min") do + desc = tf.NodeDescription("Min") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function min_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Min") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function min_eager(input_, reduction_indices_; name=nothing, 
keep_dims=nothing) - desc = tf.EagerOp("Min") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(min, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function min(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - min_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - min_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -27458,69 +49784,133 @@ end """ begin - function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - local desc - tf.with_op_name(name, "LRNGrad") do - desc = tf.NodeDescription("LRNGrad") - input_grads_ = convert(Tensor{Float32}, input_grads_) - input_image_ = convert(Tensor{Float32}, input_image_) - output_image_ = convert(Tensor{Float32}, output_image_) - (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_) - tf.add_input(desc, input_grads_) - tf.add_input(desc, input_image_) - tf.add_input(desc, output_image_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) + begin + function lrn_grad_graph(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + local desc + tf.with_op_name(name, "LRNGrad") do + desc = tf.NodeDescription("LRNGrad") + begin + begin + input_grads_ = convert(Tensor{Float32}, input_grads_) + begin + end + end + begin + input_image_ = convert(Tensor{Float32}, input_image_) + begin + end + end + begin + output_image_ = convert(Tensor{Float32}, output_image_) + begin + end + end + begin + (input_grads_, input_image_, output_image_) = tf.tf_promote(input_grads_, input_image_, output_image_) + end + end + begin + begin + tf.add_input(desc, input_grads_) + end + begin + tf.add_input(desc, input_image_) + end + begin + tf.add_input(desc, output_image_) + end + end + begin + begin + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + end + begin + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + end + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + begin + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + desc = tf.EagerOp("LRNGrad") + input_grads_ = convert(tf.EagerTensor, input_grads_) + input_image_ = convert(tf.EagerTensor, input_image_) + output_image_ = convert(tf.EagerTensor, output_image_) + begin + begin + tf.add_input(desc, input_grads_) + end + begin + tf.add_input(desc, input_image_) + end + begin + tf.add_input(desc, output_image_) + end + end + begin + begin + if depth_radius !== nothing + desc["depth_radius"] = Base.Int(depth_radius) + end + end + 
begin + if bias !== nothing + desc["bias"] = Base.identity(bias) + end + end + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + begin + if beta !== nothing + desc["beta"] = Base.identity(beta) + end + end + end + begin + desc["T"] = tf.data_type(input_grads_) + end + begin + desc["T"] = tf.data_type(input_image_) + end + begin + desc["T"] = tf.data_type(output_image_) + end + res = tf.execute(desc) + node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) + if tf.in_eager_mode() + lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + else + lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + end end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - end - tf.Tensor(tf.Operation(desc)) - end - function lrn_grad_eager(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - desc = tf.EagerOp("LRNGrad") - input_grads_ = convert(tf.EagerTensor, input_grads_) - input_image_ = convert(tf.EagerTensor, input_image_) - output_image_ = convert(tf.EagerTensor, output_image_) - tf.add_input(desc, input_grads_) - tf.add_input(desc, input_image_) - tf.add_input(desc, output_image_) - if depth_radius !== nothing - desc["depth_radius"] = Base.Int(depth_radius) - end - if bias !== nothing - desc["bias"] = Base.identity(bias) - end - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - if beta !== nothing - desc["beta"] = Base.identity(beta) - end - desc["T"] = tf.data_type(input_grads_) - desc["T"] = tf.data_type(input_image_) - desc["T"] = tf.data_type(output_image_) - res = tf.execute(desc) - node = tf.TapeNode(lrn_grad, [input_grads_, input_image_, output_image_], name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lrn_grad(input_grads_, input_image_, output_image_; name=nothing, depth_radius=nothing, bias=nothing, alpha=nothing, beta=nothing) - if tf.in_eager_mode() - lrn_grad_eager(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - else - lrn_grad_graph(input_grads_, input_image_, output_image_; name=name, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - end - end end @@ -27530,71 +49920,131 @@ end """ begin - function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomPoissonV2") do - desc = tf.NodeDescription("RandomPoissonV2") - shape_ = convert(Tensor{Any}, shape_) - rate_ = convert(Tensor{Float64}, rate_) - (shape_,) = tf.tf_promote(shape_) - (rate_,) = tf.tf_promote(rate_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - if seed !== nothing - 
desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function random_poisson_v2_graph(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomPoissonV2") do + desc = tf.NodeDescription("RandomPoissonV2") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + rate_ = convert(Tensor{Float64}, rate_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (rate_,) = tf.tf_promote(rate_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, rate_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if R !== nothing + desc["R"] = Base.identity(R) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + desc = tf.EagerOp("RandomPoissonV2") + shape_ = convert(tf.EagerTensor, shape_) + rate_ = convert(tf.EagerTensor, rate_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, rate_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if R !== nothing + desc["R"] = Base.identity(R) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["S"] = tf.data_type(shape_) + end + begin + desc["R"] = tf.data_type(rate_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) + if tf.in_eager_mode() + random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + else + random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) + end end - if S !== nothing - desc["S"] = Base.identity(S) - end - if R !== nothing - desc["R"] = Base.identity(R) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) end - function random_poisson_v2_eager(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) - desc = tf.EagerOp("RandomPoissonV2") - shape_ = convert(tf.EagerTensor, shape_) - rate_ = convert(tf.EagerTensor, rate_) - tf.add_input(desc, shape_) - tf.add_input(desc, rate_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end - if R !== nothing - desc["R"] = Base.identity(R) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["S"] = tf.data_type(shape_) - desc["R"] = 
tf.data_type(rate_) - res = tf.execute(desc) - node = tf.TapeNode(random_poisson_v2, [shape_, rate_], name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_poisson_v2(shape_, rate_; name=nothing, seed=nothing, seed2=nothing, S=nothing, R=nothing, dtype=nothing) - if tf.in_eager_mode() - random_poisson_v2_eager(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) - else - random_poisson_v2_graph(shape_, rate_; name=name, seed=seed, seed2=seed2, S=S, R=R, dtype=dtype) - end - end end @@ -27604,59 +50054,95 @@ end """ begin - function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "FIFOQueue") do - desc = tf.NodeDescription("FIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "FIFOQueue") do + desc = tf.NodeDescription("FIFOQueue") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("FIFOQueue") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + fifo_queue_graph(; name=name, component_types=component_types, 
shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("FIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -27666,67 +50152,149 @@ end """ begin - function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do - desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (alpha_, l1_, l2_, grad_) = tf.tf_promote(alpha_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent") - var_ = convert(tf.EagerTensor, var_) - alpha_ = convert(tf.EagerTensor, alpha_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - 
node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceSparseApplyProximalGradientDescent") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (alpha_, l1_, l2_, grad_) = tf.tf_promote(alpha_, l1_, l2_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyProximalGradientDescent") + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
resource_sparse_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -27736,45 +50304,77 @@ end """ begin - function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalNonSerializableDataset") do - desc = tf.NodeDescription("ExperimentalNonSerializableDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_non_serializable_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalNonSerializableDataset") do + desc = tf.NodeDescription("ExperimentalNonSerializableDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalNonSerializableDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_non_serializable_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalNonSerializableDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - 
desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_non_serializable_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_non_serializable_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_non_serializable_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_non_serializable_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -27784,63 +50384,123 @@ end """ begin - function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2DBackpropFilter") do - desc = tf.NodeDescription("Dilation2DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) + begin + function dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2DBackpropFilter") do + desc = tf.NodeDescription("Dilation2DBackpropFilter") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("Dilation2DBackpropFilter") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = 
tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - desc = tf.EagerOp("Dilation2DBackpropFilter") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(dilation2d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, rates=rates, padding=padding) - end - end end @@ -27850,49 +50510,89 @@ end """ begin - function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do - desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tag_ = convert(Tensor{String}, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalBytesProducedStatsDataset") do + desc = tf.NodeDescription("ExperimentalBytesProducedStatsDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) 
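+ # Note: graph mode coerces each positional argument to a typed graph Tensor
+ # before wiring it into the NodeDescription below; the tag is required to be
+ # a string tensor here, while the dataset handle stays untyped (Tensor{Any}).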
+ begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, tag_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + tag_ = convert(tf.EagerTensor, tag_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, tag_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalBytesProducedStatsDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tag_ = convert(tf.EagerTensor, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_bytes_produced_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_bytes_produced_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_bytes_produced_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_bytes_produced_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -27902,63 +50602,115 @@ end output = cond ? 
then_branch(input) : else_branch(input) """ begin - function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - local desc - tf.with_op_name(name, "_If") do - desc = tf.NodeDescription("_If") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) + begin + function _if_graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + local desc + tf.with_op_name(name, "_If") do + desc = tf.NodeDescription("_If") + begin + begin + cond_ = convert(Tensor{Any}, cond_) + begin + end + end + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (cond_,) = tf.tf_promote(cond_) + end + end + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + desc = tf.EagerOp("_If") + cond_ = convert(tf.EagerTensor, cond_) + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + end + begin + desc["Tcond"] = tf.data_type(cond_) + end + res = tf.execute(desc) + node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) + if tf.in_eager_mode() + _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + else + _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) + end end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _if_eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - desc = tf.EagerOp("_If") - cond_ = convert(tf.EagerTensor, cond_) - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout 
!== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - desc["Tcond"] = tf.data_type(cond_) - res = tf.execute(desc) - node = tf.TapeNode(_if, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _if(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing) - if tf.in_eager_mode() - _if_eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - else - _if_graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch) - end - end end @@ -27968,41 +50720,73 @@ end """ begin - function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "BiasAddGrad") do - desc = tf.NodeDescription("BiasAddGrad") - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (out_backprop_,) = tf.tf_promote(out_backprop_) - tf.add_input(desc, out_backprop_) - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function bias_add_grad_graph(out_backprop_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAddGrad") do + desc = tf.NodeDescription("BiasAddGrad") + begin + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (out_backprop_,) = tf.tf_promote(out_backprop_) + end + end + begin + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing) - desc = tf.EagerOp("BiasAddGrad") - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, out_backprop_) - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bias_add_grad_eager(out_backprop_; name=nothing, data_format=nothing) + desc = tf.EagerOp("BiasAddGrad") + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(bias_add_grad, [out_backprop_], name=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_grad(out_backprop_; name=nothing, data_format=nothing) - if tf.in_eager_mode() - bias_add_grad_eager(out_backprop_; name=name, data_format=data_format) - else - bias_add_grad_graph(out_backprop_; name=name, data_format=data_format) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
bias_add_grad(out_backprop_; name=nothing, data_format=nothing) + if tf.in_eager_mode() + bias_add_grad_eager(out_backprop_; name=name, data_format=data_format) + else + bias_add_grad_graph(out_backprop_; name=name, data_format=data_format) + end end - end + end end @@ -28012,33 +50796,57 @@ end """ begin - function reader_serialize_state_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderSerializeStateV2") do - desc = tf.NodeDescription("ReaderSerializeStateV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_serialize_state_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderSerializeStateV2") do + desc = tf.NodeDescription("ReaderSerializeStateV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_serialize_state_v2_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderSerializeStateV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_serialize_state_v2_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderSerializeStateV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_serialize_state_v2, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_serialize_state_v2_eager(reader_handle_; name=name) - else - reader_serialize_state_v2_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_serialize_state_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_serialize_state_v2_eager(reader_handle_; name=name) + else + reader_serialize_state_v2_graph(reader_handle_; name=name) + end end - end + end end @@ -28048,33 +50856,57 @@ end """ begin - function wrap_dataset_variant_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "WrapDatasetVariant") do - desc = tf.NodeDescription("WrapDatasetVariant") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) + begin + function wrap_dataset_variant_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "WrapDatasetVariant") do + desc = tf.NodeDescription("WrapDatasetVariant") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function wrap_dataset_variant_eager(input_handle_; name=nothing) - desc = tf.EagerOp("WrapDatasetVariant") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - res = tf.execute(desc) - node = tf.TapeNode(wrap_dataset_variant, 
[input_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function wrap_dataset_variant_eager(input_handle_; name=nothing) + desc = tf.EagerOp("WrapDatasetVariant") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(wrap_dataset_variant, [input_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing) - if tf.in_eager_mode() - wrap_dataset_variant_eager(input_handle_; name=name) - else - wrap_dataset_variant_graph(input_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function wrap_dataset_variant(input_handle_; name=nothing) + if tf.in_eager_mode() + wrap_dataset_variant_eager(input_handle_; name=name) + else + wrap_dataset_variant_graph(input_handle_; name=name) + end end - end + end end @@ -28084,79 +50916,155 @@ end """ begin - function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - local desc - tf.with_op_name(name, "ParallelInterleaveDatasetV2") do - desc = tf.NodeDescription("ParallelInterleaveDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + local desc + tf.with_op_name(name, "ParallelInterleaveDatasetV2") do + desc = tf.NodeDescription("ParallelInterleaveDatasetV2") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + begin + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + begin + end + end + begin + block_length_ = convert(Tensor{Int64}, block_length_) + begin + end + end + begin + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + 
end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + desc = tf.EagerOp("ParallelInterleaveDatasetV2") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if sloppy !== nothing + desc["sloppy"] = Base.Bool(sloppy) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) + if tf.in_eager_mode() + parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + else + parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - end - tf.Tensor(tf.Operation(desc)) - end - function parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - desc = tf.EagerOp("ParallelInterleaveDatasetV2") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - cycle_length_ = 
convert(tf.EagerTensor, cycle_length_) - block_length_ = convert(tf.EagerTensor, block_length_) - num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, num_parallel_calls_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if sloppy !== nothing - desc["sloppy"] = Base.Bool(sloppy) - end - res = tf.execute(desc) - node = tf.TapeNode(parallel_interleave_dataset_v2, [input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_interleave_dataset_v2(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, sloppy=nothing) - if tf.in_eager_mode() - parallel_interleave_dataset_v2_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - else - parallel_interleave_dataset_v2_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, num_parallel_calls_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, sloppy=sloppy) - end - end end @@ -28166,68 +51074,130 @@ end """ begin - function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do - desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropInput") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropInput") + begin + begin + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing 
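+ # Note: optional attributes are only written into the op description when the
+ # caller actually supplied them; otherwise TensorFlow falls back to the
+ # default registered in the op definition.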
+ desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") + input_sizes_ = convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("DepthwiseConv2dNativeBackpropInput") - input_sizes_ = convert(tf.EagerTensor, input_sizes_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(filter_) 
- desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native_backprop_input, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_input(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_backprop_input_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_backprop_input_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -28237,73 +51207,169 @@ end """ begin - function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyRMSProp") do - desc = tf.NodeDescription("ResourceApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyRMSProp") - var_ = convert(tf.EagerTensor, var_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, 
momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyRMSProp") do + desc = tf.NodeDescription("ResourceApplyRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyRMSProp") + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, 
use_locking=use_locking) + end end - end + end end @@ -28313,45 +51379,77 @@ end """ begin - function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalLMDBDataset") do - desc = tf.NodeDescription("ExperimentalLMDBDataset") - filenames_ = convert(Tensor{String}, filenames_) - tf.add_input(desc, filenames_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_lmdb_dataset_graph(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLMDBDataset") do + desc = tf.NodeDescription("ExperimentalLMDBDataset") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalLMDBDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + begin + begin + tf.add_input(desc, filenames_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function experimental_lmdb_dataset_eager(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalLMDBDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - tf.add_input(desc, filenames_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_lmdb_dataset, [filenames_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_lmdb_dataset(filenames_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_lmdb_dataset_eager(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) - else - 
experimental_lmdb_dataset_graph(filenames_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -28361,48 +51459,86 @@ end """ begin - function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "SparseAccumulatorTakeGradient") do - desc = tf.NodeDescription("SparseAccumulatorTakeGradient") - handle_ = convert(Tensor{String}, handle_) - num_required_ = convert(Tensor{Int32}, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function sparse_accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorTakeGradient") do + desc = tf.NodeDescription("SparseAccumulatorTakeGradient") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + num_required_ = convert(Tensor{Int32}, num_required_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_required_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) + desc = tf.EagerOp("SparseAccumulatorTakeGradient") + handle_ = convert(tf.EagerTensor, handle_) + num_required_ = convert(tf.EagerTensor, num_required_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_required_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function sparse_accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) - desc = tf.EagerOp("SparseAccumulatorTakeGradient") - handle_ = convert(tf.EagerTensor, handle_) - num_required_ = convert(tf.EagerTensor, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(sparse_accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - sparse_accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) - else - 
sparse_accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) - end - end end @@ -28412,33 +51548,57 @@ end """ begin - function stack_close_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "StackCloseV2") do - desc = tf.NodeDescription("StackCloseV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) + begin + function stack_close_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackCloseV2") do + desc = tf.NodeDescription("StackCloseV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stack_close_v2_eager(handle_; name=nothing) - desc = tf.EagerOp("StackCloseV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stack_close_v2_eager(handle_; name=nothing) + desc = tf.EagerOp("StackCloseV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(stack_close_v2, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close_v2(handle_; name=nothing) - if tf.in_eager_mode() - stack_close_v2_eager(handle_; name=name) - else - stack_close_v2_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close_v2(handle_; name=nothing) + if tf.in_eager_mode() + stack_close_v2_eager(handle_; name=name) + else + stack_close_v2_graph(handle_; name=name) + end end - end + end end @@ -28448,59 +51608,95 @@ end """ begin - function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapSize") do - desc = tf.NodeDescription("MapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) + begin + function map_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapSize") do + desc = tf.NodeDescription("MapSize") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapSize") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + 
desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function map_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -28510,72 +51706,166 @@ end """ begin - function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdagradDA") do - desc = tf.NodeDescription("ResourceApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) - 
tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAdagradDA") - var_ = convert(tf.EagerTensor, var_) - gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) - grad_ = convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - global_step_ = convert(tf.EagerTensor, global_step_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + begin + function resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdagradDA") do + desc = tf.NodeDescription("ResourceApplyAdagradDA") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + begin + end + end + begin + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + global_step_ = convert(Tensor{Int64}, global_step_) + begin + end + end + begin + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, gradient_accumulator_) + end + begin + tf.add_input(desc, 
gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdagradDA") + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, gradient_accumulator_) + end + begin + tf.add_input(desc, gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end + end end @@ -28585,33 +51875,57 @@ end """ begin - function tensor_forest_tree_size_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeSize") do - desc = tf.NodeDescription("TensorForestTreeSize") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) + begin + function tensor_forest_tree_size_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSize") do + desc = tf.NodeDescription("TensorForestTreeSize") + begin + begin + tree_handle_ = convert(Tensor{Any}, tree_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_size_eager(tree_handle_; name=nothing) - desc = tf.EagerOp("TensorForestTreeSize") - 
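# The shape of every generated op, distilled: a *_graph builder, a *_eager
# executor, and a tf.@op front door that dispatches on tf.in_eager_mode().
# The three functions below are hypothetical stand-ins, not TensorFlow.jl
# internals:
my_op_graph(x; name=nothing) = "graph node for $x"    # builds a graph node
my_op_eager(x; name=nothing) = "eager result for $x"  # runs the kernel now
in_eager_mode() = true                                # stand-in for tf.in_eager_mode()
function my_op(x; name=nothing)
    if in_eager_mode()
        my_op_eager(x; name=name)
    else
        my_op_graph(x; name=name)
    end
end
my_op(1)  # "eager result for 1"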
tree_handle_ = convert(tf.EagerTensor, tree_handle_) - tf.add_input(desc, tree_handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_forest_tree_size_eager(tree_handle_; name=nothing) + desc = tf.EagerOp("TensorForestTreeSize") + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_size, [tree_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_size_eager(tree_handle_; name=name) - else - tensor_forest_tree_size_graph(tree_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_size(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_size_eager(tree_handle_; name=name) + else + tensor_forest_tree_size_graph(tree_handle_; name=name) + end end - end + end end @@ -28621,35 +51935,63 @@ end """ begin - function matrix_diag_part_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDiagPart") do - desc = tf.NodeDescription("MatrixDiagPart") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function matrix_diag_part_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDiagPart") do + desc = tf.NodeDescription("MatrixDiagPart") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_diag_part_eager(input_; name=nothing) - desc = tf.EagerOp("MatrixDiagPart") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_diag_part, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_diag_part_eager(input_; name=nothing) + desc = tf.EagerOp("MatrixDiagPart") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_diag_part, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag_part(input_; name=nothing) - if tf.in_eager_mode() - matrix_diag_part_eager(input_; name=name) - else - matrix_diag_part_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_diag_part(input_; name=nothing) + if tf.in_eager_mode() + matrix_diag_part_eager(input_; name=name) + else + matrix_diag_part_graph(input_; name=name) + end end - end + end end @@ -28659,33 +52001,57 @@ end """ begin - function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, 
"ReaderNumWorkUnitsCompletedV2") do - desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_num_work_units_completed_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompletedV2") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompletedV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_num_work_units_completed_v2_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderNumWorkUnitsCompletedV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_num_work_units_completed_v2, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_work_units_completed_v2_eager(reader_handle_; name=name) - else - reader_num_work_units_completed_v2_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_work_units_completed_v2_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_v2_graph(reader_handle_; name=name) + end end - end + end end @@ -28695,47 +52061,99 @@ end """ begin - function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplitV3") do - desc = tf.NodeDescription("TensorArraySplitV3") - handle_ = convert(Tensor{Any}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySplitV3") - handle_ = convert(tf.EagerTensor, handle_) - value_ = convert(tf.EagerTensor, value_) - lengths_ = convert(tf.EagerTensor, lengths_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name) + begin + function tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV3") do + desc = tf.NodeDescription("TensorArraySplitV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + lengths_ = convert(Tensor{Int64}, lengths_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySplitV3") + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split_v3, [handle_, value_, lengths_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v3(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_v3_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v3_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end + end end @@ -28745,59 +52163,123 @@ end """ begin - function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "SparseToDense") do - desc = tf.NodeDescription("SparseToDense") - sparse_indices_ = convert(Tensor{Any}, sparse_indices_) - sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1) - output_shape_ = convert(Tensor{Any}, output_shape_) - output_shape_ = output_shape_ - convert(tf.Tensor{eltype(output_shape_)}, 1) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - default_value_ = convert(Tensor{Any}, default_value_) - (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_) - (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, output_shape_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, default_value_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + 
local desc + tf.with_op_name(name, "SparseToDense") do + desc = tf.NodeDescription("SparseToDense") + begin + begin + sparse_indices_ = convert(Tensor{Any}, sparse_indices_) + begin + sparse_indices_ = sparse_indices_ - convert(tf.Tensor{eltype(sparse_indices_)}, 1) + end + end + begin + output_shape_ = convert(Tensor{Any}, output_shape_) + begin + output_shape_ = output_shape_ - convert(tf.Tensor{eltype(output_shape_)}, 1) + end + end + begin + sparse_values_ = convert(Tensor{Any}, sparse_values_) + begin + end + end + begin + default_value_ = convert(Tensor{Any}, default_value_) + begin + end + end + begin + (sparse_values_, default_value_) = tf.tf_promote(sparse_values_, default_value_) + end + begin + (sparse_indices_, output_shape_) = tf.tf_promote(sparse_indices_, output_shape_) + end + end + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, output_shape_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("SparseToDense") + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + output_shape_ = convert(tf.EagerTensor, output_shape_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + default_value_ = convert(tf.EagerTensor, default_value_) + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, output_shape_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["Tindices"] = tf.data_type(sparse_indices_) + end + begin + desc["Tindices"] = tf.data_type(output_shape_) + end + begin + desc["T"] = tf.data_type(sparse_values_) + end + begin + desc["T"] = tf.data_type(default_value_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) + if tf.in_eager_mode() + sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + else + sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) - desc = tf.EagerOp("SparseToDense") - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - output_shape_ = convert(tf.EagerTensor, output_shape_) - sparse_values_ = convert(tf.EagerTensor, sparse_values_) - default_value_ = convert(tf.EagerTensor, default_value_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, output_shape_) - tf.add_input(desc, sparse_values_) - 
tf.add_input(desc, default_value_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["Tindices"] = tf.data_type(sparse_indices_) - desc["Tindices"] = tf.data_type(output_shape_) - desc["T"] = tf.data_type(sparse_values_) - desc["T"] = tf.data_type(default_value_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_to_dense, [sparse_indices_, output_shape_, sparse_values_, default_value_], name=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_to_dense(sparse_indices_, output_shape_, sparse_values_, default_value_; name=nothing, validate_indices=nothing) - if tf.in_eager_mode() - sparse_to_dense_eager(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) - else - sparse_to_dense_graph(sparse_indices_, output_shape_, sparse_values_, default_value_; name=name, validate_indices=validate_indices) - end - end end @@ -28807,41 +52289,73 @@ end Operator that connects N unreplicated inputs to an N-way replicated TPU computation. """ begin - function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "TPUReplicatedInput") do - desc = tf.NodeDescription("TPUReplicatedInput") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function tpu_replicated_input_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "TPUReplicatedInput") do + desc = tf.NodeDescription("TPUReplicatedInput") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("TPUReplicatedInput") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_input(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + tpu_replicated_input_eager(inputs_; name=name, N=N) + else + tpu_replicated_input_graph(inputs_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tpu_replicated_input_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("TPUReplicatedInput") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(tpu_replicated_input, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
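# The sparse_to_dense graph wrapper above rebases its index-like inputs from
# Julia's 1-based convention to TensorFlow's 0-based one before the node is
# built (the `x - convert(tf.Tensor{eltype(x)}, 1)` lines). The arithmetic,
# shown on plain arrays:
to_zero_based(ix) = ix .- 1
to_zero_based([1, 3])  # [0, 2] — what the SparseToDense kernel actually sees
# So Julia-side indices [1, 3] with shape [4] describe a dense vector whose
# first and third entries hold sparse_values and the rest default_value.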
tpu_replicated_input(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - tpu_replicated_input_eager(inputs_; name=name, N=N) - else - tpu_replicated_input_graph(inputs_; name=name, N=N) - end - end end @@ -28851,33 +52365,57 @@ end """ begin - function stack_close_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "StackClose") do - desc = tf.NodeDescription("StackClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function stack_close_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "StackClose") do + desc = tf.NodeDescription("StackClose") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stack_close_eager(handle_; name=nothing) - desc = tf.EagerOp("StackClose") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(stack_close, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stack_close_eager(handle_; name=nothing) + desc = tf.EagerOp("StackClose") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(stack_close, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close(handle_; name=nothing) - if tf.in_eager_mode() - stack_close_eager(handle_; name=name) - else - stack_close_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_close(handle_; name=nothing) + if tf.in_eager_mode() + stack_close_eager(handle_; name=name) + else + stack_close_graph(handle_; name=name) + end end - end + end end @@ -28887,44 +52425,74 @@ end """ begin - function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DeserializeManySparse") do - desc = tf.NodeDescription("DeserializeManySparse") - serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function deserialize_many_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DeserializeManySparse") do + desc = tf.NodeDescription("DeserializeManySparse") + begin + begin + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_sparse_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) - desc = tf.EagerOp("DeserializeManySparse") - serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = 
tf.execute(desc) - node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function deserialize_many_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) + desc = tf.EagerOp("DeserializeManySparse") + serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) + begin + begin + tf.add_input(desc, serialized_sparse_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(deserialize_many_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype) - else - deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_many_sparse(serialized_sparse_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + deserialize_many_sparse_eager(serialized_sparse_; name=name, dtype=dtype) + else + deserialize_many_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + end end - end + end end @@ -28934,53 +52502,93 @@ end Replacement node for NcclReduce. """ begin - function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclReduceRecv") do - desc = tf.NodeDescription("_NcclReduceRecv") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function _nccl_reduce_recv_graph(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclReduceRecv") do + desc = tf.NodeDescription("_NcclReduceRecv") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclReduceRecv") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if reduction !== nothing + desc["reduction"] = Base.String(reduction) + end + end + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = 
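# deserialize_many_sparse above is a multi-output op: its graph builder
# collects one tf.Tensor per output index (1:3 — indices, values, shape),
# and its eager path returns the whole `res` vector instead of res[1].
# The collection loop, distilled with stand-ins for tf.Operation/tf.Tensor:
make_tensor(op, idx) = (op, idx)                  # stand-in for tf.Tensor(op, out_idx)
op = "DeserializeManySparse"                      # stand-in for tf.Operation(desc)
out = [make_tensor(op, out_idx) for out_idx in 1:3]
length(out)  # 3 — callers destructure indices, values and shape from this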
tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + else + _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _nccl_reduce_recv_eager(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - desc = tf.EagerOp("_NcclReduceRecv") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if reduction !== nothing - desc["reduction"] = Base.String(reduction) - end - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_nccl_reduce_recv, [input_], name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_reduce_recv(input_; name=nothing, reduction=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_reduce_recv_eager(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - else - _nccl_reduce_recv_graph(input_; name=name, reduction=reduction, num_devices=num_devices, shared_name=shared_name) - end - end end @@ -28990,47 +52598,91 @@ end """ begin - function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "MirrorPadGrad") do - desc = tf.NodeDescription("MirrorPadGrad") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) + begin + function mirror_pad_grad_graph(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPadGrad") do + desc = tf.NodeDescription("MirrorPadGrad") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing) + desc = tf.EagerOp("MirrorPadGrad") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + begin + 
desc["T"] = tf.data_type(input_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + res = tf.execute(desc) + node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) + if tf.in_eager_mode() + mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function mirror_pad_grad_eager(input_, paddings_; name=nothing, mode=nothing) - desc = tf.EagerOp("MirrorPadGrad") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - desc["T"] = tf.data_type(input_) - desc["Tpaddings"] = tf.data_type(paddings_) - res = tf.execute(desc) - node = tf.TapeNode(mirror_pad_grad, [input_, paddings_], name=nothing, mode=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad_grad(input_, paddings_; name=nothing, mode=nothing) - if tf.in_eager_mode() - mirror_pad_grad_eager(input_, paddings_; name=name, mode=mode) - else - mirror_pad_grad_graph(input_, paddings_; name=name, mode=mode) - end - end end @@ -29040,40 +52692,78 @@ end """ begin - function broadcast_args_graph(s0_, s1_; name=nothing) - local desc - tf.with_op_name(name, "BroadcastArgs") do - desc = tf.NodeDescription("BroadcastArgs") - s0_ = convert(Tensor{Int32}, s0_) - s1_ = convert(Tensor{Int32}, s1_) - (s0_, s1_) = tf.tf_promote(s0_, s1_) - tf.add_input(desc, s0_) - tf.add_input(desc, s1_) + begin + function broadcast_args_graph(s0_, s1_; name=nothing) + local desc + tf.with_op_name(name, "BroadcastArgs") do + desc = tf.NodeDescription("BroadcastArgs") + begin + begin + s0_ = convert(Tensor{Int32}, s0_) + begin + end + end + begin + s1_ = convert(Tensor{Int32}, s1_) + begin + end + end + begin + (s0_, s1_) = tf.tf_promote(s0_, s1_) + end + end + begin + begin + tf.add_input(desc, s0_) + end + begin + tf.add_input(desc, s1_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function broadcast_args_eager(s0_, s1_; name=nothing) - desc = tf.EagerOp("BroadcastArgs") - s0_ = convert(tf.EagerTensor, s0_) - s1_ = convert(tf.EagerTensor, s1_) - tf.add_input(desc, s0_) - tf.add_input(desc, s1_) - desc["T"] = tf.data_type(s0_) - desc["T"] = tf.data_type(s1_) - res = tf.execute(desc) - node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function broadcast_args_eager(s0_, s1_; name=nothing) + desc = tf.EagerOp("BroadcastArgs") + s0_ = convert(tf.EagerTensor, s0_) + s1_ = convert(tf.EagerTensor, s1_) + begin + begin + tf.add_input(desc, s0_) + end + begin + tf.add_input(desc, s1_) + end + end + begin + end + begin + desc["T"] = tf.data_type(s0_) + end + begin + desc["T"] = tf.data_type(s1_) + end + res = tf.execute(desc) + node = tf.TapeNode(broadcast_args, [s0_, s1_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_args(s0_, s1_; name=nothing) - if tf.in_eager_mode() - broadcast_args_eager(s0_, s1_; name=name) - else - broadcast_args_graph(s0_, s1_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function broadcast_args(s0_, s1_; name=nothing) + if tf.in_eager_mode() + broadcast_args_eager(s0_, s1_; name=name) + else + broadcast_args_graph(s0_, s1_; name=name) + end end - end + end end @@ -29083,47 +52773,91 @@ end """ begin - function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessTruncatedNormal") do - desc = tf.NodeDescription("StatelessTruncatedNormal") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function stateless_truncated_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessTruncatedNormal") do + desc = tf.NodeDescription("StatelessTruncatedNormal") + begin + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (seed_,) = tf.tf_promote(seed_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessTruncatedNormal") + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + begin + desc["Tseed"] = tf.data_type(seed_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function stateless_truncated_normal_eager(shape_, seed_; name=nothing, dtype=nothing) - desc = tf.EagerOp("StatelessTruncatedNormal") - shape_ = convert(tf.EagerTensor, shape_) - seed_ = convert(tf.EagerTensor, seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - desc["Tseed"] = tf.data_type(seed_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_truncated_normal, [shape_, seed_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_truncated_normal(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_truncated_normal_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_truncated_normal_graph(shape_, seed_; name=name, dtype=dtype) - end - end end @@ -29133,37 +52867,69 @@ end """ begin - function regex_full_match_graph(input_, pattern_; name=nothing) - local desc - tf.with_op_name(name, "RegexFullMatch") do - desc = tf.NodeDescription("RegexFullMatch") - input_ = convert(Tensor{String}, input_) - pattern_ = convert(Tensor{String}, pattern_) - tf.add_input(desc, input_) - tf.add_input(desc, pattern_) + begin + function regex_full_match_graph(input_, pattern_; name=nothing) + local desc + tf.with_op_name(name, "RegexFullMatch") do + desc = tf.NodeDescription("RegexFullMatch") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + begin + pattern_ = convert(Tensor{String}, pattern_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, pattern_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function regex_full_match_eager(input_, pattern_; name=nothing) - desc = tf.EagerOp("RegexFullMatch") - input_ = convert(tf.EagerTensor, input_) - pattern_ = convert(tf.EagerTensor, pattern_) - tf.add_input(desc, input_) - tf.add_input(desc, pattern_) - res = tf.execute(desc) - node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function regex_full_match_eager(input_, pattern_; name=nothing) + desc = tf.EagerOp("RegexFullMatch") + input_ = convert(tf.EagerTensor, input_) + pattern_ = convert(tf.EagerTensor, pattern_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, pattern_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(regex_full_match, [input_, pattern_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_full_match(input_, pattern_; name=nothing) - if tf.in_eager_mode() - regex_full_match_eager(input_, pattern_; name=name) - else - regex_full_match_graph(input_, pattern_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_full_match(input_, pattern_; name=nothing) + if tf.in_eager_mode() + regex_full_match_eager(input_, pattern_; name=name) + else + regex_full_match_graph(input_, pattern_; name=name) + end end - end + end end @@ -29173,33 +52939,57 @@ end """ begin - function unwrap_dataset_variant_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "UnwrapDatasetVariant") do - desc = tf.NodeDescription("UnwrapDatasetVariant") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) + begin + function unwrap_dataset_variant_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "UnwrapDatasetVariant") do + desc = tf.NodeDescription("UnwrapDatasetVariant") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function 
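# regex_full_match above wraps the RegexFullMatch kernel: an elementwise,
# anchored "does the whole string match". Equivalent semantics in plain
# Julia, as a hedged analogue rather than the kernel itself:
full_match(s, pat) = occursin(Regex("^(?:" * pat * ")\$"), s)
full_match.(["ab", "abc"], "ab")  # [true, false] — whole-string, not substring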
unwrap_dataset_variant_eager(input_handle_; name=nothing) - desc = tf.EagerOp("UnwrapDatasetVariant") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - res = tf.execute(desc) - node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function unwrap_dataset_variant_eager(input_handle_; name=nothing) + desc = tf.EagerOp("UnwrapDatasetVariant") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(unwrap_dataset_variant, [input_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing) - if tf.in_eager_mode() - unwrap_dataset_variant_eager(input_handle_; name=name) - else - unwrap_dataset_variant_graph(input_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unwrap_dataset_variant(input_handle_; name=nothing) + if tf.in_eager_mode() + unwrap_dataset_variant_eager(input_handle_; name=name) + else + unwrap_dataset_variant_graph(input_handle_; name=name) + end end - end + end end @@ -29209,45 +52999,77 @@ end """ begin - function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) - local desc - tf.with_op_name(name, "Empty") do - desc = tf.NodeDescription("Empty") - shape_ = convert(Tensor{Int32}, shape_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function empty_graph(shape_; name=nothing, dtype=nothing, init=nothing) + local desc + tf.with_op_name(name, "Empty") do + desc = tf.NodeDescription("Empty") + begin + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + end + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if init !== nothing + desc["init"] = Base.Bool(init) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing) + desc = tf.EagerOp("Empty") + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if init !== nothing + desc["init"] = Base.Bool(init) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing) + if tf.in_eager_mode() + empty_eager(shape_; name=name, dtype=dtype, init=init) + else + empty_graph(shape_; name=name, dtype=dtype, init=init) + end end - if init !== nothing - desc["init"] = Base.Bool(init) - end - end - tf.Tensor(tf.Operation(desc)) - end - function empty_eager(shape_; name=nothing, dtype=nothing, init=nothing) - desc = tf.EagerOp("Empty") - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if init !== nothing - desc["init"] = 
Base.Bool(init) - end - res = tf.execute(desc) - node = tf.TapeNode(empty, [shape_], name=nothing, dtype=nothing, init=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty(shape_; name=nothing, dtype=nothing, init=nothing) - if tf.in_eager_mode() - empty_eager(shape_; name=name, dtype=dtype, init=init) - else - empty_graph(shape_; name=name, dtype=dtype, init=init) - end - end end @@ -29257,47 +53079,75 @@ end Retrieve multiple values that will be emitted by the computation as an XLA """ begin - function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "OutfeedDequeueTuple") do - desc = tf.NodeDescription("OutfeedDequeueTuple") - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function outfeed_dequeue_tuple_graph(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "OutfeedDequeueTuple") do + desc = tf.NodeDescription("OutfeedDequeueTuple") + begin + end + begin + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function outfeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + desc = tf.EagerOp("OutfeedDequeueTuple") + begin + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + else + outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + end end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) - end - function outfeed_dequeue_tuple_eager(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - desc = tf.EagerOp("OutfeedDequeueTuple") - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(outfeed_dequeue_tuple, [], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_dequeue_tuple(; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - outfeed_dequeue_tuple_eager(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) - else - outfeed_dequeue_tuple_graph(; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) - end - end end @@ -29307,40 +53157,78 @@ end """ begin - function div_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Div") do - desc = tf.NodeDescription("Div") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Div") do + desc = tf.NodeDescription("Div") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function div_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Div") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(div, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function div_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Div") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(div, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div(x_, y_; name=nothing) - if tf.in_eager_mode() - div_eager(x_, y_; name=name) - else - div_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div(x_, y_; name=nothing) + if tf.in_eager_mode() + div_eager(x_, y_; name=name) + else + div_graph(x_, y_; name=name) + end end - end + end end @@ -29350,59 +53238,95 @@ end """ begin - function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Barrier") do - desc = tf.NodeDescription("Barrier") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function barrier_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Barrier") do + desc = tf.NodeDescription("Barrier") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] 
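# End-to-end, the generated div wrapper above is what a graph-mode division
# bottoms out in. A usage sketch with TensorFlow.jl's public API — the Ops
# module path is an assumption about where generate_ops.jl places the
# wrappers, not something this diff states:
using TensorFlow
sess = Session(Graph())
a = constant(6.0)
b = constant(3.0)
run(sess, TensorFlow.Ops.div(a, b))  # 2.0, via div_graph above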
= map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function barrier_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Barrier") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function barrier_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("Barrier") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(barrier, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - barrier_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - barrier_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -29412,40 +53336,78 @@ end """ begin - function truncate_div_graph(x_, y_; name=nothing) - local 
desc - tf.with_op_name(name, "TruncateDiv") do - desc = tf.NodeDescription("TruncateDiv") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function truncate_div_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateDiv") do + desc = tf.NodeDescription("TruncateDiv") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function truncate_div_eager(x_, y_; name=nothing) - desc = tf.EagerOp("TruncateDiv") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(truncate_div, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function truncate_div_eager(x_, y_; name=nothing) + desc = tf.EagerOp("TruncateDiv") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(truncate_div, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_div(x_, y_; name=nothing) - if tf.in_eager_mode() - truncate_div_eager(x_, y_; name=name) - else - truncate_div_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_div(x_, y_; name=nothing) + if tf.in_eager_mode() + truncate_div_eager(x_, y_; name=name) + else + truncate_div_graph(x_, y_; name=name) + end end - end + end end @@ -29455,55 +53417,99 @@ end """ begin - function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) - local desc - tf.with_op_name(name, "UnicodeEncode") do - desc = tf.NodeDescription("UnicodeEncode") - input_values_ = convert(Tensor{Int32}, input_values_) - input_splits_ = convert(Tensor{Int64}, input_splits_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_splits_) - if errors !== nothing - desc["errors"] = Base.String(errors) + begin + function unicode_encode_graph(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + local desc + tf.with_op_name(name, "UnicodeEncode") do + desc = tf.NodeDescription("UnicodeEncode") + begin + begin + input_values_ = convert(Tensor{Int32}, input_values_) + begin + end + end + begin + input_splits_ = convert(Tensor{Int64}, input_splits_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_splits_) + end + end + begin + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + end + begin + if replacement_char !== nothing + 
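# truncate_div above wraps TruncateDiv: integer division rounding toward
# zero (C semantics), as opposed to floor division. Julia's own builtins
# show the distinction the two TF kernels implement:
div(-7, 2)  # -3 — truncation toward zero, TruncateDiv's behavior
fld(-7, 2)  # -4 — floor division, FloorDiv's behavior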
desc["replacement_char"] = Base.Int(replacement_char) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + desc = tf.EagerOp("UnicodeEncode") + input_values_ = convert(tf.EagerTensor, input_values_) + input_splits_ = convert(tf.EagerTensor, input_splits_) + begin + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_splits_) + end + end + begin + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if output_encoding !== nothing + desc["output_encoding"] = Base.String(output_encoding) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) + if tf.in_eager_mode() + unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + else + unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) + end end - if output_encoding !== nothing - desc["output_encoding"] = Base.String(output_encoding) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - end - tf.Tensor(tf.Operation(desc)) - end - function unicode_encode_eager(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) - desc = tf.EagerOp("UnicodeEncode") - input_values_ = convert(tf.EagerTensor, input_values_) - input_splits_ = convert(tf.EagerTensor, input_splits_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_splits_) - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if output_encoding !== nothing - desc["output_encoding"] = Base.String(output_encoding) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - res = tf.execute(desc) - node = tf.TapeNode(unicode_encode, [input_values_, input_splits_], name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_encode(input_values_, input_splits_; name=nothing, errors=nothing, output_encoding=nothing, replacement_char=nothing) - if tf.in_eager_mode() - unicode_encode_eager(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) - else - unicode_encode_graph(input_values_, input_splits_; name=name, errors=errors, output_encoding=output_encoding, replacement_char=replacement_char) - end - end end @@ -29513,39 +53519,67 @@ end """ begin - function merge_summary_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "MergeSummary") do - desc = 
tf.NodeDescription("MergeSummary") - inputs_ = [convert(Tensor{String}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function merge_summary_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "MergeSummary") do + desc = tf.NodeDescription("MergeSummary") + begin + begin + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function merge_summary_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("MergeSummary") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function merge_summary_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("MergeSummary") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(merge_summary, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - merge_summary_eager(inputs_; name=name, N=N) - else - merge_summary_graph(inputs_; name=name, N=N) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function merge_summary(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + merge_summary_eager(inputs_; name=name, N=N) + else + merge_summary_graph(inputs_; name=name, N=N) + end end - end + end end @@ -29555,33 +53589,57 @@ end """ begin - function fake_queue_graph(resource_; name=nothing) - local desc - tf.with_op_name(name, "FakeQueue") do - desc = tf.NodeDescription("FakeQueue") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) + begin + function fake_queue_graph(resource_; name=nothing) + local desc + tf.with_op_name(name, "FakeQueue") do + desc = tf.NodeDescription("FakeQueue") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fake_queue_eager(resource_; name=nothing) - desc = tf.EagerOp("FakeQueue") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - res = tf.execute(desc) - node = tf.TapeNode(fake_queue, [resource_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fake_queue_eager(resource_; name=nothing) + desc = tf.EagerOp("FakeQueue") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, resource_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(fake_queue, [resource_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
fake_queue(resource_; name=nothing) - if tf.in_eager_mode() - fake_queue_eager(resource_; name=name) - else - fake_queue_graph(resource_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_queue(resource_; name=nothing) + if tf.in_eager_mode() + fake_queue_eager(resource_; name=name) + else + fake_queue_graph(resource_; name=name) + end end - end + end end @@ -29591,35 +53649,63 @@ end """ begin - function batch_cholesky_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchCholesky") do - desc = tf.NodeDescription("BatchCholesky") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function batch_cholesky_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchCholesky") do + desc = tf.NodeDescription("BatchCholesky") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_cholesky_eager(input_; name=nothing) - desc = tf.EagerOp("BatchCholesky") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_cholesky, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_cholesky_eager(input_; name=nothing) + desc = tf.EagerOp("BatchCholesky") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_cholesky, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky(input_; name=nothing) - if tf.in_eager_mode() - batch_cholesky_eager(input_; name=name) - else - batch_cholesky_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_cholesky(input_; name=nothing) + if tf.in_eager_mode() + batch_cholesky_eager(input_; name=name) + else + batch_cholesky_graph(input_; name=name) + end end - end + end end @@ -29629,53 +53715,85 @@ end """ begin - function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "Iterator") do - desc = tf.NodeDescription("Iterator") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function iterator_graph(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "Iterator") do + desc = tf.NodeDescription("Iterator") + begin + end + begin + end + begin + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if 
output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("Iterator") + begin + end + begin + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + else + iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_eager(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("Iterator") - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator, [], name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator(; name=nothing, shared_name=nothing, container=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_eager(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - else - iterator_graph(; name=name, shared_name=shared_name, container=container, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -29685,35 +53803,63 @@ end """ begin - function bessel_i1e_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "BesselI1e") do - desc = tf.NodeDescription("BesselI1e") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function bessel_i1e_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI1e") do + desc = tf.NodeDescription("BesselI1e") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + 
tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bessel_i1e_eager(x_; name=nothing) - desc = tf.EagerOp("BesselI1e") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(bessel_i1e, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bessel_i1e_eager(x_; name=nothing) + desc = tf.EagerOp("BesselI1e") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(bessel_i1e, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i1e(x_; name=nothing) - if tf.in_eager_mode() - bessel_i1e_eager(x_; name=name) - else - bessel_i1e_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i1e(x_; name=nothing) + if tf.in_eager_mode() + bessel_i1e_eager(x_; name=name) + else + bessel_i1e_graph(x_; name=name) + end end - end + end end @@ -29723,37 +53869,69 @@ end """ begin - function import_event_graph(writer_, event_; name=nothing) - local desc - tf.with_op_name(name, "ImportEvent") do - desc = tf.NodeDescription("ImportEvent") - writer_ = convert(Tensor{Any}, writer_) - event_ = convert(Tensor{String}, event_) - tf.add_input(desc, writer_) - tf.add_input(desc, event_) + begin + function import_event_graph(writer_, event_; name=nothing) + local desc + tf.with_op_name(name, "ImportEvent") do + desc = tf.NodeDescription("ImportEvent") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + event_ = convert(Tensor{String}, event_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, event_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function import_event_eager(writer_, event_; name=nothing) - desc = tf.EagerOp("ImportEvent") - writer_ = convert(tf.EagerTensor, writer_) - event_ = convert(tf.EagerTensor, event_) - tf.add_input(desc, writer_) - tf.add_input(desc, event_) - res = tf.execute(desc) - node = tf.TapeNode(import_event, [writer_, event_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function import_event_eager(writer_, event_; name=nothing) + desc = tf.EagerOp("ImportEvent") + writer_ = convert(tf.EagerTensor, writer_) + event_ = convert(tf.EagerTensor, event_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, event_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(import_event, [writer_, event_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function import_event(writer_, event_; name=nothing) - if tf.in_eager_mode() - import_event_eager(writer_, event_; name=name) - else - import_event_graph(writer_, event_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function import_event(writer_, event_; name=nothing) + if tf.in_eager_mode() + import_event_eager(writer_, event_; name=name) + else + import_event_graph(writer_, event_; name=name) + end end - end + 
end end @@ -29763,78 +53941,144 @@ end """ begin - function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) - local desc - tf.with_op_name(name, "QuantizedInstanceNorm") do - desc = tf.NodeDescription("QuantizedInstanceNorm") - x_ = convert(Tensor{Any}, x_) - x_min_ = convert(Tensor{Float32}, x_min_) - x_max_ = convert(Tensor{Float32}, x_max_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, x_min_) - tf.add_input(desc, x_max_) - if output_range_given !== nothing - desc["output_range_given"] = Base.Bool(output_range_given) - end - if given_y_min !== nothing - desc["given_y_min"] = Base.identity(given_y_min) - end - if given_y_max !== nothing - desc["given_y_max"] = Base.identity(given_y_max) + begin + function quantized_instance_norm_graph(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + local desc + tf.with_op_name(name, "QuantizedInstanceNorm") do + desc = tf.NodeDescription("QuantizedInstanceNorm") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + x_min_ = convert(Tensor{Float32}, x_min_) + begin + end + end + begin + x_max_ = convert(Tensor{Float32}, x_max_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, x_min_) + end + begin + tf.add_input(desc, x_max_) + end + end + begin + begin + if output_range_given !== nothing + desc["output_range_given"] = Base.Bool(output_range_given) + end + end + begin + if given_y_min !== nothing + desc["given_y_min"] = Base.identity(given_y_min) + end + end + begin + if given_y_max !== nothing + desc["given_y_max"] = Base.identity(given_y_max) + end + end + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if min_separation !== nothing + desc["min_separation"] = Base.identity(min_separation) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + desc = tf.EagerOp("QuantizedInstanceNorm") + x_ = convert(tf.EagerTensor, x_) + x_min_ = convert(tf.EagerTensor, x_min_) + x_max_ = convert(tf.EagerTensor, x_max_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, x_min_) + end + begin + tf.add_input(desc, x_max_) + end + end + begin + begin + if output_range_given !== nothing + desc["output_range_given"] = Base.Bool(output_range_given) + end + end + begin + if given_y_min !== nothing + desc["given_y_min"] = Base.identity(given_y_min) + end + end + begin + if given_y_max !== nothing + desc["given_y_max"] = Base.identity(given_y_max) + end + end + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if min_separation !== nothing + desc["min_separation"] = Base.identity(min_separation) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, 
given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) + if tf.in_eager_mode() + quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + else + quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) + end end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if min_separation !== nothing - desc["min_separation"] = Base.identity(min_separation) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_instance_norm_eager(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) - desc = tf.EagerOp("QuantizedInstanceNorm") - x_ = convert(tf.EagerTensor, x_) - x_min_ = convert(tf.EagerTensor, x_min_) - x_max_ = convert(tf.EagerTensor, x_max_) - tf.add_input(desc, x_) - tf.add_input(desc, x_min_) - tf.add_input(desc, x_max_) - if output_range_given !== nothing - desc["output_range_given"] = Base.Bool(output_range_given) - end - if given_y_min !== nothing - desc["given_y_min"] = Base.identity(given_y_min) - end - if given_y_max !== nothing - desc["given_y_max"] = Base.identity(given_y_max) - end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if min_separation !== nothing - desc["min_separation"] = Base.identity(min_separation) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_instance_norm, [x_, x_min_, x_max_], name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_instance_norm(x_, x_min_, x_max_; name=nothing, output_range_given=nothing, given_y_min=nothing, given_y_max=nothing, variance_epsilon=nothing, min_separation=nothing) - if tf.in_eager_mode() - quantized_instance_norm_eager(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) - else - quantized_instance_norm_graph(x_, x_min_, x_max_; name=name, output_range_given=output_range_given, given_y_min=given_y_min, given_y_max=given_y_max, variance_epsilon=variance_epsilon, min_separation=min_separation) - end - end end @@ -29844,47 +54088,99 @@ end """ begin - function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayWriteV3") do - desc = tf.NodeDescription("TensorArrayWriteV3") - handle_ = convert(Tensor{Any}, handle_) - index_ = convert(Tensor{Int32}, 
index_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayWriteV3") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) - else - tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + begin + function tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayWriteV3") do + desc = tf.NodeDescription("TensorArrayWriteV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayWriteV3") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_write_v3, [handle_, index_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_write_v3(handle_, index_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_write_v3_eager(handle_, index_, value_, flow_in_; name=name) + else + tensor_array_write_v3_graph(handle_, index_, value_, flow_in_; name=name) + end end - end + end end @@ -29894,61 +54190,109 @@ end Load embedding parameters for a single table. 
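Restores an Adagrad optimizer's per-table state (the parameters and accumulators tensors) into TPU embedding memory. A minimal graph-mode usage sketch; the table name, shard layout, and argument names below are illustrative assumptions, not part of this patch:

    # Hypothetical call: restore one table's Adagrad state on shard 0 of a 1-shard layout.
    load_tpu_embedding_adagrad_parameters(params, accums;
                                          table_name="embedding_table",
                                          num_shards=1, shard_id=0)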
""" begin - function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== 
nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingAdagradParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adagrad_parameters, [parameters_, accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters(parameters_, accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adagrad_parameters_eager(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adagrad_parameters_graph(parameters_, accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -29958,57 +54302,105 @@ end """ begin - function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "DenseToDenseSetOperation") do - desc = tf.NodeDescription("DenseToDenseSetOperation") - set1_ = convert(Tensor{Any}, set1_) - set2_ = convert(Tensor{Any}, set2_) - (set1_, set2_) = tf.tf_promote(set1_, set2_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function dense_to_dense_set_operation_graph(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "DenseToDenseSetOperation") do + desc = tf.NodeDescription("DenseToDenseSetOperation") + begin + begin + set1_ = convert(Tensor{Any}, set1_) + begin + end + end + begin + set2_ = convert(Tensor{Any}, set2_) + begin + end + end + begin + (set1_, set2_) = tf.tf_promote(set1_, set2_) + end + end + begin + begin + tf.add_input(desc, set1_) + end + begin + tf.add_input(desc, set2_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + desc = tf.EagerOp("DenseToDenseSetOperation") + set1_ = convert(tf.EagerTensor, set1_) + set2_ = 
convert(tf.EagerTensor, set2_) + begin + begin + tf.add_input(desc, set1_) + end + begin + tf.add_input(desc, set2_) + end + end + begin + begin + if set_operation !== nothing + desc["set_operation"] = Base.String(set_operation) + end + end + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["T"] = tf.data_type(set1_) + end + begin + desc["T"] = tf.data_type(set2_) + end + res = tf.execute(desc) + node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) + if tf.in_eager_mode() + dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + else + dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function dense_to_dense_set_operation_eager(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) - desc = tf.EagerOp("DenseToDenseSetOperation") - set1_ = convert(tf.EagerTensor, set1_) - set2_ = convert(tf.EagerTensor, set2_) - tf.add_input(desc, set1_) - tf.add_input(desc, set2_) - if set_operation !== nothing - desc["set_operation"] = Base.String(set_operation) - end - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["T"] = tf.data_type(set1_) - desc["T"] = tf.data_type(set2_) - res = tf.execute(desc) - node = tf.TapeNode(dense_to_dense_set_operation, [set1_, set2_], name=nothing, set_operation=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dense_to_dense_set_operation(set1_, set2_; name=nothing, set_operation=nothing, validate_indices=nothing) - if tf.in_eager_mode() - dense_to_dense_set_operation_eager(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) - else - dense_to_dense_set_operation_graph(set1_, set2_; name=name, set_operation=set_operation, validate_indices=validate_indices) - end - end end @@ -30018,87 +54410,147 @@ end """ begin - function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - local desc - tf.with_op_name(name, "EncodeJpeg") do - desc = tf.NodeDescription("EncodeJpeg") - image_ = convert(Tensor{UInt8}, image_) - tf.add_input(desc, image_) - if format !== nothing - desc["format"] = Base.String(format) + begin + function encode_jpeg_graph(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + local desc + tf.with_op_name(name, "EncodeJpeg") do + desc = tf.NodeDescription("EncodeJpeg") + begin + begin + image_ = convert(Tensor{UInt8}, image_) + begin + end + end + end + begin 
+ begin + tf.add_input(desc, image_) + end + end + begin + begin + if format !== nothing + desc["format"] = Base.String(format) + end + end + begin + if quality !== nothing + desc["quality"] = Base.Int(quality) + end + end + begin + if progressive !== nothing + desc["progressive"] = Base.Bool(progressive) + end + end + begin + if optimize_size !== nothing + desc["optimize_size"] = Base.Bool(optimize_size) + end + end + begin + if chroma_downsampling !== nothing + desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) + end + end + begin + if density_unit !== nothing + desc["density_unit"] = Base.String(density_unit) + end + end + begin + if x_density !== nothing + desc["x_density"] = Base.Int(x_density) + end + end + begin + if y_density !== nothing + desc["y_density"] = Base.Int(y_density) + end + end + begin + if xmp_metadata !== nothing + desc["xmp_metadata"] = Base.String(xmp_metadata) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + desc = tf.EagerOp("EncodeJpeg") + image_ = convert(tf.EagerTensor, image_) + begin + begin + tf.add_input(desc, image_) + end + end + begin + begin + if format !== nothing + desc["format"] = Base.String(format) + end + end + begin + if quality !== nothing + desc["quality"] = Base.Int(quality) + end + end + begin + if progressive !== nothing + desc["progressive"] = Base.Bool(progressive) + end + end + begin + if optimize_size !== nothing + desc["optimize_size"] = Base.Bool(optimize_size) + end + end + begin + if chroma_downsampling !== nothing + desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) + end + end + begin + if density_unit !== nothing + desc["density_unit"] = Base.String(density_unit) + end + end + begin + if x_density !== nothing + desc["x_density"] = Base.Int(x_density) + end + end + begin + if y_density !== nothing + desc["y_density"] = Base.Int(y_density) + end + end + begin + if xmp_metadata !== nothing + desc["xmp_metadata"] = Base.String(xmp_metadata) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) + if tf.in_eager_mode() + encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + else + encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) + end end - if quality !== nothing - desc["quality"] = Base.Int(quality) - end - if 
progressive !== nothing - desc["progressive"] = Base.Bool(progressive) - end - if optimize_size !== nothing - desc["optimize_size"] = Base.Bool(optimize_size) - end - if chroma_downsampling !== nothing - desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) - end - if density_unit !== nothing - desc["density_unit"] = Base.String(density_unit) - end - if x_density !== nothing - desc["x_density"] = Base.Int(x_density) - end - if y_density !== nothing - desc["y_density"] = Base.Int(y_density) - end - if xmp_metadata !== nothing - desc["xmp_metadata"] = Base.String(xmp_metadata) - end - end - tf.Tensor(tf.Operation(desc)) - end - function encode_jpeg_eager(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - desc = tf.EagerOp("EncodeJpeg") - image_ = convert(tf.EagerTensor, image_) - tf.add_input(desc, image_) - if format !== nothing - desc["format"] = Base.String(format) - end - if quality !== nothing - desc["quality"] = Base.Int(quality) - end - if progressive !== nothing - desc["progressive"] = Base.Bool(progressive) - end - if optimize_size !== nothing - desc["optimize_size"] = Base.Bool(optimize_size) - end - if chroma_downsampling !== nothing - desc["chroma_downsampling"] = Base.Bool(chroma_downsampling) - end - if density_unit !== nothing - desc["density_unit"] = Base.String(density_unit) - end - if x_density !== nothing - desc["x_density"] = Base.Int(x_density) - end - if y_density !== nothing - desc["y_density"] = Base.Int(y_density) - end - if xmp_metadata !== nothing - desc["xmp_metadata"] = Base.String(xmp_metadata) - end - res = tf.execute(desc) - node = tf.TapeNode(encode_jpeg, [image_], name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_jpeg(image_; name=nothing, format=nothing, quality=nothing, progressive=nothing, optimize_size=nothing, chroma_downsampling=nothing, density_unit=nothing, x_density=nothing, y_density=nothing, xmp_metadata=nothing) - if tf.in_eager_mode() - encode_jpeg_eager(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) - else - encode_jpeg_graph(image_; name=name, format=format, quality=quality, progressive=progressive, optimize_size=optimize_size, chroma_downsampling=chroma_downsampling, density_unit=density_unit, x_density=x_density, y_density=y_density, xmp_metadata=xmp_metadata) - end - end end @@ -30108,62 +54560,120 @@ end """ begin - function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "FusedPadConv2D") do - desc = tf.NodeDescription("FusedPadConv2D") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if mode !== nothing - desc["mode"] = 
Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function fused_pad_conv2d_graph(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "FusedPadConv2D") do + desc = tf.NodeDescription("FusedPadConv2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("FusedPadConv2D") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + else + fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function fused_pad_conv2d_eager(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("FusedPadConv2D") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, filter_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(fused_pad_conv2d, [input_, paddings_, filter_], name=nothing, mode=nothing, 
strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_pad_conv2d(input_, paddings_, filter_; name=nothing, mode=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - fused_pad_conv2d_eager(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) - else - fused_pad_conv2d_graph(input_, paddings_, filter_; name=name, mode=mode, strides=strides, padding=padding) - end - end end @@ -30173,44 +54683,90 @@ end """ begin - function inplace_update_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceUpdate") do - desc = tf.NodeDescription("InplaceUpdate") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) - end - function inplace_update_eager(x_, i_, v_; name=nothing) - desc = tf.EagerOp("InplaceUpdate") - x_ = convert(tf.EagerTensor, x_) - i_ = convert(tf.EagerTensor, i_) - v_ = convert(tf.EagerTensor, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(v_) - res = tf.execute(desc) - node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_update(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_update_eager(x_, i_, v_; name=name) - else - inplace_update_graph(x_, i_, v_; name=name) + begin + function inplace_update_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceUpdate") do + desc = tf.NodeDescription("InplaceUpdate") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + i_ = convert(Tensor{Int32}, i_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + (x_, v_) = tf.tf_promote(x_, v_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function inplace_update_eager(x_, i_, v_; name=nothing) + desc = tf.EagerOp("InplaceUpdate") + x_ = convert(tf.EagerTensor, x_) + i_ = convert(tf.EagerTensor, i_) + v_ = convert(tf.EagerTensor, v_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(v_) + end + res = tf.execute(desc) + node = tf.TapeNode(inplace_update, [x_, i_, v_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_update(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_update_eager(x_, i_, v_; name=name) + else + inplace_update_graph(x_, i_, v_; name=name) + end end - end + end end @@ -30220,54 +54776,104 @@ end """ begin - function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedRelu") do - desc = 
tf.NodeDescription("QuantizedRelu") - features_ = convert(Tensor{Any}, features_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function quantized_relu_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu") do + desc = tf.NodeDescription("QuantizedRelu") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + min_features_ = convert(Tensor{Float32}, min_features_) + begin + end + end + begin + max_features_ = convert(Tensor{Float32}, max_features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedRelu") + features_ = convert(tf.EagerTensor, features_) + min_features_ = convert(tf.EagerTensor, min_features_) + max_features_ = convert(tf.EagerTensor, max_features_) + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["Tinput"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function quantized_relu_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizedRelu") - features_ = convert(tf.EagerTensor, features_) - min_features_ = convert(tf.EagerTensor, min_features_) - max_features_ = convert(tf.EagerTensor, max_features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["Tinput"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_relu, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu(features_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu_eager(features_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu_graph(features_, min_features_, max_features_; name=name, out_type=out_type) - end - end end @@ -30277,42 +54883,82 @@ end """ begin - function gather_nd_graph(params_, indices_; name=nothing) - local desc - tf.with_op_name(name, "GatherNd") do - desc = tf.NodeDescription("GatherNd") - params_ = convert(Tensor{Any}, params_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - end - tf.Tensor(tf.Operation(desc)) - end - function gather_nd_eager(params_, indices_; name=nothing) - desc = tf.EagerOp("GatherNd") - params_ = convert(tf.EagerTensor, params_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - desc["Tparams"] = tf.data_type(params_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_nd(params_, indices_; name=nothing) - if tf.in_eager_mode() - gather_nd_eager(params_, indices_; name=name) - else - gather_nd_graph(params_, indices_; name=name) + begin + function gather_nd_graph(params_, indices_; name=nothing) + local desc + tf.with_op_name(name, "GatherNd") do + desc = tf.NodeDescription("GatherNd") + begin + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (params_,) = tf.tf_promote(params_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function gather_nd_eager(params_, indices_; name=nothing) + desc = tf.EagerOp("GatherNd") + params_ = convert(tf.EagerTensor, params_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + end + begin + desc["Tparams"] = tf.data_type(params_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(gather_nd, [params_, indices_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_nd(params_, indices_; name=nothing) + if tf.in_eager_mode() + gather_nd_eager(params_, indices_; name=name) + else + gather_nd_graph(params_, indices_; name=name) + end end - end + end end @@ -30322,41 +54968,65 @@ end """ begin - function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "Placeholder") do - desc = tf.NodeDescription("Placeholder") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - 
desc["shape"] = Base.identity(shape) + begin + function placeholder_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "Placeholder") do + desc = tf.NodeDescription("Placeholder") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function placeholder_eager(; name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("Placeholder") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function placeholder_eager(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("Placeholder") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(placeholder, [], name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_eager(; name=name, dtype=dtype, shape=shape) - else - placeholder_graph(; name=name, dtype=dtype, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_graph(; name=name, dtype=dtype, shape=shape) + end end - end + end end @@ -30366,45 +55036,77 @@ end """ begin - function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "FilterByLastComponentDataset") do - desc = tf.NodeDescription("FilterByLastComponentDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function filter_by_last_component_dataset_graph(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FilterByLastComponentDataset") do + desc = tf.NodeDescription("FilterByLastComponentDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FilterByLastComponentDataset") + input_dataset_ = 
convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + else + filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function filter_by_last_component_dataset_eager(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("FilterByLastComponentDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(filter_by_last_component_dataset, [input_dataset_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function filter_by_last_component_dataset(input_dataset_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - filter_by_last_component_dataset_eager(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - else - filter_by_last_component_dataset_graph(input_dataset_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -30414,45 +55116,93 @@ end """ begin - function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) - local desc - tf.with_op_name(name, "ClipByValue") do - desc = tf.NodeDescription("ClipByValue") - t_ = convert(Tensor{Any}, t_) - clip_value_min_ = convert(Tensor{Any}, clip_value_min_) - clip_value_max_ = convert(Tensor{Any}, clip_value_max_) - (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_) - tf.add_input(desc, t_) - tf.add_input(desc, clip_value_min_) - tf.add_input(desc, clip_value_max_) - end - tf.Tensor(tf.Operation(desc)) - end - function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing) - desc = tf.EagerOp("ClipByValue") - t_ = convert(tf.EagerTensor, t_) - clip_value_min_ = convert(tf.EagerTensor, clip_value_min_) - clip_value_max_ = convert(tf.EagerTensor, clip_value_max_) - tf.add_input(desc, t_) - tf.add_input(desc, clip_value_min_) - tf.add_input(desc, clip_value_max_) - desc["T"] = tf.data_type(t_) - desc["T"] = tf.data_type(clip_value_min_) - desc["T"] = tf.data_type(clip_value_max_) - res = tf.execute(desc) - node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing, res) - if 
length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) - if tf.in_eager_mode() - clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name) - else - clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name) + begin + function clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=nothing) + local desc + tf.with_op_name(name, "ClipByValue") do + desc = tf.NodeDescription("ClipByValue") + begin + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + clip_value_min_ = convert(Tensor{Any}, clip_value_min_) + begin + end + end + begin + clip_value_max_ = convert(Tensor{Any}, clip_value_max_) + begin + end + end + begin + (t_, clip_value_min_, clip_value_max_) = tf.tf_promote(t_, clip_value_min_, clip_value_max_) + end + end + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, clip_value_min_) + end + begin + tf.add_input(desc, clip_value_max_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=nothing) + desc = tf.EagerOp("ClipByValue") + t_ = convert(tf.EagerTensor, t_) + clip_value_min_ = convert(tf.EagerTensor, clip_value_min_) + clip_value_max_ = convert(tf.EagerTensor, clip_value_max_) + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, clip_value_min_) + end + begin + tf.add_input(desc, clip_value_max_) + end + end + begin + end + begin + desc["T"] = tf.data_type(t_) + end + begin + desc["T"] = tf.data_type(clip_value_min_) + end + begin + desc["T"] = tf.data_type(clip_value_max_) + end + res = tf.execute(desc) + node = tf.TapeNode(clip_by_value, [t_, clip_value_min_, clip_value_max_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function clip_by_value(t_, clip_value_min_, clip_value_max_; name=nothing) + if tf.in_eager_mode() + clip_by_value_eager(t_, clip_value_min_, clip_value_max_; name=name) + else + clip_by_value_graph(t_, clip_value_min_, clip_value_max_; name=name) + end end - end + end end @@ -30462,51 +55212,95 @@ end """ begin - function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - local desc - tf.with_op_name(name, "ImageSummary") do - desc = tf.NodeDescription("ImageSummary") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end - if bad_color !== nothing - desc["bad_color"] = TensorFlow.RawTensor(bad_color) + begin + function image_summary_graph(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + local desc + tf.with_op_name(name, "ImageSummary") do + desc = tf.NodeDescription("ImageSummary") + begin + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Float32}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + end + begin + if 
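+ #= Optional attributes arrive as kwargs defaulting to `nothing`; only
+    those the caller actually supplies are written into the
+    NodeDescription, with coercions such as Base.Int and
+    TensorFlow.RawTensor enforcing each attr's expected type. =#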
bad_color !== nothing + desc["bad_color"] = TensorFlow.RawTensor(bad_color) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + desc = tf.EagerOp("ImageSummary") + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + end + begin + if bad_color !== nothing + desc["bad_color"] = TensorFlow.RawTensor(bad_color) + end + end + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) + if tf.in_eager_mode() + image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + else + image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function image_summary_eager(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - desc = tf.EagerOp("ImageSummary") - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end - if bad_color !== nothing - desc["bad_color"] = TensorFlow.RawTensor(bad_color) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(image_summary, [tag_, tensor_], name=nothing, max_images=nothing, bad_color=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function image_summary(tag_, tensor_; name=nothing, max_images=nothing, bad_color=nothing) - if tf.in_eager_mode() - image_summary_eager(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) - else - image_summary_graph(tag_, tensor_; name=name, max_images=max_images, bad_color=bad_color) - end - end end @@ -30516,58 +55310,92 @@ end Retrieve embedding parameters for a single table. 
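The generated wrapper collects the op's three outputs (presumably the
parameter, accumulator and update tensors) by looping `tf.Tensor(op, out_idx)`
for `out_idx = 1:3`. A hypothetical call, with illustrative table/shard
values:

```julia
# Hypothetical usage; table_id/num_shards/shard_id values are illustrative.
params, accums, updates = retrieve_tpu_embedding_adadelta_parameters(
    table_id=0, num_shards=1, shard_id=0)
```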
""" begin - function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_adadelta_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_adadelta_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_adadelta_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = 
Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adadelta_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adadelta_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -30577,45 +55405,77 @@ end """ begin - function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) - local desc - tf.with_op_name(name, "StringJoin") do - desc = tf.NodeDescription("StringJoin") - inputs_ = [convert(Tensor{String}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function string_join_graph(inputs_; name=nothing, N=nothing, separator=nothing) + local desc + tf.with_op_name(name, "StringJoin") do + desc = tf.NodeDescription("StringJoin") + begin + begin + inputs_ = [convert(Tensor{String}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing) + desc = tf.EagerOp("StringJoin") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if separator !== nothing + desc["separator"] = Base.String(separator) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing) + if tf.in_eager_mode() + string_join_eager(inputs_; name=name, N=N, separator=separator) + else + string_join_graph(inputs_; name=name, N=N, separator=separator) + end end - if separator !== nothing - desc["separator"] = Base.String(separator) - end - end - tf.Tensor(tf.Operation(desc)) end - function string_join_eager(inputs_; name=nothing, N=nothing, separator=nothing) - desc = tf.EagerOp("StringJoin") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if separator !== nothing - desc["separator"] = Base.String(separator) - end - res = tf.execute(desc) - node = tf.TapeNode(string_join, [inputs_], name=nothing, N=nothing, separator=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_join(inputs_; name=nothing, N=nothing, separator=nothing) - if tf.in_eager_mode() - string_join_eager(inputs_; 
name=name, N=N, separator=separator) - else - string_join_graph(inputs_; name=name, N=N, separator=separator) - end - end end @@ -30625,52 +55485,104 @@ end """ begin - function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceScatterNdAdd") do - desc = tf.NodeDescription("ResourceScatterNdAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdAdd") do + desc = tf.NodeDescription("ResourceScatterNdAdd") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceScatterNdAdd") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceScatterNdAdd") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["Tindices"] = 
tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - resource_scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -30680,43 +55592,79 @@ end """ begin - function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, bucket_boundaries_) - if num_streams !== nothing - desc["num_streams"] = Base.Int(num_streams) + begin + function boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceDeserialize") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceDeserialize") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + begin + bucket_boundaries_ = [convert(Tensor{Float32}, x) for x = bucket_boundaries_] + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, bucket_boundaries_) + end + end + begin + begin + if num_streams !== nothing + desc["num_streams"] = Base.Int(num_streams) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_) + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, bucket_boundaries_) + end + end + begin + begin + if num_streams !== nothing + desc["num_streams"] = Base.Int(num_streams) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) + if tf.in_eager_mode() + boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; 
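+ #= kwargs are forwarded unchanged from the public tf.@op wrapper;
+    which implementation runs is decided solely by tf.in_eager_mode(). =#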
name=name, num_streams=num_streams) + else + boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) - desc = tf.EagerOp("BoostedTreesQuantileStreamResourceDeserialize") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - bucket_boundaries_ = convert(tf.EagerTensor, bucket_boundaries_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, bucket_boundaries_) - if num_streams !== nothing - desc["num_streams"] = Base.Int(num_streams) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_deserialize, [quantile_stream_resource_handle_, bucket_boundaries_], name=nothing, num_streams=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_deserialize(quantile_stream_resource_handle_, bucket_boundaries_; name=nothing, num_streams=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_deserialize_eager(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) - else - boosted_trees_quantile_stream_resource_deserialize_graph(quantile_stream_resource_handle_, bucket_boundaries_; name=name, num_streams=num_streams) - end - end end @@ -30726,40 +55674,78 @@ end """ begin - function left_shift_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LeftShift") do - desc = tf.NodeDescription("LeftShift") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function left_shift_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LeftShift") do + desc = tf.NodeDescription("LeftShift") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function left_shift_eager(x_, y_; name=nothing) - desc = tf.EagerOp("LeftShift") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(left_shift, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function left_shift_eager(x_, y_; name=nothing) + desc = tf.EagerOp("LeftShift") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(left_shift, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function left_shift(x_, 
y_; name=nothing) - if tf.in_eager_mode() - left_shift_eager(x_, y_; name=name) - else - left_shift_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function left_shift(x_, y_; name=nothing) + if tf.in_eager_mode() + left_shift_eager(x_, y_; name=name) + else + left_shift_graph(x_, y_; name=name) + end end - end + end end @@ -30769,47 +55755,97 @@ end """ begin - function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterAdd") do - desc = tf.NodeDescription("TensorScatterAdd") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing) - desc = tf.EagerOp("TensorScatterAdd") - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - desc["T"] = tf.data_type(tensor_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_add_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_add_graph(tensor_, indices_, updates_; name=name) + begin + function tensor_scatter_add_graph(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterAdd") do + desc = tf.NodeDescription("TensorScatterAdd") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_scatter_add_eager(tensor_, indices_, updates_; name=nothing) + desc = tf.EagerOp("TensorScatterAdd") + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_add, [tensor_, indices_, updates_], name=nothing, res) + if 
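+ #= tf.execute returns a vector of output handles. Single-output ops
+    like this one register the tape node on res[1] and return it;
+    multi-output wrappers elsewhere in this file return `res` whole. =#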
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_add(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_add_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_add_graph(tensor_, indices_, updates_; name=name) + end end - end + end end @@ -30819,64 +55855,102 @@ end """ begin - function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "_VarHandlesOp") do - desc = tf.NodeDescription("_VarHandlesOp") - if containers !== nothing - desc["containers"] = map(Base.identity, containers) + begin + function _var_handles_op_graph(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_VarHandlesOp") do + desc = tf.NodeDescription("_VarHandlesOp") + begin + end + begin + end + begin + begin + if containers !== nothing + desc["containers"] = map(Base.identity, containers) + end + end + begin + if shared_names !== nothing + desc["shared_names"] = map(Base.identity, shared_names) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _var_handles_op_eager(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + desc = tf.EagerOp("_VarHandlesOp") + begin + end + begin + begin + if containers !== nothing + desc["containers"] = map(Base.identity, containers) + end + end + begin + if shared_names !== nothing + desc["shared_names"] = map(Base.identity, shared_names) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) + if tf.in_eager_mode() + _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + else + _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) + end end - if shared_names !== nothing - desc["shared_names"] = map(Base.identity, shared_names) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _var_handles_op_eager(; 
name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) - desc = tf.EagerOp("_VarHandlesOp") - if containers !== nothing - desc["containers"] = map(Base.identity, containers) - end - if shared_names !== nothing - desc["shared_names"] = map(Base.identity, shared_names) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(_var_handles_op, [], name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _var_handles_op(; name=nothing, containers=nothing, shared_names=nothing, N=nothing, dtypes=nothing, shapes=nothing) - if tf.in_eager_mode() - _var_handles_op_eager(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) - else - _var_handles_op_graph(; name=name, containers=containers, shared_names=shared_names, N=N, dtypes=dtypes, shapes=shapes) - end - end end @@ -30886,35 +55960,63 @@ end """ begin - function ifft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "IFFT3D") do - desc = tf.NodeDescription("IFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function ifft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT3D") do + desc = tf.NodeDescription("IFFT3D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ifft3d_eager(input_; name=nothing) - desc = tf.EagerOp("IFFT3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(ifft3d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ifft3d_eager(input_; name=nothing) + desc = tf.EagerOp("IFFT3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(ifft3d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft3d(input_; name=nothing) - if tf.in_eager_mode() - ifft3d_eager(input_; name=name) - else - ifft3d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft3d(input_; name=nothing) + if tf.in_eager_mode() + ifft3d_eager(input_; name=name) + else + ifft3d_graph(input_; name=name) + end end - end + end end @@ -30924,45 +56026,85 @@ end """ begin - function ref_select_graph(index_, inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "RefSelect") do - desc = tf.NodeDescription("RefSelect") - index_ = convert(Tensor{Int32}, index_) - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - 
tf.add_input(desc, index_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function ref_select_graph(index_, inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefSelect") do + desc = tf.NodeDescription("RefSelect") + begin + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ref_select_eager(index_, inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("RefSelect") + index_ = convert(tf.EagerTensor, index_) + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + ref_select_eager(index_, inputs_; name=name, N=N) + else + ref_select_graph(index_, inputs_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function ref_select_eager(index_, inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("RefSelect") - index_ = convert(tf.EagerTensor, index_) - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, index_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(ref_select, [index_, inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_select(index_, inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - ref_select_eager(index_, inputs_; name=name, N=N) - else - ref_select_graph(index_, inputs_; name=name, N=N) - end - end end @@ -30972,43 +56114,87 @@ end """ begin - function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseTensorSliceDataset") do - desc = tf.NodeDescription("SparseTensorSliceDataset") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - dense_shape_ = convert(Tensor{Int64}, dense_shape_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing) - desc = tf.EagerOp("SparseTensorSliceDataset") - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - dense_shape_ = convert(tf.EagerTensor, dense_shape_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - desc["Tvalues"] = tf.data_type(values_) - res = tf.execute(desc) - node = 
tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) - if tf.in_eager_mode() - sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name) - else - sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name) + begin + function sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorSliceDataset") do + desc = tf.NodeDescription("SparseTensorSliceDataset") + begin + begin + indices_ = convert(Tensor{Int64}, indices_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, dense_shape_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=nothing) + desc = tf.EagerOp("SparseTensorSliceDataset") + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + dense_shape_ = convert(tf.EagerTensor, dense_shape_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, dense_shape_) + end + end + begin + end + begin + desc["Tvalues"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_tensor_slice_dataset, [indices_, values_, dense_shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_slice_dataset(indices_, values_, dense_shape_; name=nothing) + if tf.in_eager_mode() + sparse_tensor_slice_dataset_eager(indices_, values_, dense_shape_; name=name) + else + sparse_tensor_slice_dataset_graph(indices_, values_, dense_shape_; name=name) + end end - end + end end @@ -31018,58 +56204,92 @@ end Retrieve embedding parameters for a single table. 
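Unlike the Adadelta variant above, this debug op gathers four outputs
(`for out_idx = 1:4`), presumably the parameters, accumulators, linears
and gradient accumulators.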
""" begin - function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = 
Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -31079,33 +56299,57 @@ end """ begin - function batch_ifft2d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT2D") do - desc = tf.NodeDescription("BatchIFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_ifft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT2D") do + desc = tf.NodeDescription("BatchIFFT2D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_ifft2d_eager(input_; name=nothing) - desc = tf.EagerOp("BatchIFFT2D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_ifft2d_eager(input_; name=nothing) + desc = tf.EagerOp("BatchIFFT2D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft2d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft2d(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft2d_eager(input_; name=name) - else - batch_ifft2d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft2d(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft2d_eager(input_; name=name) + else + batch_ifft2d_graph(input_; name=name) + end end - end + end end @@ -31115,53 +56359,101 @@ end """ begin - function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGather") do - desc = tf.NodeDescription("TensorArrayGather") - handle_ = convert(Tensor{String}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] 
= Base.identity(element_shape) + begin + function tensor_array_gather_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGather") do + desc = tf.NodeDescription("TensorArrayGather") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGather") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_gather_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArrayGather") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - end - end end @@ -31171,52 
+56463,112 @@ end """ begin - function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("SparseSegmentMeanWithNumSegments") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) - else - sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + begin + function sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMeanWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentMeanWithNumSegments") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("SparseSegmentMeanWithNumSegments") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = 
convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_mean_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -31226,41 +56578,73 @@ end """ begin - function ensure_shape_graph(input_; name=nothing, shape=nothing) - local desc - tf.with_op_name(name, "EnsureShape") do - desc = tf.NodeDescription("EnsureShape") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function ensure_shape_graph(input_; name=nothing, shape=nothing) + local desc + tf.with_op_name(name, "EnsureShape") do + desc = tf.NodeDescription("EnsureShape") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ensure_shape_eager(input_; name=nothing, shape=nothing) - desc = tf.EagerOp("EnsureShape") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ensure_shape_eager(input_; name=nothing, shape=nothing) + desc = tf.EagerOp("EnsureShape") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(ensure_shape, [input_], name=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing) - if tf.in_eager_mode() - ensure_shape_eager(input_; name=name, shape=shape) - else - ensure_shape_graph(input_; name=name, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ensure_shape(input_; name=nothing, shape=nothing) + if tf.in_eager_mode() + ensure_shape_eager(input_; name=name, shape=shape) + else + ensure_shape_graph(input_; name=name, 
shape=shape) + end end - end + end end @@ -31270,61 +56654,133 @@ end """ begin - function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyProximalGradientDescent") do - desc = tf.NodeDescription("ApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - delta_ = convert(Tensor{Any}, delta_) - (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalGradientDescent") do + desc = tf.NodeDescription("ApplyProximalGradientDescent") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + delta_ = convert(Tensor{Any}, delta_) + begin + end + end + begin + (var_, alpha_, l1_, l2_, delta_) = tf.tf_promote(var_, alpha_, l1_, l2_, delta_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyProximalGradientDescent") + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + delta_ = convert(tf.EagerTensor, delta_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(delta_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - 
end - function apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyProximalGradientDescent") - var_ = convert(tf.EagerTensor, var_) - alpha_ = convert(tf.EagerTensor, alpha_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - delta_ = convert(tf.EagerTensor, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(delta_) - res = tf.execute(desc) - node = tf.TapeNode(apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - else - apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - end - end end @@ -31334,71 +56790,123 @@ end """ begin - function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) - local desc - tf.with_op_name(name, "CollectiveReduce") do - desc = tf.NodeDescription("CollectiveReduce") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) + begin + function collective_reduce_graph(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) + local desc + tf.with_op_name(name, "CollectiveReduce") do + desc = tf.NodeDescription("CollectiveReduce") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + desc["group_key"] = Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if merge_op !== nothing + desc["merge_op"] = Base.String(merge_op) + end + end + begin + if final_op !== nothing + desc["final_op"] = Base.String(final_op) + end + end + begin + if subdiv_offsets !== nothing + desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) + desc = tf.EagerOp("CollectiveReduce") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if group_size !== nothing + desc["group_size"] = Base.Int(group_size) + end + end + begin + if group_key !== nothing + desc["group_key"] = 
Base.Int(group_key) + end + end + begin + if instance_key !== nothing + desc["instance_key"] = Base.Int(instance_key) + end + end + begin + if merge_op !== nothing + desc["merge_op"] = Base.String(merge_op) + end + end + begin + if final_op !== nothing + desc["final_op"] = Base.String(final_op) + end + end + begin + if subdiv_offsets !== nothing + desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) + if tf.in_eager_mode() + collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) + else + collective_reduce_graph(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) + end end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if merge_op !== nothing - desc["merge_op"] = Base.String(merge_op) - end - if final_op !== nothing - desc["final_op"] = Base.String(final_op) - end - if subdiv_offsets !== nothing - desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) - end - end - tf.Tensor(tf.Operation(desc)) - end - function collective_reduce_eager(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) - desc = tf.EagerOp("CollectiveReduce") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if group_size !== nothing - desc["group_size"] = Base.Int(group_size) - end - if group_key !== nothing - desc["group_key"] = Base.Int(group_key) - end - if instance_key !== nothing - desc["instance_key"] = Base.Int(instance_key) - end - if merge_op !== nothing - desc["merge_op"] = Base.String(merge_op) - end - if final_op !== nothing - desc["final_op"] = Base.String(final_op) - end - if subdiv_offsets !== nothing - desc["subdiv_offsets"] = map(Base.identity, subdiv_offsets) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(collective_reduce, [input_], name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function collective_reduce(input_; name=nothing, group_size=nothing, group_key=nothing, instance_key=nothing, merge_op=nothing, final_op=nothing, subdiv_offsets=nothing) - if tf.in_eager_mode() - collective_reduce_eager(input_; name=name, group_size=group_size, group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) - else - collective_reduce_graph(input_; name=name, group_size=group_size, 
group_key=group_key, instance_key=instance_key, merge_op=merge_op, final_op=final_op, subdiv_offsets=subdiv_offsets) - end - end end @@ -31408,35 +56916,63 @@ end """ begin - function is_nan_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsNan") do - desc = tf.NodeDescription("IsNan") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function is_nan_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsNan") do + desc = tf.NodeDescription("IsNan") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_nan_eager(x_; name=nothing) - desc = tf.EagerOp("IsNan") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(is_nan, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_nan_eager(x_; name=nothing) + desc = tf.EagerOp("IsNan") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(is_nan, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_nan(x_; name=nothing) - if tf.in_eager_mode() - is_nan_eager(x_; name=name) - else - is_nan_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_nan(x_; name=nothing) + if tf.in_eager_mode() + is_nan_eager(x_; name=name) + else + is_nan_graph(x_; name=name) + end end - end + end end @@ -31446,81 +56982,193 @@ end """ begin - function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAdaMax") do - desc = tf.NodeDescription("ApplyAdaMax") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyAdaMax") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - beta1_power_ = convert(tf.EagerTensor, beta1_power_) - lr_ = convert(tf.EagerTensor, lr_) - beta1_ = convert(tf.EagerTensor, beta1_) - beta2_ = convert(tf.EagerTensor, beta2_) - epsilon_ = 
convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(v_) - desc["T"] = tf.data_type(beta1_power_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(beta1_) - desc["T"] = tf.data_type(beta2_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdaMax") do + desc = tf.NodeDescription("ApplyAdaMax") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + beta1_power_ = convert(Tensor{Any}, beta1_power_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + beta1_ = convert(Tensor{Any}, beta1_) + begin + end + end + begin + beta2_ = convert(Tensor{Any}, beta2_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdaMax") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = 
convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(v_) + end + begin + desc["T"] = tf.data_type(beta1_power_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(beta1_) + end + begin + desc["T"] = tf.data_type(beta2_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_ada_max, [var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ada_max(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ada_max_eager(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_ada_max_graph(var_, m_, v_, beta1_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -31530,73 +57178,129 @@ end """ begin - function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - local desc - tf.with_op_name(name, "DecodeAndCropJpeg") do - desc = tf.NodeDescription("DecodeAndCropJpeg") - contents_ = convert(Tensor{String}, contents_) - crop_window_ = convert(Tensor{Int32}, crop_window_) - tf.add_input(desc, contents_) - tf.add_input(desc, crop_window_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + begin + function decode_and_crop_jpeg_graph(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + local desc + tf.with_op_name(name, "DecodeAndCropJpeg") do + desc = tf.NodeDescription("DecodeAndCropJpeg") + begin + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + begin + crop_window_ = convert(Tensor{Int32}, crop_window_) + begin + end + end + end + begin + begin + tf.add_input(desc, contents_) + end + begin + tf.add_input(desc, crop_window_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + end + begin + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + end + begin + if try_recover_truncated !== nothing + 
desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + end + begin + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + end + begin + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + desc = tf.EagerOp("DecodeAndCropJpeg") + contents_ = convert(tf.EagerTensor, contents_) + crop_window_ = convert(tf.EagerTensor, crop_window_) + begin + begin + tf.add_input(desc, contents_) + end + begin + tf.add_input(desc, crop_window_) + end + end + begin + begin + if channels !== nothing + desc["channels"] = Base.Int(channels) + end + end + begin + if ratio !== nothing + desc["ratio"] = Base.Int(ratio) + end + end + begin + if fancy_upscaling !== nothing + desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) + end + end + begin + if try_recover_truncated !== nothing + desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) + end + end + begin + if acceptable_fraction !== nothing + desc["acceptable_fraction"] = Base.identity(acceptable_fraction) + end + end + begin + if dct_method !== nothing + desc["dct_method"] = Base.String(dct_method) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) + if tf.in_eager_mode() + decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + else + decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) + end end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - end - tf.Tensor(tf.Operation(desc)) - end - function decode_and_crop_jpeg_eager(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - desc = tf.EagerOp("DecodeAndCropJpeg") - contents_ = convert(tf.EagerTensor, contents_) - crop_window_ = convert(tf.EagerTensor, crop_window_) - tf.add_input(desc, contents_) - tf.add_input(desc, crop_window_) - if channels !== nothing - desc["channels"] = Base.Int(channels) - end - if ratio !== nothing - desc["ratio"] = Base.Int(ratio) - end - if fancy_upscaling !== nothing - desc["fancy_upscaling"] = Base.Bool(fancy_upscaling) - end - if try_recover_truncated !== nothing - 
desc["try_recover_truncated"] = Base.Bool(try_recover_truncated) - end - if acceptable_fraction !== nothing - desc["acceptable_fraction"] = Base.identity(acceptable_fraction) - end - if dct_method !== nothing - desc["dct_method"] = Base.String(dct_method) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_and_crop_jpeg, [contents_, crop_window_], name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_and_crop_jpeg(contents_, crop_window_; name=nothing, channels=nothing, ratio=nothing, fancy_upscaling=nothing, try_recover_truncated=nothing, acceptable_fraction=nothing, dct_method=nothing) - if tf.in_eager_mode() - decode_and_crop_jpeg_eager(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - else - decode_and_crop_jpeg_graph(contents_, crop_window_; name=name, channels=channels, ratio=ratio, fancy_upscaling=fancy_upscaling, try_recover_truncated=try_recover_truncated, acceptable_fraction=acceptable_fraction, dct_method=dct_method) - end - end end @@ -31606,81 +57310,193 @@ end """ begin - function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyCenteredRMSProp") do - desc = tf.NodeDescription("ApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyCenteredRMSProp") - var_ = convert(tf.EagerTensor, var_) - mg_ = convert(tf.EagerTensor, mg_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(mg_) - desc["T"] = tf.data_type(ms_) - desc["T"] = 
tf.data_type(mom_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyCenteredRMSProp") do + desc = tf.NodeDescription("ApplyCenteredRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + mg_ = convert(Tensor{Any}, mg_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyCenteredRMSProp") + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end 
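+                                    # Note on the generated pattern: the desc["T"] assignments emitted a few
+                                    # lines below stamp the op's single "T" type attr from each input's
+                                    # runtime dtype. Each assignment overwrites the previous one, which is
+                                    # benign here because all nine inputs of ApplyCenteredRMSProp share the
+                                    # same element type (the graph variant promotes them together via
+                                    # tf.tf_promote).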
+ end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(mg_) + end + begin + desc["T"] = tf.data_type(ms_) + end + begin + desc["T"] = tf.data_type(mom_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -31690,68 +57506,130 @@ end """ begin - function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropFilterV2") do - desc = tf.NodeDescription("Conv3DBackpropFilterV2") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilterV2") do + desc = tf.NodeDescription("Conv3DBackpropFilterV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropFilterV2") + input_ = convert(tf.EagerTensor, input_) + filter_sizes_ = convert(tf.EagerTensor, 
filter_sizes_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv3DBackpropFilterV2") - input_ = convert(tf.EagerTensor, input_) - filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_filter_v2, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter_v2(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_filter_v2_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_backprop_filter_v2_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, 
data_format=data_format, dilations=dilations) - end - end end @@ -31761,52 +57639,98 @@ end """ begin - function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixTriangularSolve") do - desc = tf.NodeDescription("MatrixTriangularSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) + begin + function matrix_triangular_solve_graph(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixTriangularSolve") do + desc = tf.NodeDescription("MatrixTriangularSolve") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + end + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixTriangularSolve") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if lower !== nothing + desc["lower"] = Base.Bool(lower) + end + end + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + else + matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function matrix_triangular_solve_eager(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) - desc = tf.EagerOp("MatrixTriangularSolve") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if lower !== nothing - desc["lower"] = Base.Bool(lower) - end - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_triangular_solve, [matrix_, rhs_], name=nothing, lower=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_triangular_solve(matrix_, rhs_; name=nothing, lower=nothing, adjoint=nothing) 
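+            # Dispatch pattern shared by every generated op: the tf.@op entry point routes
+            # to the _eager variant when tf.in_eager_mode() is true and to the _graph
+            # variant otherwise. A minimal graph-mode sketch, assuming the generated
+            # wrapper is in scope (`A` and `b` are illustrative names, not from the patch):
+            #   A = TensorFlow.constant([2.0 0.0; 1.0 3.0])
+            #   b = TensorFlow.constant(reshape([4.0, 5.0], 2, 1))
+            #   x = matrix_triangular_solve(A, b; lower=true)  # builds a MatrixTriangularSolve node
+            #   run(TensorFlow.Session(), x)                   # solves A * x = b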
- if tf.in_eager_mode() - matrix_triangular_solve_eager(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - else - matrix_triangular_solve_graph(matrix_, rhs_; name=name, lower=lower, adjoint=adjoint) - end - end end @@ -31816,33 +57740,57 @@ end """ begin - function reader_num_work_units_completed_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do - desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") - reader_handle_ = convert(Tensor{String}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_num_work_units_completed_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumWorkUnitsCompleted") do + desc = tf.NodeDescription("ReaderNumWorkUnitsCompleted") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_num_work_units_completed_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_num_work_units_completed_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderNumWorkUnitsCompleted") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_num_work_units_completed, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_work_units_completed_eager(reader_handle_; name=name) - else - reader_num_work_units_completed_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_work_units_completed(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_work_units_completed_eager(reader_handle_; name=name) + else + reader_num_work_units_completed_graph(reader_handle_; name=name) + end end - end + end end @@ -31852,55 +57800,115 @@ end """ begin - function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - local desc - tf.with_op_name(name, "WriteAudioSummary") do - desc = tf.NodeDescription("WriteAudioSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - sample_rate_ = convert(Tensor{Float32}, sample_rate_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) + begin + function write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "WriteAudioSummary") do + desc = tf.NodeDescription("WriteAudioSummary") + begin + 
begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Float32}, tensor_) + begin + end + end + begin + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + desc = tf.EagerOp("WriteAudioSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + if tf.in_eager_mode() + write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + else + write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - desc = tf.EagerOp("WriteAudioSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - sample_rate_ = convert(tf.EagerTensor, sample_rate_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end - res = tf.execute(desc) - node = tf.TapeNode(write_audio_summary, [writer_, step_, tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_audio_summary(writer_, step_, tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - if tf.in_eager_mode() - write_audio_summary_eager(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - else - write_audio_summary_graph(writer_, step_, tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - end - end end @@ -31910,37 +57918,69 
@@ end """ begin - function sharded_filespec_graph(basename_, num_shards_; name=nothing) - local desc - tf.with_op_name(name, "ShardedFilespec") do - desc = tf.NodeDescription("ShardedFilespec") - basename_ = convert(Tensor{String}, basename_) - num_shards_ = convert(Tensor{Int32}, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, num_shards_) + begin + function sharded_filespec_graph(basename_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilespec") do + desc = tf.NodeDescription("ShardedFilespec") + begin + begin + basename_ = convert(Tensor{String}, basename_) + begin + end + end + begin + num_shards_ = convert(Tensor{Int32}, num_shards_) + begin + end + end + end + begin + begin + tf.add_input(desc, basename_) + end + begin + tf.add_input(desc, num_shards_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sharded_filespec_eager(basename_, num_shards_; name=nothing) - desc = tf.EagerOp("ShardedFilespec") - basename_ = convert(tf.EagerTensor, basename_) - num_shards_ = convert(tf.EagerTensor, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, num_shards_) - res = tf.execute(desc) - node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sharded_filespec_eager(basename_, num_shards_; name=nothing) + desc = tf.EagerOp("ShardedFilespec") + basename_ = convert(tf.EagerTensor, basename_) + num_shards_ = convert(tf.EagerTensor, num_shards_) + begin + begin + tf.add_input(desc, basename_) + end + begin + tf.add_input(desc, num_shards_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(sharded_filespec, [basename_, num_shards_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing) - if tf.in_eager_mode() - sharded_filespec_eager(basename_, num_shards_; name=name) - else - sharded_filespec_graph(basename_, num_shards_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filespec(basename_, num_shards_; name=nothing) + if tf.in_eager_mode() + sharded_filespec_eager(basename_, num_shards_; name=name) + else + sharded_filespec_graph(basename_, num_shards_; name=name) + end end - end + end end @@ -31950,40 +57990,78 @@ end """ begin - function div_no_nan_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "DivNoNan") do - desc = tf.NodeDescription("DivNoNan") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function div_no_nan_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "DivNoNan") do + desc = tf.NodeDescription("DivNoNan") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function div_no_nan_eager(x_, y_; name=nothing) - desc = tf.EagerOp("DivNoNan") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) 
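+        # DivNoNan computes x / y elementwise but returns 0 wherever y == 0, instead of
+        # the Inf/NaN a plain Div would produce. A hypothetical eager-mode sketch
+        # (assumes eager mode is active so the _eager path below is taken):
+        #   x = TensorFlow.constant([1.0, 2.0])
+        #   y = TensorFlow.constant([2.0, 0.0])
+        #   div_no_nan(x, y)  # -> [0.5, 0.0]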
- tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function div_no_nan_eager(x_, y_; name=nothing) + desc = tf.EagerOp("DivNoNan") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(div_no_nan, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div_no_nan(x_, y_; name=nothing) - if tf.in_eager_mode() - div_no_nan_eager(x_, y_; name=name) - else - div_no_nan_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function div_no_nan(x_, y_; name=nothing) + if tf.in_eager_mode() + div_no_nan_eager(x_, y_; name=name) + else + div_no_nan_graph(x_, y_; name=name) + end end - end + end end @@ -31993,63 +58071,131 @@ end """ begin - function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) - local desc - tf.with_op_name(name, "SparseAccumulatorApplyGradient") do - desc = tf.NodeDescription("SparseAccumulatorApplyGradient") - handle_ = convert(Tensor{String}, handle_) - local_step_ = convert(Tensor{Int64}, local_step_) - gradient_indices_ = convert(Tensor{Int64}, gradient_indices_) - gradient_values_ = convert(Tensor{Any}, gradient_values_) - gradient_shape_ = convert(Tensor{Int64}, gradient_shape_) - (gradient_values_,) = tf.tf_promote(gradient_values_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_indices_) - tf.add_input(desc, gradient_values_) - tf.add_input(desc, gradient_shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if has_known_shape !== nothing - desc["has_known_shape"] = Base.Bool(has_known_shape) + begin + function sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + local desc + tf.with_op_name(name, "SparseAccumulatorApplyGradient") do + desc = tf.NodeDescription("SparseAccumulatorApplyGradient") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + local_step_ = convert(Tensor{Int64}, local_step_) + begin + end + end + begin + gradient_indices_ = convert(Tensor{Int64}, gradient_indices_) + begin + end + end + begin + gradient_values_ = convert(Tensor{Any}, gradient_values_) + begin + end + end + begin + gradient_shape_ = convert(Tensor{Int64}, gradient_shape_) + begin + end + end + begin + (gradient_values_,) = tf.tf_promote(gradient_values_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, local_step_) + end + begin + tf.add_input(desc, gradient_indices_) + end + begin + tf.add_input(desc, gradient_values_) + end + begin + tf.add_input(desc, gradient_shape_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if has_known_shape !== nothing + desc["has_known_shape"] = 
Base.Bool(has_known_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + desc = tf.EagerOp("SparseAccumulatorApplyGradient") + handle_ = convert(tf.EagerTensor, handle_) + local_step_ = convert(tf.EagerTensor, local_step_) + gradient_indices_ = convert(tf.EagerTensor, gradient_indices_) + gradient_values_ = convert(tf.EagerTensor, gradient_values_) + gradient_shape_ = convert(tf.EagerTensor, gradient_shape_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, local_step_) + end + begin + tf.add_input(desc, gradient_indices_) + end + begin + tf.add_input(desc, gradient_values_) + end + begin + tf.add_input(desc, gradient_shape_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if has_known_shape !== nothing + desc["has_known_shape"] = Base.Bool(has_known_shape) + end + end + end + begin + desc["dtype"] = tf.data_type(gradient_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) + if tf.in_eager_mode() + sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + else + sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) - desc = tf.EagerOp("SparseAccumulatorApplyGradient") - handle_ = convert(tf.EagerTensor, handle_) - local_step_ = convert(tf.EagerTensor, local_step_) - gradient_indices_ = convert(tf.EagerTensor, gradient_indices_) - gradient_values_ = convert(tf.EagerTensor, gradient_values_) - gradient_shape_ = convert(tf.EagerTensor, gradient_shape_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_indices_) - tf.add_input(desc, gradient_values_) - tf.add_input(desc, gradient_shape_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if has_known_shape !== nothing - desc["has_known_shape"] = Base.Bool(has_known_shape) - end - desc["dtype"] = tf.data_type(gradient_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_accumulator_apply_gradient, [handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_], name=nothing, dtype=nothing, has_known_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_accumulator_apply_gradient(handle_, local_step_, gradient_indices_, gradient_values_, 
gradient_shape_; name=nothing, dtype=nothing, has_known_shape=nothing) - if tf.in_eager_mode() - sparse_accumulator_apply_gradient_eager(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) - else - sparse_accumulator_apply_gradient_graph(handle_, local_step_, gradient_indices_, gradient_values_, gradient_shape_; name=name, dtype=dtype, has_known_shape=has_known_shape) - end - end end @@ -32059,50 +58205,92 @@ end """ begin - function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) - local desc - tf.with_op_name(name, "RaggedTensorToSparse") do - desc = tf.NodeDescription("RaggedTensorToSparse") - rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_] - rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_) - (rt_dense_values_,) = tf.tf_promote(rt_dense_values_) - tf.add_input(desc, rt_nested_splits_) - tf.add_input(desc, rt_dense_values_) - if RAGGED_RANK !== nothing - desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) + begin + function ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + local desc + tf.with_op_name(name, "RaggedTensorToSparse") do + desc = tf.NodeDescription("RaggedTensorToSparse") + begin + begin + rt_nested_splits_ = [convert(Tensor{Int64}, x) for x = rt_nested_splits_] + begin + end + end + begin + rt_dense_values_ = convert(Tensor{Any}, rt_dense_values_) + begin + end + end + begin + (rt_dense_values_,) = tf.tf_promote(rt_dense_values_) + end + end + begin + begin + tf.add_input(desc, rt_nested_splits_) + end + begin + tf.add_input(desc, rt_dense_values_) + end + end + begin + begin + if RAGGED_RANK !== nothing + desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + desc = tf.EagerOp("RaggedTensorToSparse") + rt_nested_splits_ = convert(tf.EagerTensor, rt_nested_splits_) + rt_dense_values_ = convert(tf.EagerTensor, rt_dense_values_) + begin + begin + tf.add_input(desc, rt_nested_splits_) + end + begin + tf.add_input(desc, rt_dense_values_) + end + end + begin + begin + if RAGGED_RANK !== nothing + desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) + end + end + end + begin + desc["T"] = tf.data_type(rt_dense_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) + if tf.in_eager_mode() + ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + else + ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) - desc = tf.EagerOp("RaggedTensorToSparse") - rt_nested_splits_ = 
convert(tf.EagerTensor, rt_nested_splits_) - rt_dense_values_ = convert(tf.EagerTensor, rt_dense_values_) - tf.add_input(desc, rt_nested_splits_) - tf.add_input(desc, rt_dense_values_) - if RAGGED_RANK !== nothing - desc["RAGGED_RANK"] = Base.Int(RAGGED_RANK) - end - desc["T"] = tf.data_type(rt_dense_values_) - res = tf.execute(desc) - node = tf.TapeNode(ragged_tensor_to_sparse, [rt_nested_splits_, rt_dense_values_], name=nothing, RAGGED_RANK=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_tensor_to_sparse(rt_nested_splits_, rt_dense_values_; name=nothing, RAGGED_RANK=nothing) - if tf.in_eager_mode() - ragged_tensor_to_sparse_eager(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) - else - ragged_tensor_to_sparse_graph(rt_nested_splits_, rt_dense_values_; name=name, RAGGED_RANK=RAGGED_RANK) - end - end end @@ -32112,53 +58300,93 @@ end """ begin - function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "ExtractVolumePatches") do - desc = tf.NodeDescription("ExtractVolumePatches") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksizes !== nothing - desc["ksizes"] = map(Base.identity, ksizes) + begin + function extract_volume_patches_graph(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "ExtractVolumePatches") do + desc = tf.NodeDescription("ExtractVolumePatches") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("ExtractVolumePatches") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksizes !== nothing + desc["ksizes"] = map(Base.identity, ksizes) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + else + extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - 
desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function extract_volume_patches_eager(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("ExtractVolumePatches") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if ksizes !== nothing - desc["ksizes"] = map(Base.identity, ksizes) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(extract_volume_patches, [input_], name=nothing, ksizes=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_volume_patches(input_; name=nothing, ksizes=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - extract_volume_patches_eager(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) - else - extract_volume_patches_graph(input_; name=name, ksizes=ksizes, strides=strides, padding=padding) - end - end end @@ -32168,55 +58396,107 @@ end """ begin - function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) - local desc - tf.with_op_name(name, "BarrierInsertMany") do - desc = tf.NodeDescription("BarrierInsertMany") - handle_ = convert(Tensor{String}, handle_) - keys_ = convert(Tensor{String}, keys_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - if component_index !== nothing - component_index = Base.Int(component_index) - 1 + begin + function barrier_insert_many_graph(handle_, keys_, values_; name=nothing, component_index=nothing) + local desc + tf.with_op_name(name, "BarrierInsertMany") do + desc = tf.NodeDescription("BarrierInsertMany") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + keys_ = convert(Tensor{String}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if component_index !== nothing + component_index = Base.Int(component_index) - 1 + end + end + begin + if component_index !== nothing + desc["component_index"] = Base.Int(component_index) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing) + desc = tf.EagerOp("BarrierInsertMany") + handle_ = convert(tf.EagerTensor, handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if component_index !== nothing + component_index = Base.Int(component_index) - 1 + end + end + begin + if component_index !== nothing + desc["component_index"] = Base.Int(component_index) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, 
component_index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) + if tf.in_eager_mode() + barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index) + else + barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index) + end end - if component_index !== nothing - desc["component_index"] = Base.Int(component_index) - end - end - tf.Tensor(tf.Operation(desc)) - end - function barrier_insert_many_eager(handle_, keys_, values_; name=nothing, component_index=nothing) - desc = tf.EagerOp("BarrierInsertMany") - handle_ = convert(tf.EagerTensor, handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - if component_index !== nothing - component_index = Base.Int(component_index) - 1 - end - if component_index !== nothing - desc["component_index"] = Base.Int(component_index) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(barrier_insert_many, [handle_, keys_, values_], name=nothing, component_index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_insert_many(handle_, keys_, values_; name=nothing, component_index=nothing) - if tf.in_eager_mode() - barrier_insert_many_eager(handle_, keys_, values_; name=name, component_index=component_index) - else - barrier_insert_many_graph(handle_, keys_, values_; name=name, component_index=component_index) - end - end end @@ -32226,41 +58506,65 @@ end """ begin - function const__graph(; name=nothing, value=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "Const") do - desc = tf.NodeDescription("Const") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function const__graph(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "Const") do + desc = tf.NodeDescription("Const") + begin + end + begin + end + begin + begin + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function const__eager(; name=nothing, value=nothing, dtype=nothing) - desc = tf.EagerOp("Const") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function const__eager(; name=nothing, value=nothing, dtype=nothing) + desc = tf.EagerOp("Const") + begin + end + begin + begin + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(const_, [], name=nothing, value=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] 
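# ---[editorial sketch; not part of the patch]---------------------------------
# Two details of the surrounding hunks are worth calling out. First, in
# barrier_insert_many above the generator emits
#     component_index = Base.Int(component_index) - 1
# before storing the attr, presumably converting a 1-based Julia position into
# the 0-based component index the TensorFlow kernel expects. Second, "Const"
# (like StageSize and TextLineReader later in this section) takes no tensor
# inputs at all; both of its wrappers only set attrs. A hypothetical eager-mode
# construction shaped like const__eager, assuming a Julia element type is an
# acceptable value where the wrapper does `desc["dtype"] = Base.identity(dtype)`:
desc = tf.EagerOp("Const")
desc["value"] = TensorFlow.RawTensor([1.0, 2.0])  # payload travels as an attr
desc["dtype"] = Float64                           # assumed attr form (see above)
res = tf.execute(desc)                            # handle to the new constant
# ------------------------------------------------------------------------------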
+ end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function const_(; name=nothing, value=nothing, dtype=nothing) - if tf.in_eager_mode() - const__eager(; name=name, value=value, dtype=dtype) - else - const__graph(; name=name, value=value, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function const_(; name=nothing, value=nothing, dtype=nothing) + if tf.in_eager_mode() + const__eager(; name=name, value=value, dtype=dtype) + else + const__graph(; name=name, value=value, dtype=dtype) + end end - end + end end @@ -32270,47 +58574,91 @@ end """ begin - function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) - local desc - tf.with_op_name(name, "SpaceToBatch") do - desc = tf.NodeDescription("SpaceToBatch") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) + begin + function space_to_batch_graph(input_, paddings_; name=nothing, block_size=nothing) + local desc + tf.with_op_name(name, "SpaceToBatch") do + desc = tf.NodeDescription("SpaceToBatch") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing) + desc = tf.EagerOp("SpaceToBatch") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if block_size !== nothing + desc["block_size"] = Base.Int(block_size) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + res = tf.execute(desc) + node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) + if tf.in_eager_mode() + space_to_batch_eager(input_, paddings_; name=name, block_size=block_size) + else + space_to_batch_graph(input_, paddings_; name=name, block_size=block_size) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function space_to_batch_eager(input_, paddings_; name=nothing, block_size=nothing) - desc = tf.EagerOp("SpaceToBatch") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if block_size !== nothing - desc["block_size"] = Base.Int(block_size) - end - desc["T"] = tf.data_type(input_) - desc["Tpaddings"] = tf.data_type(paddings_) - res = tf.execute(desc) - node = tf.TapeNode(space_to_batch, [input_, paddings_], name=nothing, block_size=nothing, res) - 
if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function space_to_batch(input_, paddings_; name=nothing, block_size=nothing) - if tf.in_eager_mode() - space_to_batch_eager(input_, paddings_; name=name, block_size=block_size) - else - space_to_batch_graph(input_, paddings_; name=name, block_size=block_size) - end - end end @@ -32320,59 +58668,95 @@ end """ begin - function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StageSize") do - desc = tf.NodeDescription("StageSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function stage_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StageSize") do + desc = tf.NodeDescription("StageSize") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stage_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StageSize") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function stage_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = 
tf.EagerOp("StageSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(stage_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -32382,51 +58766,95 @@ end """ begin - function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "EmptyTensorList") do - desc = tf.NodeDescription("EmptyTensorList") - element_shape_ = convert(Tensor{Any}, element_shape_) - max_num_elements_ = convert(Tensor{Int32}, max_num_elements_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, max_num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function empty_tensor_list_graph(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "EmptyTensorList") do + desc = tf.NodeDescription("EmptyTensorList") + begin + begin + element_shape_ = convert(Tensor{Any}, element_shape_) + begin + end + end + begin + max_num_elements_ = convert(Tensor{Int32}, max_num_elements_) + begin + end + end + begin + (element_shape_,) = tf.tf_promote(element_shape_) + end + end + begin + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, max_num_elements_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("EmptyTensorList") + element_shape_ = convert(tf.EagerTensor, element_shape_) + max_num_elements_ = convert(tf.EagerTensor, max_num_elements_) + begin + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, max_num_elements_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + begin + desc["shape_type"] = tf.data_type(element_shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, 
shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - end - tf.Tensor(tf.Operation(desc)) end - function empty_tensor_list_eager(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("EmptyTensorList") - element_shape_ = convert(tf.EagerTensor, element_shape_) - max_num_elements_ = convert(tf.EagerTensor, max_num_elements_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, max_num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(empty_tensor_list, [element_shape_, max_num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function empty_tensor_list(element_shape_, max_num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - empty_tensor_list_eager(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - empty_tensor_list_graph(element_shape_, max_num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end end @@ -32436,46 +58864,80 @@ end """ begin - function lu_graph(input_; name=nothing, output_idx_type=nothing) - local desc - tf.with_op_name(name, "Lu") do - desc = tf.NodeDescription("Lu") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if output_idx_type !== nothing - desc["output_idx_type"] = Base.identity(output_idx_type) + begin + function lu_graph(input_; name=nothing, output_idx_type=nothing) + local desc + tf.with_op_name(name, "Lu") do + desc = tf.NodeDescription("Lu") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if output_idx_type !== nothing + desc["output_idx_type"] = Base.identity(output_idx_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function lu_eager(input_; name=nothing, output_idx_type=nothing) - desc = tf.EagerOp("Lu") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if output_idx_type !== nothing - desc["output_idx_type"] = Base.identity(output_idx_type) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(lu, [input_], name=nothing, 
output_idx_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function lu_eager(input_; name=nothing, output_idx_type=nothing) + desc = tf.EagerOp("Lu") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if output_idx_type !== nothing + desc["output_idx_type"] = Base.identity(output_idx_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(lu, [input_], name=nothing, output_idx_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing) - if tf.in_eager_mode() - lu_eager(input_; name=name, output_idx_type=output_idx_type) - else - lu_graph(input_; name=name, output_idx_type=output_idx_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lu(input_; name=nothing, output_idx_type=nothing) + if tf.in_eager_mode() + lu_eager(input_; name=name, output_idx_type=output_idx_type) + else + lu_graph(input_; name=name, output_idx_type=output_idx_type) + end end - end + end end @@ -32485,39 +58947,67 @@ end """ begin - function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) - local desc - tf.with_op_name(name, "DecodeCompressed") do - desc = tf.NodeDescription("DecodeCompressed") - bytes_ = convert(Tensor{String}, bytes_) - tf.add_input(desc, bytes_) - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) + begin + function decode_compressed_graph(bytes_; name=nothing, compression_type=nothing) + local desc + tf.with_op_name(name, "DecodeCompressed") do + desc = tf.NodeDescription("DecodeCompressed") + begin + begin + bytes_ = convert(Tensor{String}, bytes_) + begin + end + end + end + begin + begin + tf.add_input(desc, bytes_) + end + end + begin + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing) - desc = tf.EagerOp("DecodeCompressed") - bytes_ = convert(tf.EagerTensor, bytes_) - tf.add_input(desc, bytes_) - if compression_type !== nothing - desc["compression_type"] = Base.String(compression_type) - end - res = tf.execute(desc) - node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function decode_compressed_eager(bytes_; name=nothing, compression_type=nothing) + desc = tf.EagerOp("DecodeCompressed") + bytes_ = convert(tf.EagerTensor, bytes_) + begin + begin + tf.add_input(desc, bytes_) + end + end + begin + begin + if compression_type !== nothing + desc["compression_type"] = Base.String(compression_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(decode_compressed, [bytes_], name=nothing, compression_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing) - if tf.in_eager_mode() - decode_compressed_eager(bytes_; name=name, compression_type=compression_type) - else - decode_compressed_graph(bytes_; 
name=name, compression_type=compression_type) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_compressed(bytes_; name=nothing, compression_type=nothing) + if tf.in_eager_mode() + decode_compressed_eager(bytes_; name=name, compression_type=compression_type) + else + decode_compressed_graph(bytes_; name=name, compression_type=compression_type) + end end - end + end end @@ -32527,39 +59017,67 @@ end """ begin - function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "GetSessionTensor") do - desc = tf.NodeDescription("GetSessionTensor") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function get_session_tensor_graph(handle_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "GetSessionTensor") do + desc = tf.NodeDescription("GetSessionTensor") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function get_session_tensor_eager(handle_; name=nothing, dtype=nothing) - desc = tf.EagerOp("GetSessionTensor") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function get_session_tensor_eager(handle_; name=nothing, dtype=nothing) + desc = tf.EagerOp("GetSessionTensor") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(get_session_tensor, [handle_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - get_session_tensor_eager(handle_; name=name, dtype=dtype) - else - get_session_tensor_graph(handle_; name=name, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_tensor(handle_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + get_session_tensor_eager(handle_; name=name, dtype=dtype) + else + get_session_tensor_graph(handle_; name=name, dtype=dtype) + end end - end + end end @@ -32569,53 +59087,101 @@ end """ begin - function tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGatherV3") do - desc = tf.NodeDescription("TensorArrayGatherV3") - handle_ = convert(Tensor{Any}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) + begin + function 
tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV3") do + desc = tf.NodeDescription("TensorArrayGatherV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGatherV3") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArrayGatherV3") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather_v3, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v3(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_v3_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_v3_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - end - end end @@ -32625,39 +59191,67 @@ 
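# ---[editorial sketch; not part of the patch]---------------------------------
# Ops with several outputs (Lu and RaggedTensorToSparse earlier in this patch)
# diverge from the single-output pattern in two ways: the graph wrapper
# materializes one tf.Tensor per output index, and the eager wrapper returns
# the whole `res` collection rather than res[1]. The graph-side loop, factored
# into a hypothetical helper:
function collect_outputs_sketch(desc, num_outputs)
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx in 1:num_outputs
        push!(out, tf.Tensor(op, out_idx))  # one handle per output slot
    end
    out  # e.g. Lu uses num_outputs = 2, RaggedTensorToSparse uses 3
end
# ------------------------------------------------------------------------------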
end """ begin - function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) - local desc - tf.with_op_name(name, "DestroyResourceOp") do - desc = tf.NodeDescription("DestroyResourceOp") - resource_ = convert(Tensor{Any}, resource_) - tf.add_input(desc, resource_) - if ignore_lookup_error !== nothing - desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + begin + function destroy_resource_op_graph(resource_; name=nothing, ignore_lookup_error=nothing) + local desc + tf.with_op_name(name, "DestroyResourceOp") do + desc = tf.NodeDescription("DestroyResourceOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) - desc = tf.EagerOp("DestroyResourceOp") - resource_ = convert(tf.EagerTensor, resource_) - tf.add_input(desc, resource_) - if ignore_lookup_error !== nothing - desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) - end - res = tf.execute(desc) - node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function destroy_resource_op_eager(resource_; name=nothing, ignore_lookup_error=nothing) + desc = tf.EagerOp("DestroyResourceOp") + resource_ = convert(tf.EagerTensor, resource_) + begin + begin + tf.add_input(desc, resource_) + end + end + begin + begin + if ignore_lookup_error !== nothing + desc["ignore_lookup_error"] = Base.Bool(ignore_lookup_error) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(destroy_resource_op, [resource_], name=nothing, ignore_lookup_error=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) - if tf.in_eager_mode() - destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) - else - destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function destroy_resource_op(resource_; name=nothing, ignore_lookup_error=nothing) + if tf.in_eager_mode() + destroy_resource_op_eager(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + else + destroy_resource_op_graph(resource_; name=name, ignore_lookup_error=ignore_lookup_error) + end end - end + end end @@ -32667,69 +59261,133 @@ end Load embedding parameters for a single table. 
""" begin - function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - linears_ = convert(Tensor{Float32}, linears_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + linears_ = convert(Tensor{Float32}, linears_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, linears_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + linears_ = convert(tf.EagerTensor, linears_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, linears_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = 
Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingFTRLParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - linears_ = convert(tf.EagerTensor, linears_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_ftrl_parameters_grad_accum_debug, [parameters_, accumulators_, linears_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters_grad_accum_debug(parameters_, accumulators_, linears_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_ftrl_parameters_grad_accum_debug_eager(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_ftrl_parameters_grad_accum_debug_graph(parameters_, accumulators_, linears_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -32739,47 +59397,75 @@ end """ begin - function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TextLineReader") do - desc = tf.NodeDescription("TextLineReader") - if 
skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function text_line_reader_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReader") do + desc = tf.NodeDescription("TextLineReader") + begin + end + begin + end + begin + begin + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function text_line_reader_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TextLineReader") + begin + end + begin + begin + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + else + text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function text_line_reader_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("TextLineReader") - if skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(text_line_reader, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - text_line_reader_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - else - text_line_reader_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - end - end end @@ -32789,49 +59475,105 @@ end """ begin - function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) - local desc - tf.with_op_name(name, "CreateSummaryDbWriter") do - desc = tf.NodeDescription("CreateSummaryDbWriter") - writer_ = 
convert(Tensor{Any}, writer_) - db_uri_ = convert(Tensor{String}, db_uri_) - experiment_name_ = convert(Tensor{String}, experiment_name_) - run_name_ = convert(Tensor{String}, run_name_) - user_name_ = convert(Tensor{String}, user_name_) - tf.add_input(desc, writer_) - tf.add_input(desc, db_uri_) - tf.add_input(desc, experiment_name_) - tf.add_input(desc, run_name_) - tf.add_input(desc, user_name_) - end - tf.Tensor(tf.Operation(desc)) - end - function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) - desc = tf.EagerOp("CreateSummaryDbWriter") - writer_ = convert(tf.EagerTensor, writer_) - db_uri_ = convert(tf.EagerTensor, db_uri_) - experiment_name_ = convert(tf.EagerTensor, experiment_name_) - run_name_ = convert(tf.EagerTensor, run_name_) - user_name_ = convert(tf.EagerTensor, user_name_) - tf.add_input(desc, writer_) - tf.add_input(desc, db_uri_) - tf.add_input(desc, experiment_name_) - tf.add_input(desc, run_name_) - tf.add_input(desc, user_name_) - res = tf.execute(desc) - node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) - if tf.in_eager_mode() - create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) - else - create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + begin + function create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + local desc + tf.with_op_name(name, "CreateSummaryDbWriter") do + desc = tf.NodeDescription("CreateSummaryDbWriter") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + db_uri_ = convert(Tensor{String}, db_uri_) + begin + end + end + begin + experiment_name_ = convert(Tensor{String}, experiment_name_) + begin + end + end + begin + run_name_ = convert(Tensor{String}, run_name_) + begin + end + end + begin + user_name_ = convert(Tensor{String}, user_name_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, db_uri_) + end + begin + tf.add_input(desc, experiment_name_) + end + begin + tf.add_input(desc, run_name_) + end + begin + tf.add_input(desc, user_name_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + desc = tf.EagerOp("CreateSummaryDbWriter") + writer_ = convert(tf.EagerTensor, writer_) + db_uri_ = convert(tf.EagerTensor, db_uri_) + experiment_name_ = convert(tf.EagerTensor, experiment_name_) + run_name_ = convert(tf.EagerTensor, run_name_) + user_name_ = convert(tf.EagerTensor, user_name_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, db_uri_) + end + begin + tf.add_input(desc, experiment_name_) + end + begin + tf.add_input(desc, run_name_) + end + begin + tf.add_input(desc, user_name_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(create_summary_db_writer, [writer_, db_uri_, experiment_name_, run_name_, user_name_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + 
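    # Every generated op ships as a trio: `<op>_graph` builds a `tf.NodeDescription`
    # and returns a `tf.Tensor`, `<op>_eager` builds a `tf.EagerOp` and runs it
    # immediately, and a public `tf.@op` method dispatches on `tf.in_eager_mode()`.
    # A minimal call sketch for the dispatcher defined just below (argument
    # names illustrative, not taken from this diff):
    #
    #   handle = create_summary_db_writer(writer, db_uri, experiment, run, user)
    #   # eager mode: executes CreateSummaryDbWriter now; graph mode: adds a node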
begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function create_summary_db_writer(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=nothing) + if tf.in_eager_mode() + create_summary_db_writer_eager(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + else + create_summary_db_writer_graph(writer_, db_uri_, experiment_name_, run_name_, user_name_; name=name) + end end - end + end end @@ -32841,40 +59583,78 @@ end """ begin - function tanh_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "TanhGrad") do - desc = tf.NodeDescription("TanhGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function tanh_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "TanhGrad") do + desc = tf.NodeDescription("TanhGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tanh_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("TanhGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tanh_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("TanhGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(tanh_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - tanh_grad_eager(y_, dy_; name=name) - else - tanh_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tanh_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + tanh_grad_eager(y_, dy_; name=name) + else + tanh_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -32884,33 +59664,57 @@ end """ begin - function decode_base64_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DecodeBase64") do - desc = tf.NodeDescription("DecodeBase64") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) + begin + function decode_base64_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DecodeBase64") do + desc = tf.NodeDescription("DecodeBase64") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function decode_base64_eager(input_; name=nothing) - desc = tf.EagerOp("DecodeBase64") - input_ = convert(tf.EagerTensor, input_) - 
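    # `tf.execute(desc)` runs the kernel and hands back a vector of output
    # handles; `tf.TapeNode` plus `tf.add_node` then record the call so the
    # gradient tape can replay it. Single-output ops return `res[1]`, while
    # multi-output ops (e.g. the SVD wrapper further below) return `res` whole.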
tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(decode_base64, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function decode_base64_eager(input_; name=nothing) + desc = tf.EagerOp("DecodeBase64") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(decode_base64, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_base64(input_; name=nothing) - if tf.in_eager_mode() - decode_base64_eager(input_; name=name) - else - decode_base64_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_base64(input_; name=nothing) + if tf.in_eager_mode() + decode_base64_eager(input_; name=name) + else + decode_base64_graph(input_; name=name) + end end - end + end end @@ -32920,65 +59724,137 @@ end """ begin - function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradGradV2") do - desc = tf.NodeDescription("MaxPoolGradGradV2") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - grad_ = convert(Tensor{Any}, grad_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradV2") do + desc = tf.NodeDescription("MaxPoolGradGradV2") + begin + begin + orig_input_ = convert(Tensor{Any}, orig_input_) + begin + end + end + begin + orig_output_ = convert(Tensor{Any}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + ksize_ = convert(Tensor{Int32}, ksize_) + begin + end + end + begin + strides_ = convert(Tensor{Int32}, strides_) + begin + end + end + begin + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradGradV2") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = 
convert(tf.EagerTensor, grad_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPoolGradGradV2") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - ksize_ = convert(tf.EagerTensor, ksize_) - strides_ = convert(tf.EagerTensor, strides_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_grad_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - end - end end @@ -32988,47 +59864,91 @@ end """ begin - function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - local desc - tf.with_op_name(name, "AudioSummaryV2") do - desc = tf.NodeDescription("AudioSummaryV2") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - sample_rate_ = 
convert(Tensor{Float32}, sample_rate_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) + begin + function audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "AudioSummaryV2") do + desc = tf.NodeDescription("AudioSummaryV2") + begin + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Float32}, tensor_) + begin + end + end + begin + sample_rate_ = convert(Tensor{Float32}, sample_rate_) + begin + end + end + end + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + desc = tf.EagerOp("AudioSummaryV2") + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + sample_rate_ = convert(tf.EagerTensor, sample_rate_) + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, sample_rate_) + end + end + begin + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) + if tf.in_eager_mode() + audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + else + audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) + end end - end - tf.Tensor(tf.Operation(desc)) end - function audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - desc = tf.EagerOp("AudioSummaryV2") - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - sample_rate_ = convert(tf.EagerTensor, sample_rate_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, sample_rate_) - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end - res = tf.execute(desc) - node = tf.TapeNode(audio_summary_v2, [tag_, tensor_, sample_rate_], name=nothing, max_outputs=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary_v2(tag_, tensor_, sample_rate_; name=nothing, max_outputs=nothing) - if tf.in_eager_mode() - audio_summary_v2_eager(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - else - audio_summary_v2_graph(tag_, tensor_, sample_rate_; name=name, max_outputs=max_outputs) - end - end end @@ -33038,69 +59958,117 @@ end """ begin - function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - local desc - tf.with_op_name(name, "StatefulPartitionedCall") do - desc = tf.NodeDescription("StatefulPartitionedCall") - args_ 
= [convert(Tensor{Any}, x) for x = args_] - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) + begin + function stateful_partitioned_call_graph(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + local desc + tf.with_op_name(name, "StatefulPartitionedCall") do + desc = tf.NodeDescription("StatefulPartitionedCall") + begin + begin + args_ = [convert(Tensor{Any}, x) for x = args_] + begin + end + end + end + begin + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + begin + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + end + begin + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + desc = tf.EagerOp("StatefulPartitionedCall") + args_ = convert(tf.EagerTensor, args_) + begin + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + begin + if config_proto !== nothing + desc["config_proto"] = Base.String(config_proto) + end + end + begin + if executor_type !== nothing + desc["executor_type"] = Base.String(executor_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) + if tf.in_eager_mode() + stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + else + stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) + end end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stateful_partitioned_call_eager(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - desc = tf.EagerOp("StatefulPartitionedCall") - args_ = convert(tf.EagerTensor, args_) - 
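    # List-typed inputs are handled asymmetrically: the graph builder converts
    # each element (`[convert(Tensor{Any}, x) for x = args_]`), whereas the
    # eager path applies one `convert(tf.EagerTensor, args_)` to the whole
    # collection, presumably relying on `add_input` to accept a vector of handles.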
tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - if config !== nothing - desc["config"] = Base.String(config) - end - if config_proto !== nothing - desc["config_proto"] = Base.String(config_proto) - end - if executor_type !== nothing - desc["executor_type"] = Base.String(executor_type) - end - res = tf.execute(desc) - node = tf.TapeNode(stateful_partitioned_call, [args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateful_partitioned_call(args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing, config=nothing, config_proto=nothing, executor_type=nothing) - if tf.in_eager_mode() - stateful_partitioned_call_eager(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - else - stateful_partitioned_call_graph(args_; name=name, Tin=Tin, Tout=Tout, f=f, config=config, config_proto=config_proto, executor_type=executor_type) - end - end end @@ -33110,70 +60078,128 @@ end Acts like a Concat Op that merges multple tensors into one, however it must """ begin - function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocatorConcat") do - desc = tf.NodeDescription("_ScopedAllocatorConcat") - backing_ = convert(Tensor{Any}, backing_) - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (backing_, inputs_) = tf.tf_promote(backing_, inputs_) - tf.add_input(desc, backing_) - tf.add_input(desc, inputs_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if reshape !== nothing - desc["reshape"] = Base.Bool(reshape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) + begin + function _scoped_allocator_concat_graph(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorConcat") do + desc = tf.NodeDescription("_ScopedAllocatorConcat") + begin + begin + backing_ = convert(Tensor{Any}, backing_) + begin + end + end + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (backing_, inputs_) = tf.tf_promote(backing_, inputs_) + end + end + begin + begin + tf.add_input(desc, backing_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if reshape !== nothing + desc["reshape"] = Base.Bool(reshape) + end + end + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + desc = tf.EagerOp("_ScopedAllocatorConcat") + backing_ = convert(tf.EagerTensor, backing_) + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + 
tf.add_input(desc, backing_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if reshape !== nothing + desc["reshape"] = Base.Bool(reshape) + end + end + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(backing_) + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) + if tf.in_eager_mode() + _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) + else + _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) + end end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _scoped_allocator_concat_eager(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - desc = tf.EagerOp("_ScopedAllocatorConcat") - backing_ = convert(tf.EagerTensor, backing_) - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, backing_) - tf.add_input(desc, inputs_) - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if reshape !== nothing - desc["reshape"] = Base.Bool(reshape) - end - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(backing_) - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator_concat, [backing_, inputs_], name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_concat(backing_, inputs_; name=nothing, shape=nothing, reshape=nothing, sa_name=nothing, id=nothing, N=nothing) - if tf.in_eager_mode() - _scoped_allocator_concat_eager(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) - else - _scoped_allocator_concat_graph(backing_, inputs_; name=name, shape=shape, reshape=reshape, sa_name=sa_name, id=id, N=N) - end - end end @@ -33183,61 +60209,109 @@ end """ begin - function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - if min !== nothing - desc["min"] = Base.identity(min) - end - if max !== nothing - 
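    # `Base.identity` passes attr values through untouched (here the float
    # `min`/`max` bounds), in contrast to the explicit `Base.Int` / `Base.Bool`
    # coercions applied to integer and boolean attributes elsewhere.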
desc["max"] = Base.identity(max) + begin + function fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxArgsGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxArgsGradient") + begin + begin + gradients_ = convert(Tensor{Float32}, gradients_) + begin + end + end + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if min !== nothing + desc["min"] = Base.identity(min) + end + end + begin + if max !== nothing + desc["max"] = Base.identity(max) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient") + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if min !== nothing + desc["min"] = Base.identity(min) + end + end + begin + if max !== nothing + desc["max"] = Base.identity(max) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_args_gradient, [gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) + end end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - desc = tf.EagerOp("FakeQuantWithMinMaxArgsGradient") - gradients_ = convert(tf.EagerTensor, gradients_) - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - if min !== nothing - desc["min"] = Base.identity(min) - end - if max !== nothing - desc["max"] = Base.identity(max) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_args_gradient, 
[gradients_, inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args_gradient(gradients_, inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_args_gradient_eager(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_args_gradient_graph(gradients_, inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range) - end - end end @@ -33247,52 +60321,90 @@ end """ begin - function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - local desc - tf.with_op_name(name, "BatchSvd") do - desc = tf.NodeDescription("BatchSvd") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) + begin + function batch_svd_graph(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + local desc + tf.with_op_name(name, "BatchSvd") do + desc = tf.NodeDescription("BatchSvd") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + end + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + desc = tf.EagerOp("BatchSvd") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_uv !== nothing + desc["compute_uv"] = Base.Bool(compute_uv) + end + end + begin + if full_matrices !== nothing + desc["full_matrices"] = Base.Bool(full_matrices) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) + if tf.in_eager_mode() + batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + else + batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function batch_svd_eager(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - desc = tf.EagerOp("BatchSvd") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if compute_uv !== nothing - desc["compute_uv"] = Base.Bool(compute_uv) - end - if full_matrices !== nothing - desc["full_matrices"] = Base.Bool(full_matrices) - end - 
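    # BatchSvd is a multi-output op: the graph builder collects three output
    # tensors (s, u, v), and the eager path returns the full `res` vector
    # instead of `res[1]`. The `desc["T"]` line below stamps the op's dtype
    # attribute from the (promoted) input handle.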
desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_svd, [input_], name=nothing, compute_uv=nothing, full_matrices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_svd(input_; name=nothing, compute_uv=nothing, full_matrices=nothing) - if tf.in_eager_mode() - batch_svd_eager(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - else - batch_svd_graph(input_; name=name, compute_uv=compute_uv, full_matrices=full_matrices) - end - end end @@ -33302,77 +60414,141 @@ end """ begin - function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapStage") do - desc = tf.NodeDescription("MapStage") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function map_stage_graph(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapStage") do + desc = tf.NodeDescription("MapStage") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapStage") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin 
+ if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if fake_dtypes !== nothing + desc["fake_dtypes"] = map(Base.identity, fake_dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + else + map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function map_stage_eager(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapStage") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if fake_dtypes !== nothing - desc["fake_dtypes"] = map(Base.identity, fake_dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_stage, [key_, indices_, values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_stage(key_, indices_, values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, fake_dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_stage_eager(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - else - map_stage_graph(key_, indices_, values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, fake_dtypes=fake_dtypes, container=container, shared_name=shared_name) - end - end end @@ -33382,80 +60558,188 @@ end """ begin - function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, 
use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyFtrl") do - desc = tf.NodeDescription("ResourceSparseApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyFtrl") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyFtrl") do + desc = tf.NodeDescription("ResourceSparseApplyFtrl") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, 
indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, lr_power_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyFtrl") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + resource_sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -33465,45 +60749,85 @@ end """ begin - function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeNearestNeighbor") do - desc = tf.NodeDescription("ResizeNearestNeighbor") - images_ = convert(Tensor{Any}, 
images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_nearest_neighbor_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighbor") do + desc = tf.NodeDescription("ResizeNearestNeighbor") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeNearestNeighbor") + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_nearest_neighbor_eager(images_, size_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeNearestNeighbor") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(resize_nearest_neighbor, [images_, size_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_nearest_neighbor_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_nearest_neighbor_graph(images_, size_; name=name, align_corners=align_corners) - end - end end @@ -33513,77 +60837,173 @@ end """ begin - function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalCSVDataset") do - desc = tf.NodeDescription("ExperimentalCSVDataset") - filenames_ = convert(Tensor{String}, filenames_) - compression_type_ = 
convert(Tensor{String}, compression_type_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - header_ = convert(Tensor{Bool}, header_) - field_delim_ = convert(Tensor{String}, field_delim_) - use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_) - na_value_ = convert(Tensor{String}, na_value_) - select_cols_ = convert(Tensor{Int64}, select_cols_) - record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, header_) - tf.add_input(desc, field_delim_) - tf.add_input(desc, use_quote_delim_) - tf.add_input(desc, na_value_) - tf.add_input(desc, select_cols_) - tf.add_input(desc, record_defaults_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalCSVDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - compression_type_ = convert(tf.EagerTensor, compression_type_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - header_ = convert(tf.EagerTensor, header_) - field_delim_ = convert(tf.EagerTensor, field_delim_) - use_quote_delim_ = convert(tf.EagerTensor, use_quote_delim_) - na_value_ = convert(tf.EagerTensor, na_value_) - select_cols_ = convert(tf.EagerTensor, select_cols_) - record_defaults_ = convert(tf.EagerTensor, record_defaults_) - tf.add_input(desc, filenames_) - tf.add_input(desc, compression_type_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, header_) - tf.add_input(desc, field_delim_) - tf.add_input(desc, use_quote_delim_) - tf.add_input(desc, na_value_) - tf.add_input(desc, select_cols_) - tf.add_input(desc, record_defaults_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + begin + function experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, 
select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalCSVDataset") do + desc = tf.NodeDescription("ExperimentalCSVDataset") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + begin + compression_type_ = convert(Tensor{String}, compression_type_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + begin + header_ = convert(Tensor{Bool}, header_) + begin + end + end + begin + field_delim_ = convert(Tensor{String}, field_delim_) + begin + end + end + begin + use_quote_delim_ = convert(Tensor{Bool}, use_quote_delim_) + begin + end + end + begin + na_value_ = convert(Tensor{String}, na_value_) + begin + end + end + begin + select_cols_ = convert(Tensor{Int64}, select_cols_) + begin + end + end + begin + record_defaults_ = [convert(Tensor{Any}, x) for x = record_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, header_) + end + begin + tf.add_input(desc, field_delim_) + end + begin + tf.add_input(desc, use_quote_delim_) + end + begin + tf.add_input(desc, na_value_) + end + begin + tf.add_input(desc, select_cols_) + end + begin + tf.add_input(desc, record_defaults_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalCSVDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + compression_type_ = convert(tf.EagerTensor, compression_type_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + header_ = convert(tf.EagerTensor, header_) + field_delim_ = convert(tf.EagerTensor, field_delim_) + use_quote_delim_ = convert(tf.EagerTensor, use_quote_delim_) + na_value_ = convert(tf.EagerTensor, na_value_) + select_cols_ = convert(tf.EagerTensor, select_cols_) + record_defaults_ = convert(tf.EagerTensor, record_defaults_) + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, compression_type_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, header_) + end + begin + tf.add_input(desc, field_delim_) + end + begin + tf.add_input(desc, use_quote_delim_) + end + begin + tf.add_input(desc, na_value_) + end + begin + tf.add_input(desc, select_cols_) + end + begin + tf.add_input(desc, record_defaults_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_csv_dataset, [filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + 
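    # The `#= .../generate_ops.jl:262 =#` marker is the source location the
    # code generator injects for every public wrapper; `tf.@op` appears to
    # hook the method into TensorFlow.jl's op-dispatch machinery. A rough call
    # sketch for the wrapper defined below (all argument values illustrative,
    # assuming eager mode):
    #
    #   ds = experimental_csv_dataset(["data.csv"], "", 8192, true, ",", true,
    #                                 "", Int64[], [0.0];
    #                                 output_types=[Float64], output_shapes=[[]])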
begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_csv_dataset(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_csv_dataset_eager(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_csv_dataset_graph(filenames_, compression_type_, buffer_size_, header_, field_delim_, use_quote_delim_, na_value_, select_cols_, record_defaults_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end + end end @@ -33593,53 +61013,109 @@ end Returns x * y element-wise. """ begin - function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklMul") do - desc = tf.NodeDescription("_MklMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) - desc = tf.EagerOp("_MklMul") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - mkl_x_ = convert(tf.EagerTensor, mkl_x_) - mkl_y_ = convert(tf.EagerTensor, mkl_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) + begin + function _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMul") do + desc = tf.NodeDescription("_MklMul") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + begin + end + end + begin + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) + desc = tf.EagerOp("_MklMul") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + 
tf.add_input(desc, mkl_y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(_mkl_mul, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_mul(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_mul_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_mul_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end + end end @@ -33649,35 +61125,63 @@ end """ begin - function batch_matrix_diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixDiag") do - desc = tf.NodeDescription("BatchMatrixDiag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) + begin + function batch_matrix_diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDiag") do + desc = tf.NodeDescription("BatchMatrixDiag") + begin + begin + diagonal_ = convert(Tensor{Any}, diagonal_) + begin + end + end + begin + (diagonal_,) = tf.tf_promote(diagonal_) + end + end + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_matrix_diag_eager(diagonal_; name=nothing) - desc = tf.EagerOp("BatchMatrixDiag") - diagonal_ = convert(tf.EagerTensor, diagonal_) - tf.add_input(desc, diagonal_) - desc["T"] = tf.data_type(diagonal_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_matrix_diag_eager(diagonal_; name=nothing) + desc = tf.EagerOp("BatchMatrixDiag") + diagonal_ = convert(tf.EagerTensor, diagonal_) + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + begin + desc["T"] = tf.data_type(diagonal_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_diag, [diagonal_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing) - if tf.in_eager_mode() - batch_matrix_diag_eager(diagonal_; name=name) - else - batch_matrix_diag_graph(diagonal_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag(diagonal_; name=nothing) + if tf.in_eager_mode() + batch_matrix_diag_eager(diagonal_; name=name) + else + batch_matrix_diag_graph(diagonal_; name=name) + end end - end + end end @@ -33687,35 +61191,63 @@ end """ begin - function is_inf_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsInf") do - desc = tf.NodeDescription("IsInf") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function is_inf_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "IsInf") do + desc = tf.NodeDescription("IsInf") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_inf_eager(x_; name=nothing) - desc = tf.EagerOp("IsInf") - x_ = 
convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(is_inf, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_inf_eager(x_; name=nothing) + desc = tf.EagerOp("IsInf") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(is_inf, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_inf(x_; name=nothing) - if tf.in_eager_mode() - is_inf_eager(x_; name=name) - else - is_inf_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_inf(x_; name=nothing) + if tf.in_eager_mode() + is_inf_eager(x_; name=name) + else + is_inf_graph(x_; name=name) + end end - end + end end @@ -33725,110 +61257,184 @@ end """ begin - function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FixedUnigramCandidateSampler") do - desc = tf.NodeDescription("FixedUnigramCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if vocab_file !== nothing - desc["vocab_file"] = Base.String(vocab_file) - end - if distortion !== nothing - desc["distortion"] = Base.identity(distortion) + begin + function fixed_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FixedUnigramCandidateSampler") do + desc = tf.NodeDescription("FixedUnigramCandidateSampler") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if vocab_file !== nothing + desc["vocab_file"] = Base.String(vocab_file) + end + end + begin + if distortion !== nothing + desc["distortion"] = Base.identity(distortion) + end + end + begin + if num_reserved_ids !== nothing + desc["num_reserved_ids"] = Base.Int(num_reserved_ids) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard !== nothing + desc["shard"] = Base.Int(shard) + end + end + begin + if unigrams !== nothing + 
desc["unigrams"] = map(Base.identity, unigrams) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FixedUnigramCandidateSampler") + true_classes_ = convert(tf.EagerTensor, true_classes_) + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if vocab_file !== nothing + desc["vocab_file"] = Base.String(vocab_file) + end + end + begin + if distortion !== nothing + desc["distortion"] = Base.identity(distortion) + end + end + begin + if num_reserved_ids !== nothing + desc["num_reserved_ids"] = Base.Int(num_reserved_ids) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard !== nothing + desc["shard"] = Base.Int(shard) + end + end + begin + if unigrams !== nothing + desc["unigrams"] = map(Base.identity, unigrams) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) + else + fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) + end end - if num_reserved_ids !== nothing - desc["num_reserved_ids"] = Base.Int(num_reserved_ids) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - 
if shard !== nothing - desc["shard"] = Base.Int(shard) - end - if unigrams !== nothing - desc["unigrams"] = map(Base.identity, unigrams) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fixed_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("FixedUnigramCandidateSampler") - true_classes_ = convert(tf.EagerTensor, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if vocab_file !== nothing - desc["vocab_file"] = Base.String(vocab_file) - end - if distortion !== nothing - desc["distortion"] = Base.identity(distortion) - end - if num_reserved_ids !== nothing - desc["num_reserved_ids"] = Base.Int(num_reserved_ids) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard !== nothing - desc["shard"] = Base.Int(shard) - end - if unigrams !== nothing - desc["unigrams"] = map(Base.identity, unigrams) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(fixed_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, vocab_file=nothing, distortion=nothing, num_reserved_ids=nothing, num_shards=nothing, shard=nothing, unigrams=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - fixed_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) - else - fixed_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, vocab_file=vocab_file, distortion=distortion, num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed, seed2=seed2) - end - end end @@ -33838,42 +61444,80 @@ end """ begin - function unravel_index_graph(indices_, dims_; name=nothing) - local desc - tf.with_op_name(name, "UnravelIndex") do - desc = tf.NodeDescription("UnravelIndex") - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - dims_ = convert(Tensor{Int32}, dims_) - dims_ = dims_ - 
convert(tf.Tensor{eltype(dims_)}, 1) - (indices_, dims_) = tf.tf_promote(indices_, dims_) - tf.add_input(desc, indices_) - tf.add_input(desc, dims_) + begin + function unravel_index_graph(indices_, dims_; name=nothing) + local desc + tf.with_op_name(name, "UnravelIndex") do + desc = tf.NodeDescription("UnravelIndex") + begin + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + dims_ = convert(Tensor{Int32}, dims_) + begin + dims_ = dims_ - convert(tf.Tensor{eltype(dims_)}, 1) + end + end + begin + (indices_, dims_) = tf.tf_promote(indices_, dims_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, dims_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function unravel_index_eager(indices_, dims_; name=nothing) - desc = tf.EagerOp("UnravelIndex") - indices_ = convert(tf.EagerTensor, indices_) - dims_ = convert(tf.EagerTensor, dims_) - tf.add_input(desc, indices_) - tf.add_input(desc, dims_) - desc["Tidx"] = tf.data_type(indices_) - desc["Tidx"] = tf.data_type(dims_) - res = tf.execute(desc) - node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function unravel_index_eager(indices_, dims_; name=nothing) + desc = tf.EagerOp("UnravelIndex") + indices_ = convert(tf.EagerTensor, indices_) + dims_ = convert(tf.EagerTensor, dims_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, dims_) + end + end + begin + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + begin + desc["Tidx"] = tf.data_type(dims_) + end + res = tf.execute(desc) + node = tf.TapeNode(unravel_index, [indices_, dims_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unravel_index(indices_, dims_; name=nothing) - if tf.in_eager_mode() - unravel_index_eager(indices_, dims_; name=name) - else - unravel_index_graph(indices_, dims_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unravel_index(indices_, dims_; name=nothing) + if tf.in_eager_mode() + unravel_index_eager(indices_, dims_; name=name) + else + unravel_index_graph(indices_, dims_; name=name) + end end - end + end end @@ -33883,88 +61527,212 @@ end """ begin - function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyFtrlV2") do - desc = tf.NodeDescription("SparseApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, 
linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyFtrlV2") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(linear_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(l2_shrinkage_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + begin + function sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrlV2") do + desc = tf.NodeDescription("SparseApplyFtrlV2") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + 
(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyFtrlV2") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(linear_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(l2_shrinkage_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_ftrl_v2, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl_v2(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + sparse_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -33974,48 +61742,92 @@ end """ begin - function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Max") do - desc = 
tf.NodeDescription("Max") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function max_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Max") do + desc = tf.NodeDescription("Max") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Max") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("Max") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(max, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - max_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - max_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -34025,35 +61837,63 @@ end """ begin - function ifft2d_graph(input_; name=nothing) - local desc - 
tf.with_op_name(name, "IFFT2D") do - desc = tf.NodeDescription("IFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function ifft2d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT2D") do + desc = tf.NodeDescription("IFFT2D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ifft2d_eager(input_; name=nothing) - desc = tf.EagerOp("IFFT2D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(ifft2d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ifft2d_eager(input_; name=nothing) + desc = tf.EagerOp("IFFT2D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(ifft2d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft2d(input_; name=nothing) - if tf.in_eager_mode() - ifft2d_eager(input_; name=name) - else - ifft2d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft2d(input_; name=nothing) + if tf.in_eager_mode() + ifft2d_eager(input_; name=name) + else + ifft2d_graph(input_; name=name) + end end - end + end end @@ -34063,66 +61903,124 @@ end """ begin - function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - local desc - tf.with_op_name(name, "SparseConcat") do - desc = tf.NodeDescription("SparseConcat") - indices_ = [convert(Tensor{Int64}, x) for x = indices_] - values_ = [convert(Tensor{Any}, x) for x = values_] - shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - if concat_dim !== nothing - concat_dim = Base.Int(concat_dim) - 1 - end - if concat_dim !== nothing - desc["concat_dim"] = Base.Int(concat_dim) + begin + function sparse_concat_graph(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + local desc + tf.with_op_name(name, "SparseConcat") do + desc = tf.NodeDescription("SparseConcat") + begin + begin + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shapes_) + end + end + begin + begin + if concat_dim !== nothing + concat_dim = Base.Int(concat_dim) - 1 + end + end + begin + if concat_dim !== nothing + desc["concat_dim"] = Base.Int(concat_dim) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 
+ push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + desc = tf.EagerOp("SparseConcat") + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shapes_ = convert(tf.EagerTensor, shapes_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shapes_) + end + end + begin + begin + if concat_dim !== nothing + concat_dim = Base.Int(concat_dim) - 1 + end + end + begin + if concat_dim !== nothing + desc["concat_dim"] = Base.Int(concat_dim) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) + if tf.in_eager_mode() + sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) + else + sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) + end end - if N !== nothing - desc["N"] = Base.Int(N) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_concat_eager(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - desc = tf.EagerOp("SparseConcat") - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - shapes_ = convert(tf.EagerTensor, shapes_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - if concat_dim !== nothing - concat_dim = Base.Int(concat_dim) - 1 - end - if concat_dim !== nothing - desc["concat_dim"] = Base.Int(concat_dim) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_concat, [indices_, values_, shapes_], name=nothing, concat_dim=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_concat(indices_, values_, shapes_; name=nothing, concat_dim=nothing, N=nothing) - if tf.in_eager_mode() - sparse_concat_eager(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) - else - sparse_concat_graph(indices_, values_, shapes_; name=name, concat_dim=concat_dim, N=N) - end - end end @@ -34132,39 +62030,75 @@ end """ begin - function histogram_summary_graph(tag_, values_; name=nothing) - local desc - tf.with_op_name(name, "HistogramSummary") do - desc = tf.NodeDescription("HistogramSummary") - tag_ = convert(Tensor{String}, tag_) - values_ = convert(Tensor{Float32}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) + begin + function histogram_summary_graph(tag_, values_; name=nothing) + local desc + tf.with_op_name(name, "HistogramSummary") do + desc = tf.NodeDescription("HistogramSummary") + begin + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + values_ = convert(Tensor{Float32}, values_) + begin + end + end 
+ begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function histogram_summary_eager(tag_, values_; name=nothing) - desc = tf.EagerOp("HistogramSummary") - tag_ = convert(tf.EagerTensor, tag_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, tag_) - tf.add_input(desc, values_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function histogram_summary_eager(tag_, values_; name=nothing) + desc = tf.EagerOp("HistogramSummary") + tag_ = convert(tf.EagerTensor, tag_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(histogram_summary, [tag_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_summary(tag_, values_; name=nothing) - if tf.in_eager_mode() - histogram_summary_eager(tag_, values_; name=name) - else - histogram_summary_graph(tag_, values_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function histogram_summary(tag_, values_; name=nothing) + if tf.in_eager_mode() + histogram_summary_eager(tag_, values_; name=name) + else + histogram_summary_graph(tag_, values_; name=name) + end end - end + end end @@ -34174,42 +62108,82 @@ end """ begin - function segment_sum_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentSum") do - desc = tf.NodeDescription("SegmentSum") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function segment_sum_eager(data_, segment_ids_; name=nothing) - desc = tf.EagerOp("SegmentSum") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - res = tf.execute(desc) - node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_sum_eager(data_, segment_ids_; name=name) - else - segment_sum_graph(data_, segment_ids_; name=name) + begin + function segment_sum_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentSum") do + desc = tf.NodeDescription("SegmentSum") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end 
+ begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function segment_sum_eager(data_, segment_ids_; name=nothing) + desc = tf.EagerOp("SegmentSum") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + res = tf.execute(desc) + node = tf.TapeNode(segment_sum, [data_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_sum(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_sum_eager(data_, segment_ids_; name=name) + else + segment_sum_graph(data_, segment_ids_; name=name) + end end - end + end end @@ -34219,35 +62193,63 @@ end """ begin - function exp_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Exp") do - desc = tf.NodeDescription("Exp") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function exp_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Exp") do + desc = tf.NodeDescription("Exp") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function exp_eager(x_; name=nothing) - desc = tf.EagerOp("Exp") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(exp, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function exp_eager(x_; name=nothing) + desc = tf.EagerOp("Exp") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(exp, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exp(x_; name=nothing) - if tf.in_eager_mode() - exp_eager(x_; name=name) - else - exp_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function exp(x_; name=nothing) + if tf.in_eager_mode() + exp_eager(x_; name=name) + else + exp_graph(x_; name=name) + end end - end + end end @@ -34257,47 +62259,75 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - local desc - tf.with_op_name(name, "ConfigureDistributedTPU") do - desc = tf.NodeDescription("ConfigureDistributedTPU") - if embedding_config !== nothing - desc["embedding_config"] = Base.String(embedding_config) - end - if tpu_embedding_config !== nothing - desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) - end - if is_global_init !== nothing 
- desc["is_global_init"] = Base.Bool(is_global_init) + begin + function configure_distributed_tpu_graph(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + local desc + tf.with_op_name(name, "ConfigureDistributedTPU") do + desc = tf.NodeDescription("ConfigureDistributedTPU") + begin + end + begin + end + begin + begin + if embedding_config !== nothing + desc["embedding_config"] = Base.String(embedding_config) + end + end + begin + if tpu_embedding_config !== nothing + desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) + end + end + begin + if is_global_init !== nothing + desc["is_global_init"] = Base.Bool(is_global_init) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function configure_distributed_tpu_eager(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + desc = tf.EagerOp("ConfigureDistributedTPU") + begin + end + begin + begin + if embedding_config !== nothing + desc["embedding_config"] = Base.String(embedding_config) + end + end + begin + if tpu_embedding_config !== nothing + desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) + end + end + begin + if is_global_init !== nothing + desc["is_global_init"] = Base.Bool(is_global_init) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) + if tf.in_eager_mode() + configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) + else + configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function configure_distributed_tpu_eager(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - desc = tf.EagerOp("ConfigureDistributedTPU") - if embedding_config !== nothing - desc["embedding_config"] = Base.String(embedding_config) - end - if tpu_embedding_config !== nothing - desc["tpu_embedding_config"] = Base.String(tpu_embedding_config) - end - if is_global_init !== nothing - desc["is_global_init"] = Base.Bool(is_global_init) - end - res = tf.execute(desc) - node = tf.TapeNode(configure_distributed_tpu, [], name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function configure_distributed_tpu(; name=nothing, embedding_config=nothing, tpu_embedding_config=nothing, is_global_init=nothing) - if tf.in_eager_mode() - configure_distributed_tpu_eager(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) - else - configure_distributed_tpu_graph(; name=name, embedding_config=embedding_config, tpu_embedding_config=tpu_embedding_config, is_global_init=is_global_init) - end - end end @@ -34307,55 +62337,99 @@ end A placeholder op 
for multiple values that will be sent from TensorFlow to a """ begin - function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "_XlaSendFromHost") do - desc = tf.NodeDescription("_XlaSendFromHost") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - dynamic_key_ = convert(Tensor{String}, dynamic_key_) - tf.add_input(desc, inputs_) - tf.add_input(desc, dynamic_key_) - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) + begin + function _xla_send_from_host_graph(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaSendFromHost") do + desc = tf.NodeDescription("_XlaSendFromHost") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + dynamic_key_ = convert(Tensor{String}, dynamic_key_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, dynamic_key_) + end + end + begin + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if key !== nothing + desc["key"] = Base.String(key) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + desc = tf.EagerOp("_XlaSendFromHost") + inputs_ = convert(tf.EagerTensor, inputs_) + dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, dynamic_key_) + end + end + begin + begin + if Tinputs !== nothing + desc["Tinputs"] = map(Base.identity, Tinputs) + end + end + begin + if key !== nothing + desc["key"] = Base.String(key) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) + else + _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) + end end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _xla_send_from_host_eager(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - desc = tf.EagerOp("_XlaSendFromHost") - inputs_ = convert(tf.EagerTensor, inputs_) - dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) - tf.add_input(desc, inputs_) - tf.add_input(desc, dynamic_key_) - if Tinputs !== nothing - desc["Tinputs"] = map(Base.identity, Tinputs) - end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = 
Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(_xla_send_from_host, [inputs_, dynamic_key_], name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_send_from_host(inputs_, dynamic_key_; name=nothing, Tinputs=nothing, key=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - _xla_send_from_host_eager(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) - else - _xla_send_from_host_graph(inputs_, dynamic_key_; name=name, Tinputs=Tinputs, key=key, device_ordinal=device_ordinal) - end - end end @@ -34365,35 +62439,63 @@ end """ begin - function get_session_handle_v2_graph(value_; name=nothing) - local desc - tf.with_op_name(name, "GetSessionHandleV2") do - desc = tf.NodeDescription("GetSessionHandleV2") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) + begin + function get_session_handle_v2_graph(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandleV2") do + desc = tf.NodeDescription("GetSessionHandleV2") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function get_session_handle_v2_eager(value_; name=nothing) - desc = tf.EagerOp("GetSessionHandleV2") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function get_session_handle_v2_eager(value_; name=nothing) + desc = tf.EagerOp("GetSessionHandleV2") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(get_session_handle_v2, [value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle_v2(value_; name=nothing) - if tf.in_eager_mode() - get_session_handle_v2_eager(value_; name=name) - else - get_session_handle_v2_graph(value_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle_v2(value_; name=nothing) + if tf.in_eager_mode() + get_session_handle_v2_eager(value_; name=name) + else + get_session_handle_v2_graph(value_; name=name) + end end - end + end end @@ -34403,40 +62505,78 @@ end """ begin - function relu_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "ReluGrad") do - desc = tf.NodeDescription("ReluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) + begin + function relu_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "ReluGrad") do + desc = tf.NodeDescription("ReluGrad") + begin + begin + gradients_ = 
convert(Tensor{Any}, gradients_) + begin + end + end + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + (gradients_, features_) = tf.tf_promote(gradients_, features_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function relu_grad_eager(gradients_, features_; name=nothing) - desc = tf.EagerOp("ReluGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function relu_grad_eager(gradients_, features_; name=nothing) + desc = tf.EagerOp("ReluGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(relu_grad, [gradients_, features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - relu_grad_eager(gradients_, features_; name=name) - else - relu_grad_graph(gradients_, features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + relu_grad_eager(gradients_, features_; name=name) + else + relu_grad_graph(gradients_, features_; name=name) + end end - end + end end @@ -34446,48 +62586,100 @@ end """ begin - function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentMin") do - desc = tf.NodeDescription("UnsortedSegmentMin") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("UnsortedSegmentMin") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end 
- end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name) + begin + function unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMin") do + desc = tf.NodeDescription("UnsortedSegmentMin") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("UnsortedSegmentMin") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_min, [data_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_min(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_min_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_min_graph(data_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -34497,84 +62689,162 @@ end """ begin - function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - local desc - tf.with_op_name(name, "ParseExample") do - desc = tf.NodeDescription("ParseExample") - serialized_ = convert(Tensor{String}, serialized_) - names_ = convert(Tensor{String}, names_) - sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_] - dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_] - dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] - tf.add_input(desc, serialized_) - tf.add_input(desc, names_) - tf.add_input(desc, sparse_keys_) - tf.add_input(desc, dense_keys_) - tf.add_input(desc, dense_defaults_) - if Nsparse !== nothing - desc["Nsparse"] = Base.Int(Nsparse) - end - if Ndense !== nothing - desc["Ndense"] = Base.Int(Ndense) + begin + function parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, 
Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + local desc + tf.with_op_name(name, "ParseExample") do + desc = tf.NodeDescription("ParseExample") + begin + begin + serialized_ = convert(Tensor{String}, serialized_) + begin + end + end + begin + names_ = convert(Tensor{String}, names_) + begin + end + end + begin + sparse_keys_ = [convert(Tensor{String}, x) for x = sparse_keys_] + begin + end + end + begin + dense_keys_ = [convert(Tensor{String}, x) for x = dense_keys_] + begin + end + end + begin + dense_defaults_ = [convert(Tensor{Any}, x) for x = dense_defaults_] + begin + end + end + end + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, names_) + end + begin + tf.add_input(desc, sparse_keys_) + end + begin + tf.add_input(desc, dense_keys_) + end + begin + tf.add_input(desc, dense_defaults_) + end + end + begin + begin + if Nsparse !== nothing + desc["Nsparse"] = Base.Int(Nsparse) + end + end + begin + if Ndense !== nothing + desc["Ndense"] = Base.Int(Ndense) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + desc = tf.EagerOp("ParseExample") + serialized_ = convert(tf.EagerTensor, serialized_) + names_ = convert(tf.EagerTensor, names_) + sparse_keys_ = convert(tf.EagerTensor, sparse_keys_) + dense_keys_ = convert(tf.EagerTensor, dense_keys_) + dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) + begin + begin + tf.add_input(desc, serialized_) + end + begin + tf.add_input(desc, names_) + end + begin + tf.add_input(desc, sparse_keys_) + end + begin + tf.add_input(desc, dense_keys_) + end + begin + tf.add_input(desc, dense_defaults_) + end + end + begin + begin + if Nsparse !== nothing + desc["Nsparse"] = Base.Int(Nsparse) + end + end + begin + if Ndense !== nothing + desc["Ndense"] = Base.Int(Ndense) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if Tdense !== nothing + desc["Tdense"] = map(Base.identity, Tdense) + end + end + begin + if dense_shapes !== nothing + desc["dense_shapes"] = map(Base.identity, dense_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) + if tf.in_eager_mode() + parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, 
sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + else + parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) + end end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - desc = tf.EagerOp("ParseExample") - serialized_ = convert(tf.EagerTensor, serialized_) - names_ = convert(tf.EagerTensor, names_) - sparse_keys_ = convert(tf.EagerTensor, sparse_keys_) - dense_keys_ = convert(tf.EagerTensor, dense_keys_) - dense_defaults_ = convert(tf.EagerTensor, dense_defaults_) - tf.add_input(desc, serialized_) - tf.add_input(desc, names_) - tf.add_input(desc, sparse_keys_) - tf.add_input(desc, dense_keys_) - tf.add_input(desc, dense_defaults_) - if Nsparse !== nothing - desc["Nsparse"] = Base.Int(Nsparse) - end - if Ndense !== nothing - desc["Ndense"] = Base.Int(Ndense) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if Tdense !== nothing - desc["Tdense"] = map(Base.identity, Tdense) - end - if dense_shapes !== nothing - desc["dense_shapes"] = map(Base.identity, dense_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(parse_example, [serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_], name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parse_example(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=nothing, Nsparse=nothing, Ndense=nothing, sparse_types=nothing, Tdense=nothing, dense_shapes=nothing) - if tf.in_eager_mode() - parse_example_eager(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - else - parse_example_graph(serialized_, names_, sparse_keys_, dense_keys_, dense_defaults_; name=name, Nsparse=Nsparse, Ndense=Ndense, sparse_types=sparse_types, Tdense=Tdense, dense_shapes=dense_shapes) - end - end end @@ -34584,49 +62854,89 @@ end """ begin - function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueEnqueueV2") do - desc = tf.NodeDescription("QueueEnqueueV2") - handle_ = convert(Tensor{Any}, handle_) - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) + begin + function queue_enqueue_v2_graph(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueEnqueueV2") do + desc = tf.NodeDescription("QueueEnqueueV2") + begin + begin + handle_ = 
convert(Tensor{Any}, handle_) + begin + end + end + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueEnqueueV2") + handle_ = convert(tf.EagerTensor, handle_) + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Tcomponents !== nothing + desc["Tcomponents"] = map(Base.identity, Tcomponents) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + else + queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) end - function queue_enqueue_v2_eager(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueEnqueueV2") - handle_ = convert(tf.EagerTensor, handle_) - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, handle_) - tf.add_input(desc, components_) - if Tcomponents !== nothing - desc["Tcomponents"] = map(Base.identity, Tcomponents) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_enqueue_v2, [handle_, components_], name=nothing, Tcomponents=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_enqueue_v2(handle_, components_; name=nothing, Tcomponents=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_enqueue_v2_eager(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - else - queue_enqueue_v2_graph(handle_, components_; name=name, Tcomponents=Tcomponents, timeout_ms=timeout_ms) - end - end end @@ -34636,53 +62946,107 @@ end """ begin - function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterNdAdd") do - desc = tf.NodeDescription("ScatterNdAdd") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - 
tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_nd_add_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdAdd") do + desc = tf.NodeDescription("ScatterNdAdd") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdAdd") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_nd_add_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterNdAdd") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_add, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_add(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_nd_add_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_nd_add_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -34692,33 +63056,57 
@@ end """ begin - function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderNumRecordsProducedV2") do - desc = tf.NodeDescription("ReaderNumRecordsProducedV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_num_records_produced_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderNumRecordsProducedV2") do + desc = tf.NodeDescription("ReaderNumRecordsProducedV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderNumRecordsProducedV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_num_records_produced_v2_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderNumRecordsProducedV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_num_records_produced_v2, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_num_records_produced_v2_eager(reader_handle_; name=name) - else - reader_num_records_produced_v2_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_num_records_produced_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_num_records_produced_v2_eager(reader_handle_; name=name) + else + reader_num_records_produced_v2_graph(reader_handle_; name=name) + end end - end + end end @@ -34728,69 +63116,133 @@ end Load embedding parameters for a single table. 
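Takes `parameters_`, `ms_`, `mom_` and `mg_` (converted to `Tensor{Float32}`) plus the optional `table_id`, `table_name`, `num_shards` and `shard_id` attributes.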
""" begin - function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - mg_ = convert(Tensor{Float32}, mg_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, mg_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingCenteredRMSPropParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + ms_ = convert(Tensor{Float32}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Float32}, mom_) + begin + end + end + begin + mg_ = convert(Tensor{Float32}, mg_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, mg_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + mg_ = convert(tf.EagerTensor, mg_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, mg_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters(parameters_, 
ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingCenteredRMSPropParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - mg_ = convert(tf.EagerTensor, mg_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, mg_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_centered_rms_prop_parameters, [parameters_, ms_, mom_, mg_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_centered_rms_prop_parameters(parameters_, ms_, mom_, mg_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_centered_rms_prop_parameters_eager(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_centered_rms_prop_parameters_graph(parameters_, ms_, mom_, mg_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -34800,46 +63252,88 @@ end """ begin - function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "AssignSub") do - desc = tf.NodeDescription("AssignSub") - ref_ = convert(Tensor{Any}, ref_) - value_ = convert(Tensor{Any}, value_) - (ref_, value_) = tf.tf_promote(ref_, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function assign_sub_graph(ref_, value_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "AssignSub") do + desc = tf.NodeDescription("AssignSub") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (ref_, value_) = tf.tf_promote(ref_, value_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function 
assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("AssignSub") + ref_ = convert(tf.EagerTensor, ref_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + assign_sub_eager(ref_, value_; name=name, use_locking=use_locking) + else + assign_sub_graph(ref_, value_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function assign_sub_eager(ref_, value_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("AssignSub") - ref_ = convert(tf.EagerTensor, ref_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, value_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign_sub, [ref_, value_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_sub(ref_, value_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - assign_sub_eager(ref_, value_; name=name, use_locking=use_locking) - else - assign_sub_graph(ref_, value_; name=name, use_locking=use_locking) - end - end end @@ -34849,48 +63343,100 @@ end """ begin - function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentSum") do - desc = tf.NodeDescription("UnsortedSegmentSum") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("UnsortedSegmentSum") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name) + begin + function unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentSum") do + desc = tf.NodeDescription("UnsortedSegmentSum") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("UnsortedSegmentSum") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_sum, [data_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_sum(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_sum_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_sum_graph(data_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -34900,78 +63446,160 @@ end """ begin - function fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNormGrad") do - desc = tf.NodeDescription("FusedBatchNormGrad") - y_backprop_ = convert(Tensor{Any}, y_backprop_) - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) - reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) - (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) - tf.add_input(desc, y_backprop_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, reserve_space_1_) - tf.add_input(desc, reserve_space_2_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) + begin + function 
fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNormGrad") do + desc = tf.NodeDescription("FusedBatchNormGrad") + begin + begin + y_backprop_ = convert(Tensor{Any}, y_backprop_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + scale_ = convert(Tensor{Any}, scale_) + begin + end + end + begin + reserve_space_1_ = convert(Tensor{Any}, reserve_space_1_) + begin + end + end + begin + reserve_space_2_ = convert(Tensor{Any}, reserve_space_2_) + begin + end + end + begin + (y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) = tf.tf_promote(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_) + end + end + begin + begin + tf.add_input(desc, y_backprop_) + end + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, reserve_space_1_) + end + begin + tf.add_input(desc, reserve_space_2_) + end + end + begin + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNormGrad") + y_backprop_ = convert(tf.EagerTensor, y_backprop_) + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_) + reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_) + begin + begin + tf.add_input(desc, y_backprop_) + end + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, reserve_space_1_) + end + begin + tf.add_input(desc, reserve_space_2_) + end + end + begin + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(y_backprop_) + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(scale_) + end + begin + desc["T"] = tf.data_type(reserve_space_1_) + end + begin + desc["T"] = tf.data_type(reserve_space_2_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + else 
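+ # Both branches forward the same keyword attributes; only the backend
+ # differs (eager TFE_Execute vs. graph-node construction).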
+ fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - desc = tf.EagerOp("FusedBatchNormGrad") - y_backprop_ = convert(tf.EagerTensor, y_backprop_) - x_ = convert(tf.EagerTensor, x_) - scale_ = convert(tf.EagerTensor, scale_) - reserve_space_1_ = convert(tf.EagerTensor, reserve_space_1_) - reserve_space_2_ = convert(tf.EagerTensor, reserve_space_2_) - tf.add_input(desc, y_backprop_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, reserve_space_1_) - tf.add_input(desc, reserve_space_2_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(y_backprop_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(scale_) - desc["T"] = tf.data_type(reserve_space_1_) - desc["T"] = tf.data_type(reserve_space_2_) - res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm_grad, [y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm_grad(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_grad_eager(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_grad_graph(y_backprop_, x_, scale_, reserve_space_1_, reserve_space_2_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - end - end end @@ -34981,65 +63609,137 @@ end """ begin - function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradV2") do - desc = tf.NodeDescription("MaxPoolGradV2") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - ksize_ = convert(Tensor{Int32}, ksize_) - strides_ = convert(Tensor{Int32}, strides_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradV2") do + desc = tf.NodeDescription("MaxPoolGradV2") + begin + begin + orig_input_ = convert(Tensor{Float32}, orig_input_) + begin + end + end + begin 
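+ # Inputs below are cast to the element types declared in the op
+ # definition (Float32 tensors, Int32 ksize/strides); an empty begin/end
+ # stanza is what the generator emits when an input needs no further
+ # preprocessing.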
+ orig_output_ = convert(Tensor{Float32}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Float32}, grad_) + begin + end + end + begin + ksize_ = convert(Tensor{Int32}, ksize_) + begin + end + end + begin + strides_ = convert(Tensor{Int32}, strides_) + begin + end + end + begin + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGradV2") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + ksize_ = convert(tf.EagerTensor, ksize_) + strides_ = convert(tf.EagerTensor, strides_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, ksize_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + else + max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPoolGradV2") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - ksize_ = convert(tf.EagerTensor, ksize_) - strides_ = convert(tf.EagerTensor, strides_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - tf.add_input(desc, ksize_) - tf.add_input(desc, strides_) - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = 
tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_v2, [orig_input_, orig_output_, grad_, ksize_, strides_], name=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_v2(orig_input_, orig_output_, grad_, ksize_, strides_; name=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_v2_eager(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - else - max_pool_grad_v2_graph(orig_input_, orig_output_, grad_, ksize_, strides_; name=name, padding=padding, data_format=data_format) - end - end end @@ -35049,41 +63749,81 @@ end """ begin - function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCreateEnsemble") do - desc = tf.NodeDescription("BoostedTreesCreateEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - stamp_token_ = convert(Tensor{Int64}, stamp_token_) - tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - desc = tf.EagerOp("BoostedTreesCreateEnsemble") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - stamp_token_ = convert(tf.EagerTensor, stamp_token_) - tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, stamp_token_) - tf.add_input(desc, tree_ensemble_serialized_) - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) - if tf.in_eager_mode() - boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) - else - boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + begin + function boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCreateEnsemble") do + desc = tf.NodeDescription("BoostedTreesCreateEnsemble") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + stamp_token_ = convert(Tensor{Int64}, stamp_token_) + begin + end + end + begin + tree_ensemble_serialized_ = convert(Tensor{String}, tree_ensemble_serialized_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, stamp_token_) + end + begin + tf.add_input(desc, tree_ensemble_serialized_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_create_ensemble_eager(tree_ensemble_handle_, 
stamp_token_, tree_ensemble_serialized_; name=nothing) + desc = tf.EagerOp("BoostedTreesCreateEnsemble") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + stamp_token_ = convert(tf.EagerTensor, stamp_token_) + tree_ensemble_serialized_ = convert(tf.EagerTensor, tree_ensemble_serialized_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, stamp_token_) + end + begin + tf.add_input(desc, tree_ensemble_serialized_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_create_ensemble, [tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_ensemble(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=nothing) + if tf.in_eager_mode() + boosted_trees_create_ensemble_eager(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + else + boosted_trees_create_ensemble_graph(tree_ensemble_handle_, stamp_token_, tree_ensemble_serialized_; name=name) + end end - end + end end @@ -35093,59 +63833,95 @@ end """ begin - function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapIncompleteSize") do - desc = tf.NodeDescription("OrderedMapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) + begin + function ordered_map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapIncompleteSize") do + desc = tf.NodeDescription("OrderedMapIncompleteSize") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapIncompleteSize") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + 
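+ # OrderedMapIncompleteSize takes no tensor inputs: the generated wrapper
+ # only forwards the capacity, memory_limit, dtypes, container and
+ # shared_name attributes to the op.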
#= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function ordered_map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -35155,64 +63931,102 @@ end """ begin - function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) - local desc - tf.with_op_name(name, "Skipgram") do - desc = tf.NodeDescription("Skipgram") - if filename !== nothing - desc["filename"] = Base.String(filename) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) - end - if window_size !== nothing - desc["window_size"] = Base.Int(window_size) - end - if min_count !== nothing - desc["min_count"] = Base.Int(min_count) + begin + function skipgram_graph(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + local desc + tf.with_op_name(name, "Skipgram") do + desc = tf.NodeDescription("Skipgram") + begin + end + begin + end + begin + begin + if filename !== nothing + desc["filename"] = Base.String(filename) + end + end + begin + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + end + begin + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + end + begin + if min_count !== nothing + desc["min_count"] = Base.Int(min_count) + end + end + begin + if 
subsample !== nothing + desc["subsample"] = Base.identity(subsample) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:7 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function skipgram_eager(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + desc = tf.EagerOp("Skipgram") + begin + end + begin + begin + if filename !== nothing + desc["filename"] = Base.String(filename) + end + end + begin + if batch_size !== nothing + desc["batch_size"] = Base.Int(batch_size) + end + end + begin + if window_size !== nothing + desc["window_size"] = Base.Int(window_size) + end + end + begin + if min_count !== nothing + desc["min_count"] = Base.Int(min_count) + end + end + begin + if subsample !== nothing + desc["subsample"] = Base.identity(subsample) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) + if tf.in_eager_mode() + skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + else + skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) + end end - if subsample !== nothing - desc["subsample"] = Base.identity(subsample) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:7 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function skipgram_eager(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) - desc = tf.EagerOp("Skipgram") - if filename !== nothing - desc["filename"] = Base.String(filename) - end - if batch_size !== nothing - desc["batch_size"] = Base.Int(batch_size) - end - if window_size !== nothing - desc["window_size"] = Base.Int(window_size) - end - if min_count !== nothing - desc["min_count"] = Base.Int(min_count) - end - if subsample !== nothing - desc["subsample"] = Base.identity(subsample) - end - res = tf.execute(desc) - node = tf.TapeNode(skipgram, [], name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function skipgram(; name=nothing, filename=nothing, batch_size=nothing, window_size=nothing, min_count=nothing, subsample=nothing) - if tf.in_eager_mode() - skipgram_eager(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) - else - skipgram_graph(; name=name, filename=filename, batch_size=batch_size, window_size=window_size, min_count=min_count, subsample=subsample) - end - end end @@ -35222,48 +64036,92 @@ end """ begin - function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) - local desc - tf.with_op_name(name, "ArgMin") do - desc = tf.NodeDescription("ArgMin") - input_ = convert(Tensor{Any}, input_) - dimension_ = convert(Tensor{Int32}, dimension_) - 
dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) - (input_,) = tf.tf_promote(input_) - (dimension_,) = tf.tf_promote(dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) + begin + function arg_min_graph(input_, dimension_; name=nothing, output_type=nothing) + local desc + tf.with_op_name(name, "ArgMin") do + desc = tf.NodeDescription("ArgMin") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + dimension_ = convert(Tensor{Int32}, dimension_) + begin + dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (dimension_,) = tf.tf_promote(dimension_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dimension_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) + desc = tf.EagerOp("ArgMin") + input_ = convert(tf.EagerTensor, input_) + dimension_ = convert(tf.EagerTensor, dimension_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dimension_) + end + end + begin + begin + if output_type !== nothing + desc["output_type"] = Base.identity(output_type) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(dimension_) + end + res = tf.execute(desc) + node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) + if tf.in_eager_mode() + arg_min_eager(input_, dimension_; name=name, output_type=output_type) + else + arg_min_graph(input_, dimension_; name=name, output_type=output_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function arg_min_eager(input_, dimension_; name=nothing, output_type=nothing) - desc = tf.EagerOp("ArgMin") - input_ = convert(tf.EagerTensor, input_) - dimension_ = convert(tf.EagerTensor, dimension_) - tf.add_input(desc, input_) - tf.add_input(desc, dimension_) - if output_type !== nothing - desc["output_type"] = Base.identity(output_type) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(dimension_) - res = tf.execute(desc) - node = tf.TapeNode(arg_min, [input_, dimension_], name=nothing, output_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function arg_min(input_, dimension_; name=nothing, output_type=nothing) - if tf.in_eager_mode() - arg_min_eager(input_, dimension_; name=name, output_type=output_type) - else - arg_min_graph(input_, dimension_; name=name, output_type=output_type) - end - end end @@ -35273,49 +64131,89 @@ end """ begin - function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueMany") do - desc = tf.NodeDescription("QueueDequeueMany") - handle_ = convert(Tensor{String}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - 
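The `dimension_ = dimension_ - convert(tf.Tensor{eltype(dimension_)}, 1)` line that the generator emits for `ArgMin` (and for `indices_` in `SparseSegmentMean` further down) is the 1-based-to-0-based bridge: Julia callers pass 1-based dimensions and indices, while the TensorFlow kernels expect 0-based ones, so the wrapper subtracts one before wiring the input. A minimal plain-Julia sketch of the convention, with a hypothetical helper name:

    # to_zero_based is illustrative, not part of this patch.
    to_zero_based(dim::Integer) = dim - one(dim)
    @assert to_zero_based(1) == 0   # Julia's first dimension is TensorFlow's 0th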
if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function queue_dequeue_many_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueMany") do + desc = tf.NodeDescription("QueueDequeueMany") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + n_ = convert(Tensor{Int32}, n_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueMany") + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_many_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeueMany") - handle_ = convert(tf.EagerTensor, handle_) - n_ = convert(tf.EagerTensor, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_many, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_many_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_many_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -35325,38 +64223,64 @@ end """ begin - function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do - desc = 
tf.NodeDescription("BoostedTreesSerializeEnsemble") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesSerializeEnsemble") do + desc = tf.NodeDescription("BoostedTreesSerializeEnsemble") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) - desc = tf.EagerOp("BoostedTreesSerializeEnsemble") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=nothing) + desc = tf.EagerOp("BoostedTreesSerializeEnsemble") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_serialize_ensemble, [tree_ensemble_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name) - else - boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_serialize_ensemble(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + boosted_trees_serialize_ensemble_eager(tree_ensemble_handle_; name=name) + else + boosted_trees_serialize_ensemble_graph(tree_ensemble_handle_; name=name) + end end - end + end end @@ -35366,40 +64290,78 @@ end """ begin - function minimum_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Minimum") do - desc = tf.NodeDescription("Minimum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function minimum_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Minimum") do + desc = tf.NodeDescription("Minimum") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function minimum_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Minimum") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - 
desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(minimum, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function minimum_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Minimum") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(minimum, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function minimum(x_, y_; name=nothing) - if tf.in_eager_mode() - minimum_eager(x_, y_; name=name) - else - minimum_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function minimum(x_, y_; name=nothing) + if tf.in_eager_mode() + minimum_eager(x_, y_; name=name) + else + minimum_graph(x_, y_; name=name) + end end - end + end end @@ -35409,50 +64371,100 @@ end """ begin - function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) - local desc - tf.with_op_name(name, "Substr") do - desc = tf.NodeDescription("Substr") - input_ = convert(Tensor{String}, input_) - pos_ = convert(Tensor{Any}, pos_) - len_ = convert(Tensor{Any}, len_) - (pos_, len_) = tf.tf_promote(pos_, len_) - tf.add_input(desc, input_) - tf.add_input(desc, pos_) - tf.add_input(desc, len_) - if unit !== nothing - desc["unit"] = Base.String(unit) + begin + function substr_graph(input_, pos_, len_; name=nothing, unit=nothing) + local desc + tf.with_op_name(name, "Substr") do + desc = tf.NodeDescription("Substr") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + begin + pos_ = convert(Tensor{Any}, pos_) + begin + end + end + begin + len_ = convert(Tensor{Any}, len_) + begin + end + end + begin + (pos_, len_) = tf.tf_promote(pos_, len_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, pos_) + end + begin + tf.add_input(desc, len_) + end + end + begin + begin + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function substr_eager(input_, pos_, len_; name=nothing, unit=nothing) + desc = tf.EagerOp("Substr") + input_ = convert(tf.EagerTensor, input_) + pos_ = convert(tf.EagerTensor, pos_) + len_ = convert(tf.EagerTensor, len_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, pos_) + end + begin + tf.add_input(desc, len_) + end + end + begin + begin + if unit !== nothing + desc["unit"] = Base.String(unit) + end + end + end + begin + desc["T"] = tf.data_type(pos_) + end + begin + desc["T"] = tf.data_type(len_) + end + res = tf.execute(desc) + node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing) + if tf.in_eager_mode() + substr_eager(input_, pos_, len_; name=name, unit=unit) + else + substr_graph(input_, pos_, len_; name=name, unit=unit) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function substr_eager(input_, pos_, 
len_; name=nothing, unit=nothing) - desc = tf.EagerOp("Substr") - input_ = convert(tf.EagerTensor, input_) - pos_ = convert(tf.EagerTensor, pos_) - len_ = convert(tf.EagerTensor, len_) - tf.add_input(desc, input_) - tf.add_input(desc, pos_) - tf.add_input(desc, len_) - if unit !== nothing - desc["unit"] = Base.String(unit) - end - desc["T"] = tf.data_type(pos_) - desc["T"] = tf.data_type(len_) - res = tf.execute(desc) - node = tf.TapeNode(substr, [input_, pos_, len_], name=nothing, unit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function substr(input_, pos_, len_; name=nothing, unit=nothing) - if tf.in_eager_mode() - substr_eager(input_, pos_, len_; name=name, unit=unit) - else - substr_graph(input_, pos_, len_; name=name, unit=unit) - end - end end @@ -35462,33 +64474,57 @@ end """ begin - function queue_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueSize") do - desc = tf.NodeDescription("QueueSize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function queue_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "QueueSize") do + desc = tf.NodeDescription("QueueSize") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function queue_size_eager(handle_; name=nothing) - desc = tf.EagerOp("QueueSize") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(queue_size, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function queue_size_eager(handle_; name=nothing) + desc = tf.EagerOp("QueueSize") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(queue_size, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size(handle_; name=nothing) - if tf.in_eager_mode() - queue_size_eager(handle_; name=name) - else - queue_size_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_size(handle_; name=nothing) + if tf.in_eager_mode() + queue_size_eager(handle_; name=name) + else + queue_size_graph(handle_; name=name) + end end - end + end end @@ -35498,81 +64534,193 @@ end """ begin - function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyFtrlV2") do - desc = tf.NodeDescription("ApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - tf.add_input(desc, var_) - 
tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyFtrlV2") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(linear_) - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(l2_shrinkage_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + begin + function apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyFtrlV2") do + desc = tf.NodeDescription("ApplyFtrlV2") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + 
tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyFtrlV2") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(linear_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(l2_shrinkage_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -35582,46 +64730,94 @@ end """ begin - function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentMean") do - desc = tf.NodeDescription("SparseSegmentMean") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) - desc = tf.EagerOp("SparseSegmentMean") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = 
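A nine-input training op like `ApplyFtrlV2` assigns `desc["T"]` once per input. Only the last write survives, which is safe because the wrapper already promoted or converted every operand to a common element type; the repeated assignments are the generator being uniform rather than clever. In plain Julia terms:

    # Last-write-wins on a Dict key; harmless when every write carries the same value.
    attrs = Dict{String,Any}()
    for dt in (Float32, Float32, Float32)    # promoted inputs all share a dtype
        attrs["T"] = dt
    end
    @assert attrs["T"] == Float32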
convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) - if tf.in_eager_mode() - sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) - else - sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) + begin + function sparse_segment_mean_graph(data_, indices_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentMean") do + desc = tf.NodeDescription("SparseSegmentMean") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_mean_eager(data_, indices_, segment_ids_; name=nothing) + desc = tf.EagerOp("SparseSegmentMean") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_mean, [data_, indices_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_mean(data_, indices_, segment_ids_; name=nothing) + if tf.in_eager_mode() + sparse_segment_mean_eager(data_, indices_, segment_ids_; name=name) + else + sparse_segment_mean_graph(data_, indices_, segment_ids_; name=name) + end end - end + end end @@ -35631,61 +64827,109 @@ end Load embedding parameters for a single table. 
""" begin - function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMomentumParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMomentumParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + momenta_ = convert(Tensor{Float32}, momenta_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - 
tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingMomentumParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - momenta_ = convert(tf.EagerTensor, momenta_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_momentum_parameters, [parameters_, momenta_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_momentum_parameters(parameters_, momenta_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_momentum_parameters_eager(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_momentum_parameters_graph(parameters_, momenta_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -35695,64 +64939,142 @@ end """ begin - function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyProximalAdagrad") do - desc = tf.NodeDescription("ResourceApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalAdagrad") do + desc = tf.NodeDescription("ResourceApplyProximalAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, l1_, l2_, grad_) = tf.tf_promote(lr_, l1_, l2_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + 
tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyProximalAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyProximalAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) - else - resource_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) - end - end end @@ -35762,53 +65084,101 @@ end """ begin - function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayGatherV2") do - desc = tf.NodeDescription("TensorArrayGatherV2") - handle_ = convert(Tensor{String}, 
handle_) - indices_ = convert(Tensor{Int32}, indices_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayGatherV2") do + desc = tf.NodeDescription("TensorArrayGatherV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayGatherV2") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v2(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArrayGatherV2") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_gather_v2, [handle_, indices_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_gather_v2(handle_, 
indices_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_gather_v2_eager(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_gather_v2_graph(handle_, indices_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - end - end end @@ -35818,40 +65188,78 @@ end """ begin - function less_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Less") do - desc = tf.NodeDescription("Less") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function less_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Less") do + desc = tf.NodeDescription("Less") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function less_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Less") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(less, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function less_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Less") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(less, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less(x_, y_; name=nothing) - if tf.in_eager_mode() - less_eager(x_, y_; name=name) - else - less_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function less(x_, y_; name=nothing) + if tf.in_eager_mode() + less_eager(x_, y_; name=name) + else + less_graph(x_, y_; name=name) + end end - end + end end @@ -35861,41 +65269,65 @@ end """ begin - function host_const_graph(; name=nothing, value=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "HostConst") do - desc = tf.NodeDescription("HostConst") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function host_const_graph(; name=nothing, value=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "HostConst") do + desc = tf.NodeDescription("HostConst") + begin + end + begin + end + begin + begin + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function host_const_eager(; name=nothing, value=nothing, dtype=nothing) - desc = tf.EagerOp("HostConst") - if value !== nothing - desc["value"] = TensorFlow.RawTensor(value) - end - if dtype !== nothing - 
desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function host_const_eager(; name=nothing, value=nothing, dtype=nothing) + desc = tf.EagerOp("HostConst") + begin + end + begin + begin + if value !== nothing + desc["value"] = TensorFlow.RawTensor(value) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(host_const, [], name=nothing, value=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing) - if tf.in_eager_mode() - host_const_eager(; name=name, value=value, dtype=dtype) - else - host_const_graph(; name=name, value=value, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function host_const(; name=nothing, value=nothing, dtype=nothing) + if tf.in_eager_mode() + host_const_eager(; name=name, value=value, dtype=dtype) + else + host_const_graph(; name=name, value=value, dtype=dtype) + end end - end + end end @@ -35905,46 +65337,88 @@ end """ begin - function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "UpperBound") do - desc = tf.NodeDescription("UpperBound") - sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) - values_ = convert(Tensor{Any}, values_) - (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) - tf.add_input(desc, sorted_inputs_) - tf.add_input(desc, values_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function upper_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "UpperBound") do + desc = tf.NodeDescription("UpperBound") + begin + begin + sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) + end + end + begin + begin + tf.add_input(desc, sorted_inputs_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) + desc = tf.EagerOp("UpperBound") + sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, sorted_inputs_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(sorted_inputs_) + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + 
upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) + else + upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function upper_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) - desc = tf.EagerOp("UpperBound") - sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, sorted_inputs_) - tf.add_input(desc, values_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(sorted_inputs_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(upper_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function upper_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - upper_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) - else - upper_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) - end - end end @@ -35954,43 +65428,79 @@ end """ begin - function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListGetItem") do - desc = tf.NodeDescription("TensorListGetItem") - input_handle_ = convert(Tensor{Any}, input_handle_) - index_ = convert(Tensor{Int32}, index_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_get_item_graph(input_handle_, index_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGetItem") do + desc = tf.NodeDescription("TensorListGetItem") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, index_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_get_item_eager(input_handle_, index_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListGetItem") + input_handle_ = convert(tf.EagerTensor, input_handle_) + index_ = convert(tf.EagerTensor, index_) + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, index_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype) + else + tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function 
tensor_list_get_item_eager(input_handle_, index_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListGetItem") - input_handle_ = convert(tf.EagerTensor, input_handle_) - index_ = convert(tf.EagerTensor, index_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, index_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_get_item, [input_handle_, index_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_get_item(input_handle_, index_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_get_item_eager(input_handle_, index_; name=name, element_dtype=element_dtype) - else - tensor_list_get_item_graph(input_handle_, index_; name=name, element_dtype=element_dtype) - end - end end @@ -36000,53 +65510,101 @@ end """ begin - function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVars") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVars") - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) + begin + function fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVars") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVars") + begin + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + min_ = convert(Tensor{Float32}, min_) + begin + end + end + begin + max_ = convert(Tensor{Float32}, max_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVars") + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + 
fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - end - tf.Tensor(tf.Operation(desc)) - end - function fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - desc = tf.EagerOp("FakeQuantWithMinMaxVars") - inputs_ = convert(tf.EagerTensor, inputs_) - min_ = convert(tf.EagerTensor, min_) - max_ = convert(tf.EagerTensor, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - end - end end @@ -36056,33 +65614,57 @@ end """ begin - function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do - desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) + begin + function is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesQuantileStreamResourceInitialized") do + desc = tf.NodeDescription("IsBoostedTreesQuantileStreamResourceInitialized") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) - desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - tf.add_input(desc, quantile_stream_resource_handle_) - res = tf.execute(desc) - node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=nothing) + desc = tf.EagerOp("IsBoostedTreesQuantileStreamResourceInitialized") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + begin 
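The regenerated bodies carry an empty `begin end` for every section the generator knows about (input conversion, input wiring, attributes, dtype inference) even when an op has nothing to put there; `IsBoostedTreesQuantileStreamResourceInitialized` below takes one input and no attributes, so most of its sections are empty. That is harmless: an empty block is a no-op in Julia.

    # An empty begin/end block evaluates to nothing and has no side effects.
    x = begin end
    @assert x === nothing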
+ begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(is_boosted_trees_quantile_stream_resource_initialized, [quantile_stream_resource_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) - if tf.in_eager_mode() - is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name) - else - is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_quantile_stream_resource_initialized(quantile_stream_resource_handle_; name=nothing) + if tf.in_eager_mode() + is_boosted_trees_quantile_stream_resource_initialized_eager(quantile_stream_resource_handle_; name=name) + else + is_boosted_trees_quantile_stream_resource_initialized_graph(quantile_stream_resource_handle_; name=name) + end end - end + end end @@ -36092,46 +65674,88 @@ end """ begin - function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadUpToV2") do - desc = tf.NodeDescription("ReaderReadUpToV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - queue_handle_ = convert(Tensor{Any}, queue_handle_) - num_records_ = convert(Tensor{Int64}, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) - desc = tf.EagerOp("ReaderReadUpToV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - queue_handle_ = convert(tf.EagerTensor, queue_handle_) - num_records_ = convert(tf.EagerTensor, num_records_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - tf.add_input(desc, num_records_) - res = tf.execute(desc) - node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) - if tf.in_eager_mode() - reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name) - else - reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name) + begin + function reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=nothing) + local desc + tf.with_op_name(name, "ReaderReadUpToV2") do + desc = tf.NodeDescription("ReaderReadUpToV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + begin + queue_handle_ = convert(Tensor{Any}, queue_handle_) + begin + end + end + begin + num_records_ = convert(Tensor{Int64}, num_records_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + begin + tf.add_input(desc, num_records_) + end + end + begin + end + end + begin + out = 
tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=nothing) + desc = tf.EagerOp("ReaderReadUpToV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + num_records_ = convert(tf.EagerTensor, num_records_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + begin + tf.add_input(desc, num_records_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_read_up_to_v2, [reader_handle_, queue_handle_, num_records_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_up_to_v2(reader_handle_, queue_handle_, num_records_; name=nothing) + if tf.in_eager_mode() + reader_read_up_to_v2_eager(reader_handle_, queue_handle_, num_records_; name=name) + else + reader_read_up_to_v2_graph(reader_handle_, queue_handle_, num_records_; name=name) + end end - end + end end @@ -36141,40 +65765,78 @@ end """ begin - function complex_graph(real_, imag_; name=nothing) - local desc - tf.with_op_name(name, "Complex") do - desc = tf.NodeDescription("Complex") - real_ = convert(Tensor{Float32}, real_) - imag_ = convert(Tensor{Float32}, imag_) - (real_, imag_) = tf.tf_promote(real_, imag_) - tf.add_input(desc, real_) - tf.add_input(desc, imag_) + begin + function complex_graph(real_, imag_; name=nothing) + local desc + tf.with_op_name(name, "Complex") do + desc = tf.NodeDescription("Complex") + begin + begin + real_ = convert(Tensor{Float32}, real_) + begin + end + end + begin + imag_ = convert(Tensor{Float32}, imag_) + begin + end + end + begin + (real_, imag_) = tf.tf_promote(real_, imag_) + end + end + begin + begin + tf.add_input(desc, real_) + end + begin + tf.add_input(desc, imag_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function complex_eager(real_, imag_; name=nothing) - desc = tf.EagerOp("Complex") - real_ = convert(tf.EagerTensor, real_) - imag_ = convert(tf.EagerTensor, imag_) - tf.add_input(desc, real_) - tf.add_input(desc, imag_) - desc["T"] = tf.data_type(real_) - desc["T"] = tf.data_type(imag_) - res = tf.execute(desc) - node = tf.TapeNode(complex, [real_, imag_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function complex_eager(real_, imag_; name=nothing) + desc = tf.EagerOp("Complex") + real_ = convert(tf.EagerTensor, real_) + imag_ = convert(tf.EagerTensor, imag_) + begin + begin + tf.add_input(desc, real_) + end + begin + tf.add_input(desc, imag_) + end + end + begin + end + begin + desc["T"] = tf.data_type(real_) + end + begin + desc["T"] = tf.data_type(imag_) + end + res = tf.execute(desc) + node = tf.TapeNode(complex, [real_, imag_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex(real_, imag_; name=nothing) - if tf.in_eager_mode() - complex_eager(real_, imag_; name=name) - else - complex_graph(real_, imag_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function complex(real_, imag_; name=nothing) + if 
tf.in_eager_mode() + complex_eager(real_, imag_; name=name) + else + complex_graph(real_, imag_; name=name) + end end - end + end end @@ -36184,51 +65846,95 @@ end """ begin - function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListReserve") do - desc = tf.NodeDescription("TensorListReserve") - element_shape_ = convert(Tensor{Any}, element_shape_) - num_elements_ = convert(Tensor{Int32}, num_elements_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, num_elements_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) + begin + function tensor_list_reserve_graph(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListReserve") do + desc = tf.NodeDescription("TensorListReserve") + begin + begin + element_shape_ = convert(Tensor{Any}, element_shape_) + begin + end + end + begin + num_elements_ = convert(Tensor{Int32}, num_elements_) + begin + end + end + begin + (element_shape_,) = tf.tf_promote(element_shape_) + end + end + begin + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, num_elements_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListReserve") + element_shape_ = convert(tf.EagerTensor, element_shape_) + num_elements_ = convert(tf.EagerTensor, num_elements_) + begin + begin + tf.add_input(desc, element_shape_) + end + begin + tf.add_input(desc, num_elements_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + begin + desc["shape_type"] = tf.data_type(element_shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_reserve_eager(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListReserve") - element_shape_ = convert(tf.EagerTensor, element_shape_) - num_elements_ = convert(tf.EagerTensor, num_elements_) - tf.add_input(desc, element_shape_) - tf.add_input(desc, num_elements_) - if element_dtype !== nothing - 
desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_reserve, [element_shape_, num_elements_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_reserve(element_shape_, num_elements_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_reserve_eager(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_reserve_graph(element_shape_, num_elements_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end end @@ -36238,41 +65944,73 @@ end """ begin - function bitcast_graph(input_; name=nothing, type_=nothing) - local desc - tf.with_op_name(name, "Bitcast") do - desc = tf.NodeDescription("Bitcast") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if type_ !== nothing - desc["type"] = Base.identity(type_) + begin + function bitcast_graph(input_; name=nothing, type_=nothing) + local desc + tf.with_op_name(name, "Bitcast") do + desc = tf.NodeDescription("Bitcast") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if type_ !== nothing + desc["type"] = Base.identity(type_) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bitcast_eager(input_; name=nothing, type_=nothing) - desc = tf.EagerOp("Bitcast") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if type_ !== nothing - desc["type"] = Base.identity(type_) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bitcast_eager(input_; name=nothing, type_=nothing) + desc = tf.EagerOp("Bitcast") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if type_ !== nothing + desc["type"] = Base.identity(type_) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(bitcast, [input_], name=nothing, type_=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitcast(input_; name=nothing, type_=nothing) - if tf.in_eager_mode() - bitcast_eager(input_; name=name, type_=type_) - else - bitcast_graph(input_; name=name, type_=type_) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitcast(input_; name=nothing, type_=nothing) + if tf.in_eager_mode() + bitcast_eager(input_; name=name, type_=type_) + else + bitcast_graph(input_; name=name, type_=type_) + end end - end + end end @@ -36282,59 +66020,95 @@ end """ begin - function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PriorityQueue") do 
- desc = tf.NodeDescription("PriorityQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function priority_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueue") do + desc = tf.NodeDescription("PriorityQueue") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function priority_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PriorityQueue") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(priority_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function priority_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("PriorityQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(priority_queue, [], name=nothing, 
component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - priority_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - priority_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -36344,118 +66118,280 @@ end """ begin - function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do - desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") - t_ = convert(Tensor{Any}, t_) - t_min_ = convert(Tensor{Float32}, t_min_) - t_max_ = convert(Tensor{Float32}, t_max_) - m_ = convert(Tensor{Any}, m_) - m_min_ = convert(Tensor{Float32}, m_min_) - m_max_ = convert(Tensor{Float32}, m_max_) - v_ = convert(Tensor{Any}, v_) - v_min_ = convert(Tensor{Float32}, v_min_) - v_max_ = convert(Tensor{Float32}, v_max_) - beta_ = convert(Tensor{Any}, beta_) - beta_min_ = convert(Tensor{Float32}, beta_min_) - beta_max_ = convert(Tensor{Float32}, beta_max_) - gamma_ = convert(Tensor{Any}, gamma_) - gamma_min_ = convert(Tensor{Float32}, gamma_min_) - gamma_max_ = convert(Tensor{Float32}, gamma_max_) - (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) - tf.add_input(desc, t_) - tf.add_input(desc, t_min_) - tf.add_input(desc, t_max_) - tf.add_input(desc, m_) - tf.add_input(desc, m_min_) - tf.add_input(desc, m_max_) - tf.add_input(desc, v_) - tf.add_input(desc, v_min_) - tf.add_input(desc, v_max_) - tf.add_input(desc, beta_) - tf.add_input(desc, beta_min_) - tf.add_input(desc, beta_max_) - tf.add_input(desc, gamma_) - tf.add_input(desc, gamma_min_) - tf.add_input(desc, gamma_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") - t_ = convert(tf.EagerTensor, t_) - t_min_ = convert(tf.EagerTensor, t_min_) - t_max_ = convert(tf.EagerTensor, t_max_) - m_ = convert(tf.EagerTensor, m_) - m_min_ = convert(tf.EagerTensor, m_min_) - m_max_ = convert(tf.EagerTensor, m_max_) - v_ = convert(tf.EagerTensor, v_) - v_min_ = convert(tf.EagerTensor, v_min_) - v_max_ = convert(tf.EagerTensor, v_max_) - beta_ = convert(tf.EagerTensor, beta_) - 
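# Editorial note: multi-output ops such as the three-output
# QuantizedBatchNormWithGlobalNormalization below differ from single-output
# ops in their graph-mode tail: instead of tf.Tensor(tf.Operation(desc)) they
# collect one tf.Tensor per output index, and the eager variant returns the
# whole result vector (`return res`) rather than res[1]. The shared collection
# idiom, exactly as generated (assumes `desc` from the enclosing builder):
out = tf.Tensor[]
op = tf.Operation(desc)
for out_idx = 1:3                   # one entry per op output
    push!(out, tf.Tensor(op, out_idx))
end
out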
beta_min_ = convert(tf.EagerTensor, beta_min_) - beta_max_ = convert(tf.EagerTensor, beta_max_) - gamma_ = convert(tf.EagerTensor, gamma_) - gamma_min_ = convert(tf.EagerTensor, gamma_min_) - gamma_max_ = convert(tf.EagerTensor, gamma_max_) - tf.add_input(desc, t_) - tf.add_input(desc, t_min_) - tf.add_input(desc, t_max_) - tf.add_input(desc, m_) - tf.add_input(desc, m_min_) - tf.add_input(desc, m_max_) - tf.add_input(desc, v_) - tf.add_input(desc, v_min_) - tf.add_input(desc, v_max_) - tf.add_input(desc, beta_) - tf.add_input(desc, beta_min_) - tf.add_input(desc, beta_max_) - tf.add_input(desc, gamma_) - tf.add_input(desc, gamma_min_) - tf.add_input(desc, gamma_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - desc["Tinput"] = tf.data_type(t_) - desc["Tinput"] = tf.data_type(m_) - desc["Tinput"] = tf.data_type(v_) - desc["Tinput"] = tf.data_type(beta_) - desc["Tinput"] = tf.data_type(gamma_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + begin + function quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "QuantizedBatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("QuantizedBatchNormWithGlobalNormalization") + begin + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + t_min_ = convert(Tensor{Float32}, t_min_) + begin + end + end + begin + t_max_ = convert(Tensor{Float32}, t_max_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + m_min_ = convert(Tensor{Float32}, m_min_) + begin + end + end + begin + m_max_ = convert(Tensor{Float32}, m_max_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + v_min_ = convert(Tensor{Float32}, v_min_) + begin + end + end + begin + v_max_ = 
convert(Tensor{Float32}, v_max_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + beta_min_ = convert(Tensor{Float32}, beta_min_) + begin + end + end + begin + beta_max_ = convert(Tensor{Float32}, beta_max_) + begin + end + end + begin + gamma_ = convert(Tensor{Any}, gamma_) + begin + end + end + begin + gamma_min_ = convert(Tensor{Float32}, gamma_min_) + begin + end + end + begin + gamma_max_ = convert(Tensor{Float32}, gamma_max_) + begin + end + end + begin + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + end + end + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, t_min_) + end + begin + tf.add_input(desc, t_max_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, m_min_) + end + begin + tf.add_input(desc, m_max_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, v_min_) + end + begin + tf.add_input(desc, v_max_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, beta_min_) + end + begin + tf.add_input(desc, beta_max_) + end + begin + tf.add_input(desc, gamma_) + end + begin + tf.add_input(desc, gamma_min_) + end + begin + tf.add_input(desc, gamma_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("QuantizedBatchNormWithGlobalNormalization") + t_ = convert(tf.EagerTensor, t_) + t_min_ = convert(tf.EagerTensor, t_min_) + t_max_ = convert(tf.EagerTensor, t_max_) + m_ = convert(tf.EagerTensor, m_) + m_min_ = convert(tf.EagerTensor, m_min_) + m_max_ = convert(tf.EagerTensor, m_max_) + v_ = convert(tf.EagerTensor, v_) + v_min_ = convert(tf.EagerTensor, v_min_) + v_max_ = convert(tf.EagerTensor, v_max_) + beta_ = convert(tf.EagerTensor, beta_) + beta_min_ = convert(tf.EagerTensor, beta_min_) + beta_max_ = convert(tf.EagerTensor, beta_max_) + gamma_ = convert(tf.EagerTensor, gamma_) + gamma_min_ = convert(tf.EagerTensor, gamma_min_) + gamma_max_ = convert(tf.EagerTensor, gamma_max_) + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, t_min_) + end + begin + tf.add_input(desc, t_max_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, m_min_) + end + begin + tf.add_input(desc, m_max_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, v_min_) + end + begin + tf.add_input(desc, v_max_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, beta_min_) + end + begin + tf.add_input(desc, beta_max_) + end + begin + tf.add_input(desc, gamma_) + end + begin + tf.add_input(desc, gamma_min_) + end + begin + tf.add_input(desc, gamma_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if variance_epsilon !== 
nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + begin + desc["Tinput"] = tf.data_type(t_) + end + begin + desc["Tinput"] = tf.data_type(m_) + end + begin + desc["Tinput"] = tf.data_type(v_) + end + begin + desc["Tinput"] = tf.data_type(beta_) + end + begin + desc["Tinput"] = tf.data_type(gamma_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_batch_norm_with_global_normalization, [t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_], name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_batch_norm_with_global_normalization(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=nothing, out_type=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + quantized_batch_norm_with_global_normalization_eager(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + quantized_batch_norm_with_global_normalization_graph(t_, t_min_, t_max_, m_, m_min_, m_max_, v_, v_min_, v_max_, beta_, beta_min_, beta_max_, gamma_, gamma_min_, gamma_max_; name=name, out_type=out_type, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - end + end end @@ -36465,35 +66401,63 @@ end """ begin - function cos_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Cos") do - desc = tf.NodeDescription("Cos") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function cos_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Cos") do + desc = tf.NodeDescription("Cos") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function cos_eager(x_; name=nothing) - desc = tf.EagerOp("Cos") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(cos, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function cos_eager(x_; name=nothing) + desc = tf.EagerOp("Cos") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(cos, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cos(x_; name=nothing) - if tf.in_eager_mode() - cos_eager(x_; name=name) - else - cos_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cos(x_; name=nothing) + if tf.in_eager_mode() + cos_eager(x_; 
name=name) + else + cos_graph(x_; name=name) + end end - end + end end @@ -36503,54 +66467,104 @@ end """ begin - function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizeDownAndShrinkRange") do - desc = tf.NodeDescription("QuantizeDownAndShrinkRange") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizeDownAndShrinkRange") do + desc = tf.NodeDescription("QuantizeDownAndShrinkRange") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_min_ = convert(Tensor{Float32}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Float32}, input_max_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizeDownAndShrinkRange") + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["Tinput"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type) + else + quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizeDownAndShrinkRange") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["Tinput"] 
= tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(quantize_down_and_shrink_range, [input_, input_min_, input_max_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_down_and_shrink_range(input_, input_min_, input_max_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantize_down_and_shrink_range_eager(input_, input_min_, input_max_; name=name, out_type=out_type) - else - quantize_down_and_shrink_range_graph(input_, input_min_, input_max_; name=name, out_type=out_type) - end - end end @@ -36560,49 +66574,89 @@ end """ begin - function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalRandomDataset") do - desc = tf.NodeDescription("ExperimentalRandomDataset") - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_random_dataset_graph(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalRandomDataset") do + desc = tf.NodeDescription("ExperimentalRandomDataset") + begin + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + seed2_ = convert(Tensor{Int64}, seed2_) + begin + end + end + end + begin + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalRandomDataset") + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) + begin + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_random_dataset_eager(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = 
tf.EagerOp("ExperimentalRandomDataset") - seed_ = convert(tf.EagerTensor, seed_) - seed2_ = convert(tf.EagerTensor, seed2_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_random_dataset, [seed_, seed2_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_random_dataset(seed_, seed2_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_random_dataset_eager(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_random_dataset_graph(seed_, seed2_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -36612,59 +66666,111 @@ end """ begin - function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - local desc - tf.with_op_name(name, "Rpc") do - desc = tf.NodeDescription("Rpc") - address_ = convert(Tensor{String}, address_) - method_ = convert(Tensor{String}, method_) - request_ = convert(Tensor{String}, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) - end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + begin + function rpc_graph(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + local desc + tf.with_op_name(name, "Rpc") do + desc = tf.NodeDescription("Rpc") + begin + begin + address_ = convert(Tensor{String}, address_) + begin + end + end + begin + method_ = convert(Tensor{String}, method_) + begin + end + end + begin + request_ = convert(Tensor{String}, request_) + begin + end + end + end + begin + begin + tf.add_input(desc, address_) + end + begin + tf.add_input(desc, method_) + end + begin + tf.add_input(desc, request_) + end + end + begin + begin + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + end + begin + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + end + begin + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + desc = tf.EagerOp("Rpc") + address_ = convert(tf.EagerTensor, address_) + method_ = convert(tf.EagerTensor, method_) + request_ = convert(tf.EagerTensor, request_) + begin + begin + tf.add_input(desc, address_) + end + begin + tf.add_input(desc, method_) + end + begin + tf.add_input(desc, request_) + end + end + begin + begin + if protocol !== nothing + desc["protocol"] = Base.String(protocol) + end + end + begin + if fail_fast !== nothing + desc["fail_fast"] = Base.Bool(fail_fast) + end + end + begin + if timeout_in_ms !== nothing + desc["timeout_in_ms"] = Base.Int(timeout_in_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(rpc, [address_, method_, request_], 
name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) + if tf.in_eager_mode() + rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + else + rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function rpc_eager(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - desc = tf.EagerOp("Rpc") - address_ = convert(tf.EagerTensor, address_) - method_ = convert(tf.EagerTensor, method_) - request_ = convert(tf.EagerTensor, request_) - tf.add_input(desc, address_) - tf.add_input(desc, method_) - tf.add_input(desc, request_) - if protocol !== nothing - desc["protocol"] = Base.String(protocol) - end - if fail_fast !== nothing - desc["fail_fast"] = Base.Bool(fail_fast) - end - if timeout_in_ms !== nothing - desc["timeout_in_ms"] = Base.Int(timeout_in_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(rpc, [address_, method_, request_], name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rpc(address_, method_, request_; name=nothing, protocol=nothing, fail_fast=nothing, timeout_in_ms=nothing) - if tf.in_eager_mode() - rpc_eager(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - else - rpc_graph(address_, method_, request_; name=name, protocol=protocol, fail_fast=fail_fast, timeout_in_ms=timeout_in_ms) - end - end end @@ -36674,33 +66780,57 @@ end """ begin - function tensor_list_length_graph(input_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorListLength") do - desc = tf.NodeDescription("TensorListLength") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) + begin + function tensor_list_length_graph(input_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorListLength") do + desc = tf.NodeDescription("TensorListLength") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_length_eager(input_handle_; name=nothing) - desc = tf.EagerOp("TensorListLength") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_list_length_eager(input_handle_; name=nothing) + desc = tf.EagerOp("TensorListLength") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_length, [input_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] 
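# Editorial note: the tail shared by every eager wrapper (tf.TapeNode plus
# tf.add_node) registers the op and its inputs with the gradient tape so it
# can be replayed during backpropagation; returning res[1] unwraps
# single-output ops. From the caller's side the mode switch is transparent,
# e.g. (the enable_eager_execution switch and `list_handle` are assumptions
# for illustration):
tf.enable_eager_execution()
n = tensor_list_length(list_handle)   # executes TensorListLength immediately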
+ end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_length(input_handle_; name=nothing) - if tf.in_eager_mode() - tensor_list_length_eager(input_handle_; name=name) - else - tensor_list_length_graph(input_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_length(input_handle_; name=nothing) + if tf.in_eager_mode() + tensor_list_length_eager(input_handle_; name=name) + else + tensor_list_length_graph(input_handle_; name=name) + end end - end + end end @@ -36710,59 +66840,95 @@ end """ begin - function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapIncompleteSize") do - desc = tf.NodeDescription("MapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function map_incomplete_size_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapIncompleteSize") do + desc = tf.NodeDescription("MapIncompleteSize") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapIncompleteSize") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if 
shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function map_incomplete_size_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapIncompleteSize") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_incomplete_size, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_incomplete_size(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_incomplete_size_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_incomplete_size_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -36772,51 +66938,87 @@ end """ begin - function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - local desc - tf.with_op_name(name, "StatelessWhile") do - desc = tf.NodeDescription("StatelessWhile") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) + begin + function stateless_while_graph(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + local desc + tf.with_op_name(name, "StatelessWhile") do + desc = tf.NodeDescription("StatelessWhile") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + desc = tf.EagerOp("StatelessWhile") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if cond !== nothing + desc["cond"] = Base.identity(cond) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) + if tf.in_eager_mode() + stateless_while_eager(input_; name=name, T=T, cond=cond, body=body) + else + 
stateless_while_graph(input_; name=name, T=T, cond=cond, body=body) + end end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_while_eager(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - desc = tf.EagerOp("StatelessWhile") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if cond !== nothing - desc["cond"] = Base.identity(cond) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - res = tf.execute(desc) - node = tf.TapeNode(stateless_while, [input_], name=nothing, T=nothing, cond=nothing, body=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_while(input_; name=nothing, T=nothing, cond=nothing, body=nothing) - if tf.in_eager_mode() - stateless_while_eager(input_; name=name, T=T, cond=cond, body=body) - else - stateless_while_graph(input_; name=name, T=T, cond=cond, body=body) - end - end end @@ -36826,59 +67028,95 @@ end """ begin - function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - local desc - tf.with_op_name(name, "SparseConditionalAccumulator") do - desc = tf.NodeDescription("SparseConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function sparse_conditional_accumulator_graph(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + local desc + tf.with_op_name(name, "SparseConditionalAccumulator") do + desc = tf.NodeDescription("SparseConditionalAccumulator") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + desc = tf.EagerOp("SparseConditionalAccumulator") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if reduction_type !== nothing + desc["reduction_type"] = Base.String(reduction_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + 
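# Editorial note: source-type ops such as SparseConditionalAccumulator and
# MapIncompleteSize take no tensor inputs; every knob travels as an attribute
# on the op description, in both graph and eager mode. Minimal eager sketch
# (assumes an initialized eager context; attr assignment mirrors the
# generated `desc["dtypes"] = map(Base.identity, dtypes)` idiom):
desc = tf.EagerOp("MapIncompleteSize")
desc["dtypes"] = map(Base.identity, [Int64])   # attr, not an input
res = tf.execute(desc)                         # one-element vector: the size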
return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) + if tf.in_eager_mode() + sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + else + sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_conditional_accumulator_eager(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - desc = tf.EagerOp("SparseConditionalAccumulator") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if reduction_type !== nothing - desc["reduction_type"] = Base.String(reduction_type) - end - res = tf.execute(desc) - node = tf.TapeNode(sparse_conditional_accumulator, [], name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_conditional_accumulator(; name=nothing, dtype=nothing, shape=nothing, container=nothing, shared_name=nothing, reduction_type=nothing) - if tf.in_eager_mode() - sparse_conditional_accumulator_eager(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - else - sparse_conditional_accumulator_graph(; name=name, dtype=dtype, shape=shape, container=container, shared_name=shared_name, reduction_type=reduction_type) - end - end end @@ -36888,42 +67126,82 @@ end """ begin - function segment_min_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMin") do - desc = tf.NodeDescription("SegmentMin") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function segment_min_eager(data_, segment_ids_; name=nothing) - desc = tf.EagerOp("SegmentMin") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - res = tf.execute(desc) - node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_min(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - 
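# Editorial note: segment_min_graph above rewrites its ids with
# `segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)`, translating
# Julia's 1-based segment labels into TensorFlow's 0-based convention before
# the op runs. Worked example from the Julia side:
data        = [5, 1, 7, 2]
segment_ids = [1, 1, 2, 2]       # first segment is 1 on the Julia side
# TensorFlow receives ids [0, 0, 1, 1], so segment_min(data, segment_ids)
# yields [min(5,1), min(7,2)] == [1, 2].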
segment_min_eager(data_, segment_ids_; name=name) - else - segment_min_graph(data_, segment_ids_; name=name) + begin + function segment_min_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMin") do + desc = tf.NodeDescription("SegmentMin") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function segment_min_eager(data_, segment_ids_; name=nothing) + desc = tf.EagerOp("SegmentMin") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + res = tf.execute(desc) + node = tf.TapeNode(segment_min, [data_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_min(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_min_eager(data_, segment_ids_; name=name) + else + segment_min_graph(data_, segment_ids_; name=name) + end end - end + end end @@ -36933,41 +67211,81 @@ end """ begin - function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) - local desc - tf.with_op_name(name, "WriteGraphSummary") do - desc = tf.NodeDescription("WriteGraphSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tensor_ = convert(Tensor{String}, tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tensor_) - end - tf.Tensor(tf.Operation(desc)) - end - function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) - desc = tf.EagerOp("WriteGraphSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tensor_) - res = tf.execute(desc) - node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing) - if tf.in_eager_mode() - write_graph_summary_eager(writer_, step_, tensor_; name=name) - else - write_graph_summary_graph(writer_, step_, tensor_; name=name) + begin + function write_graph_summary_graph(writer_, step_, tensor_; name=nothing) + local desc + tf.with_op_name(name, "WriteGraphSummary") do + desc = tf.NodeDescription("WriteGraphSummary") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tensor_ = convert(Tensor{String}, tensor_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + 
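# Inputs are wired with `tf.add_input` in the order the op schema declares them;
# for WriteGraphSummary that order is `writer`, `step`, and then `tensor`.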
tf.add_input(desc, tensor_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_graph_summary_eager(writer_, step_, tensor_; name=nothing) + desc = tf.EagerOp("WriteGraphSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(write_graph_summary, [writer_, step_, tensor_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_graph_summary(writer_, step_, tensor_; name=nothing) + if tf.in_eager_mode() + write_graph_summary_eager(writer_, step_, tensor_; name=name) + else + write_graph_summary_graph(writer_, step_, tensor_; name=name) + end end - end + end end @@ -36977,40 +67295,78 @@ end """ begin - function cholesky_grad_graph(l_, grad_; name=nothing) - local desc - tf.with_op_name(name, "CholeskyGrad") do - desc = tf.NodeDescription("CholeskyGrad") - l_ = convert(Tensor{Any}, l_) - grad_ = convert(Tensor{Any}, grad_) - (l_, grad_) = tf.tf_promote(l_, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) + begin + function cholesky_grad_graph(l_, grad_; name=nothing) + local desc + tf.with_op_name(name, "CholeskyGrad") do + desc = tf.NodeDescription("CholeskyGrad") + begin + begin + l_ = convert(Tensor{Any}, l_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (l_, grad_) = tf.tf_promote(l_, grad_) + end + end + begin + begin + tf.add_input(desc, l_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function cholesky_grad_eager(l_, grad_; name=nothing) - desc = tf.EagerOp("CholeskyGrad") - l_ = convert(tf.EagerTensor, l_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, l_) - tf.add_input(desc, grad_) - desc["T"] = tf.data_type(l_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function cholesky_grad_eager(l_, grad_; name=nothing) + desc = tf.EagerOp("CholeskyGrad") + l_ = convert(tf.EagerTensor, l_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, l_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + end + begin + desc["T"] = tf.data_type(l_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(cholesky_grad, [l_, grad_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky_grad(l_, grad_; name=nothing) - if tf.in_eager_mode() - cholesky_grad_eager(l_, grad_; name=name) - else - cholesky_grad_graph(l_, grad_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky_grad(l_, grad_; name=nothing) + if tf.in_eager_mode() + cholesky_grad_eager(l_, grad_; name=name) + else + cholesky_grad_graph(l_, grad_; name=name) + end end - end + end end @@ -37020,74 +67376,124 @@ end """ begin - function 
log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "LogUniformCandidateSampler") do - desc = tf.NodeDescription("LogUniformCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function log_uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "LogUniformCandidateSampler") do + desc = tf.NodeDescription("LogUniformCandidateSampler") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("LogUniformCandidateSampler") + true_classes_ = convert(tf.EagerTensor, true_classes_) + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + 
log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function log_uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("LogUniformCandidateSampler") - true_classes_ = convert(tf.EagerTensor, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(log_uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - log_uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - log_uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - end - end end @@ -37097,49 +67503,97 @@ end """ begin - function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "SerializeSparse") do - desc = tf.NodeDescription("SerializeSparse") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "SerializeSparse") do + desc = tf.NodeDescription("SerializeSparse") + begin + begin + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + begin + end + end + begin + sparse_values_ = convert(Tensor{Any}, sparse_values_) + begin + end + end + begin + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + begin + end + end + begin + (sparse_values_,) = tf.tf_promote(sparse_values_) + end + end + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + 
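# Single-output graph ops finish by wrapping the built Operation in one Tensor,
# whereas multi-output ops (e.g. LogUniformCandidateSampler above) collect
# `tf.Tensor(op, out_idx)` for each output into a vector.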
tf.Tensor(tf.Operation(desc)) + end + end + begin + function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + desc = tf.EagerOp("SerializeSparse") + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(sparse_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + else + serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - desc = tf.EagerOp("SerializeSparse") - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_values_ = convert(tf.EagerTensor, sparse_values_) - sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(sparse_values_) - res = tf.execute(desc) - node = tf.TapeNode(serialize_sparse, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_sparse(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - serialize_sparse_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - else - serialize_sparse_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, out_type=out_type) - end - end end @@ -37149,47 +67603,97 @@ end """ begin - function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "ScatterNdNonAliasingAdd") do - desc = tf.NodeDescription("ScatterNdNonAliasingAdd") - input_ = convert(Tensor{Any}, input_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (input_, updates_) = tf.tf_promote(input_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, input_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) - desc = tf.EagerOp("ScatterNdNonAliasingAdd") - input_ = convert(tf.EagerTensor, input_) - indices_ = convert(tf.EagerTensor, indices_) - 
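# Each eager wrapper first coerces its arguments to `tf.EagerTensor`. Note that
# the graph builder above shifts `indices_` from Julia's 1-based indexing to
# TensorFlow's 0-based convention, while the eager path as generated does not
# repeat that adjustment here.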
updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, input_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - desc["T"] = tf.data_type(input_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name) - else - scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name) + begin + function scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "ScatterNdNonAliasingAdd") do + desc = tf.NodeDescription("ScatterNdNonAliasingAdd") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (input_, updates_) = tf.tf_promote(input_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=nothing) + desc = tf.EagerOp("ScatterNdNonAliasingAdd") + input_ = convert(tf.EagerTensor, input_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_non_aliasing_add, [input_, indices_, updates_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_non_aliasing_add(input_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + scatter_nd_non_aliasing_add_eager(input_, indices_, updates_; name=name) + else + scatter_nd_non_aliasing_add_graph(input_, indices_, updates_; name=name) + end end - end + end end @@ -37199,46 +67703,80 @@ end """ begin - function ref_merge_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "RefMerge") do - desc = tf.NodeDescription("RefMerge") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - (inputs_,) = tf.tf_promote(inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function ref_merge_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "RefMerge") do + desc = tf.NodeDescription("RefMerge") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + begin + (inputs_,) = tf.tf_promote(inputs_) + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + 
begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function ref_merge_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("RefMerge") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(inputs_) - res = tf.execute(desc) - node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function ref_merge_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("RefMerge") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(inputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_merge, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - ref_merge_eager(inputs_; name=name, N=N) - else - ref_merge_graph(inputs_; name=name, N=N) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_merge(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + ref_merge_eager(inputs_; name=name, N=N) + else + ref_merge_graph(inputs_; name=name, N=N) + end end - end + end end @@ -37248,44 +67786,74 @@ end """ begin - function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListConcat") do - desc = tf.NodeDescription("TensorListConcat") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_concat_graph(input_handle_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListConcat") do + desc = tf.NodeDescription("TensorListConcat") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListConcat") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function tensor_list_concat_eager(input_handle_; name=nothing, element_dtype=nothing) + desc = 
tf.EagerOp("TensorListConcat") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_concat, [input_handle_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype) - else - tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat(input_handle_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_concat_eager(input_handle_; name=name, element_dtype=element_dtype) + else + tensor_list_concat_graph(input_handle_; name=name, element_dtype=element_dtype) + end end - end + end end @@ -37295,94 +67863,184 @@ end """ begin - function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNCanonicalToParams") do - desc = tf.NodeDescription("CudnnRNNCanonicalToParams") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - weights_ = [convert(Tensor{Any}, x) for x = weights_] - biases_ = [convert(Tensor{Any}, x) for x = biases_] - (weights_, biases_) = tf.tf_promote(weights_, biases_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, weights_) - tf.add_input(desc, biases_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNCanonicalToParams") do + desc = tf.NodeDescription("CudnnRNNCanonicalToParams") + begin + begin + num_layers_ = convert(Tensor{Int32}, num_layers_) + begin + end + end + begin + num_units_ = convert(Tensor{Int32}, num_units_) + begin + end + end + begin + input_size_ = convert(Tensor{Int32}, input_size_) + begin + end + end + begin + weights_ = [convert(Tensor{Any}, x) for x = weights_] + begin + end + end + begin + biases_ = [convert(Tensor{Any}, x) for x = biases_] + begin + end + end + begin + (weights_, biases_) = tf.tf_promote(weights_, biases_) + end + end + begin + begin + tf.add_input(desc, num_layers_) + end + begin + 
tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + begin + tf.add_input(desc, weights_) + end + begin + tf.add_input(desc, biases_) + end + end + begin + begin + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("CudnnRNNCanonicalToParams") + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) + weights_ = convert(tf.EagerTensor, weights_) + biases_ = convert(tf.EagerTensor, biases_) + begin + begin + tf.add_input(desc, num_layers_) + end + begin + tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + begin + tf.add_input(desc, weights_) + end + begin + tf.add_input(desc, biases_) + end + end + begin + begin + if num_params !== nothing + desc["num_params"] = Base.Int(num_params) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(weights_) + end + begin + desc["T"] = tf.data_type(biases_) + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end 
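# Every public wrapper is defined through `tf.@op` and simply dispatches on
# `tf.in_eager_mode()`: the `_eager` variant executes the op immediately, while
# the `_graph` variant adds a node to the current graph.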
end - end - tf.Tensor(tf.Operation(desc)) - end - function cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNCanonicalToParams") - num_layers_ = convert(tf.EagerTensor, num_layers_) - num_units_ = convert(tf.EagerTensor, num_units_) - input_size_ = convert(tf.EagerTensor, input_size_) - weights_ = convert(tf.EagerTensor, weights_) - biases_ = convert(tf.EagerTensor, biases_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - tf.add_input(desc, weights_) - tf.add_input(desc, biases_) - if num_params !== nothing - desc["num_params"] = Base.Int(num_params) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(weights_) - desc["T"] = tf.data_type(biases_) - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_canonical_to_params, [num_layers_, num_units_, input_size_, weights_, biases_], name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_canonical_to_params(num_layers_, num_units_, input_size_, weights_, biases_; name=nothing, num_params=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_canonical_to_params_eager(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_canonical_to_params_graph(num_layers_, num_units_, input_size_, weights_, biases_; name=name, num_params=num_params, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - end - end end @@ -37392,78 +68050,182 @@ end """ begin - function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyAdadelta") do - desc = tf.NodeDescription("SparseApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - 
tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyAdadelta") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - accum_update_ = convert(tf.EagerTensor, accum_update_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(accum_update_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyAdadelta") do + desc = tf.NodeDescription("SparseApplyAdadelta") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + accum_update_ = convert(Tensor{Any}, accum_update_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + 
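# Optional keyword arguments become op attributes only when they are not
# `nothing`; here `use_locking` is coerced with `Base.Bool` before being stored
# on the NodeDescription.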
begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyAdadelta") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(accum_update_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -37473,33 +68235,57 @@ end """ begin - function tensor_array_close_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayClose") do - desc = tf.NodeDescription("TensorArrayClose") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function tensor_array_close_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayClose") do + desc = tf.NodeDescription("TensorArrayClose") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_close_eager(handle_; name=nothing) - desc = tf.EagerOp("TensorArrayClose") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close(handle_; name=nothing) - if 
tf.in_eager_mode() - tensor_array_close_eager(handle_; name=name) - else - tensor_array_close_graph(handle_; name=name) + begin + function tensor_array_close_eager(handle_; name=nothing) + desc = tf.EagerOp("TensorArrayClose") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] end end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_eager(handle_; name=name) + else + tensor_array_close_graph(handle_; name=name) + end + end + end end @@ -37509,40 +68295,78 @@ end """ begin - function selu_grad_graph(gradients_, outputs_; name=nothing) - local desc - tf.with_op_name(name, "SeluGrad") do - desc = tf.NodeDescription("SeluGrad") - gradients_ = convert(Tensor{Any}, gradients_) - outputs_ = convert(Tensor{Any}, outputs_) - (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) + begin + function selu_grad_graph(gradients_, outputs_; name=nothing) + local desc + tf.with_op_name(name, "SeluGrad") do + desc = tf.NodeDescription("SeluGrad") + begin + begin + gradients_ = convert(Tensor{Any}, gradients_) + begin + end + end + begin + outputs_ = convert(Tensor{Any}, outputs_) + begin + end + end + begin + (gradients_, outputs_) = tf.tf_promote(gradients_, outputs_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, outputs_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function selu_grad_eager(gradients_, outputs_; name=nothing) - desc = tf.EagerOp("SeluGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - outputs_ = convert(tf.EagerTensor, outputs_) - tf.add_input(desc, gradients_) - tf.add_input(desc, outputs_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(outputs_) - res = tf.execute(desc) - node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function selu_grad_eager(gradients_, outputs_; name=nothing) + desc = tf.EagerOp("SeluGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + outputs_ = convert(tf.EagerTensor, outputs_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, outputs_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(outputs_) + end + res = tf.execute(desc) + node = tf.TapeNode(selu_grad, [gradients_, outputs_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing) - if tf.in_eager_mode() - selu_grad_eager(gradients_, outputs_; name=name) - else - selu_grad_graph(gradients_, outputs_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function selu_grad(gradients_, outputs_; name=nothing) + if tf.in_eager_mode() + selu_grad_eager(gradients_, outputs_; name=name) + else + selu_grad_graph(gradients_, outputs_; name=name) + end end - end + end end @@ -37552,51 +68376,103 @@ end """ begin - function 
crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) - local desc - tf.with_op_name(name, "CropAndResizeGradImage") do - desc = tf.NodeDescription("CropAndResizeGradImage") - grads_ = convert(Tensor{Float32}, grads_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - image_size_ = convert(Tensor{Int32}, image_size_) - tf.add_input(desc, grads_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, image_size_) - if method !== nothing - desc["method"] = Base.String(method) + begin + function crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + local desc + tf.with_op_name(name, "CropAndResizeGradImage") do + desc = tf.NodeDescription("CropAndResizeGradImage") + begin + begin + grads_ = convert(Tensor{Float32}, grads_) + begin + end + end + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + box_ind_ = convert(Tensor{Int32}, box_ind_) + begin + end + end + begin + image_size_ = convert(Tensor{Int32}, image_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + begin + tf.add_input(desc, image_size_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + desc = tf.EagerOp("CropAndResizeGradImage") + grads_ = convert(tf.EagerTensor, grads_) + boxes_ = convert(tf.EagerTensor, boxes_) + box_ind_ = convert(tf.EagerTensor, box_ind_) + image_size_ = convert(tf.EagerTensor, image_size_) + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + begin + tf.add_input(desc, image_size_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) + if tf.in_eager_mode() + crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + else + crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) - desc = tf.EagerOp("CropAndResizeGradImage") - grads_ = convert(tf.EagerTensor, grads_) - boxes_ = convert(tf.EagerTensor, boxes_) - box_ind_ = convert(tf.EagerTensor, box_ind_) - image_size_ = convert(tf.EagerTensor, image_size_) - tf.add_input(desc, grads_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, image_size_) - if method !== nothing - desc["method"] = Base.String(method) - end - res = tf.execute(desc) - node = tf.TapeNode(crop_and_resize_grad_image, [grads_, boxes_, box_ind_, image_size_], name=nothing, method=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize_grad_image(grads_, boxes_, box_ind_, image_size_; name=nothing, method=nothing) - if tf.in_eager_mode() - crop_and_resize_grad_image_eager(grads_, boxes_, box_ind_, image_size_; name=name, method=method) - else - crop_and_resize_grad_image_graph(grads_, boxes_, box_ind_, image_size_; name=name, method=method) - end - end end @@ -37606,37 +68482,69 @@ end """ begin - function rfft_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "RFFT") do - desc = tf.NodeDescription("RFFT") - input_ = convert(Tensor{Float32}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function rfft_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "RFFT") do + desc = tf.NodeDescription("RFFT") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rfft_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("RFFT") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rfft_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("RFFT") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(rfft, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - rfft_eager(input_, fft_length_; name=name) - else - rfft_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rfft(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + rfft_eager(input_, fft_length_; name=name) + else + rfft_graph(input_, fft_length_; name=name) + end end - end + end end @@ -37646,53 +68554,101 @@ end """ begin - function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSqlDataset") do - desc = tf.NodeDescription("ExperimentalSqlDataset") - driver_name_ = convert(Tensor{String}, driver_name_) - data_source_name_ = convert(Tensor{String}, data_source_name_) - query_ = convert(Tensor{String}, query_) - tf.add_input(desc, driver_name_) - tf.add_input(desc, data_source_name_) - tf.add_input(desc, query_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, 
output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSqlDataset") do + desc = tf.NodeDescription("ExperimentalSqlDataset") + begin + begin + driver_name_ = convert(Tensor{String}, driver_name_) + begin + end + end + begin + data_source_name_ = convert(Tensor{String}, data_source_name_) + begin + end + end + begin + query_ = convert(Tensor{String}, query_) + begin + end + end + end + begin + begin + tf.add_input(desc, driver_name_) + end + begin + tf.add_input(desc, data_source_name_) + end + begin + tf.add_input(desc, query_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSqlDataset") + driver_name_ = convert(tf.EagerTensor, driver_name_) + data_source_name_ = convert(tf.EagerTensor, data_source_name_) + query_ = convert(tf.EagerTensor, query_) + begin + begin + tf.add_input(desc, driver_name_) + end + begin + tf.add_input(desc, data_source_name_) + end + begin + tf.add_input(desc, query_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalSqlDataset") - driver_name_ = convert(tf.EagerTensor, driver_name_) - data_source_name_ = convert(tf.EagerTensor, data_source_name_) - query_ = convert(tf.EagerTensor, query_) - tf.add_input(desc, driver_name_) - tf.add_input(desc, data_source_name_) - tf.add_input(desc, query_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_sql_dataset, [driver_name_, data_source_name_, query_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
experimental_sql_dataset(driver_name_, data_source_name_, query_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_sql_dataset_eager(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_sql_dataset_graph(driver_name_, data_source_name_, query_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -37702,69 +68658,157 @@ end """ begin - function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyPowerSign") do - desc = tf.NodeDescription("ResourceApplyPowerSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - logbase_ = convert(Tensor{Any}, logbase_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyPowerSign") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - lr_ = convert(tf.EagerTensor, lr_) - logbase_ = convert(tf.EagerTensor, logbase_) - sign_decay_ = convert(tf.EagerTensor, sign_decay_) - beta_ = convert(tf.EagerTensor, beta_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(logbase_) - desc["T"] = tf.data_type(sign_decay_) - desc["T"] = tf.data_type(beta_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyPowerSign") do + desc = tf.NodeDescription("ResourceApplyPowerSign") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin 
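# Only the numeric arguments (`lr_` through `grad_`) are promoted to a common
# element type by `tf.tf_promote` below; `var_` and `m_` stay as-is, presumably
# because they are resource handles rather than values.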
+ logbase_ = convert(Tensor{Any}, logbase_) + begin + end + end + begin + sign_decay_ = convert(Tensor{Any}, sign_decay_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, logbase_, sign_decay_, beta_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, logbase_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyPowerSign") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + logbase_ = convert(tf.EagerTensor, logbase_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, logbase_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(logbase_) + end + begin + desc["T"] = tf.data_type(sign_decay_) + end + begin + desc["T"] = tf.data_type(beta_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -37774,35 +68818,63 @@ end """ begin - function matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "MatrixDeterminant") do - desc = tf.NodeDescription("MatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "MatrixDeterminant") do + desc = tf.NodeDescription("MatrixDeterminant") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - 
function matrix_determinant_eager(input_; name=nothing) - desc = tf.EagerOp("MatrixDeterminant") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_determinant, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_determinant_eager(input_; name=nothing) + desc = tf.EagerOp("MatrixDeterminant") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_determinant, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - matrix_determinant_eager(input_; name=name) - else - matrix_determinant_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + matrix_determinant_eager(input_; name=name) + else + matrix_determinant_graph(input_; name=name) + end end - end + end end @@ -37812,51 +68884,87 @@ end """ begin - function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) - local desc - tf.with_op_name(name, "StaticRegexReplace") do - desc = tf.NodeDescription("StaticRegexReplace") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) - end - if rewrite !== nothing - desc["rewrite"] = Base.String(rewrite) + begin + function static_regex_replace_graph(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + local desc + tf.with_op_name(name, "StaticRegexReplace") do + desc = tf.NodeDescription("StaticRegexReplace") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + end + begin + if rewrite !== nothing + desc["rewrite"] = Base.String(rewrite) + end + end + begin + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + desc = tf.EagerOp("StaticRegexReplace") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + end + begin + if rewrite !== nothing + desc["rewrite"] = Base.String(rewrite) + end + end + begin + if replace_global !== nothing + desc["replace_global"] = Base.Bool(replace_global) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) + if 
tf.in_eager_mode() + static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + else + static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) + end end - if replace_global !== nothing - desc["replace_global"] = Base.Bool(replace_global) - end - end - tf.Tensor(tf.Operation(desc)) - end - function static_regex_replace_eager(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) - desc = tf.EagerOp("StaticRegexReplace") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) - end - if rewrite !== nothing - desc["rewrite"] = Base.String(rewrite) - end - if replace_global !== nothing - desc["replace_global"] = Base.Bool(replace_global) - end - res = tf.execute(desc) - node = tf.TapeNode(static_regex_replace, [input_], name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_replace(input_; name=nothing, pattern=nothing, rewrite=nothing, replace_global=nothing) - if tf.in_eager_mode() - static_regex_replace_eager(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) - else - static_regex_replace_graph(input_; name=name, pattern=pattern, rewrite=rewrite, replace_global=replace_global) - end - end end @@ -37866,59 +68974,103 @@ end """ begin - function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "AvgPool") do - desc = tf.NodeDescription("AvgPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function avg_pool_graph(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "AvgPool") do + desc = tf.NodeDescription("AvgPool") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("AvgPool") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = 
Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function avg_pool_eager(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("AvgPool") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(avg_pool, [value_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function avg_pool(value_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - avg_pool_eager(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - avg_pool_graph(value_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -37928,48 +69080,102 @@ end """ begin - function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - local desc - tf.with_op_name(name, "SparseDenseCwiseAdd") do - desc = tf.NodeDescription("SparseDenseCwiseAdd") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - dense_ = convert(Tensor{Any}, dense_) - (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - desc = tf.EagerOp("SparseDenseCwiseAdd") - sp_indices_ = convert(tf.EagerTensor, sp_indices_) - sp_values_ = convert(tf.EagerTensor, sp_values_) - sp_shape_ = convert(tf.EagerTensor, sp_shape_) - dense_ = convert(tf.EagerTensor, dense_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - tf.add_input(desc, dense_) - desc["T"] = tf.data_type(sp_values_) - desc["T"] = tf.data_type(dense_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) - if tf.in_eager_mode() - sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) - else - sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + begin + function sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + local desc + tf.with_op_name(name, "SparseDenseCwiseAdd") do + desc = tf.NodeDescription("SparseDenseCwiseAdd") + begin + begin + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + begin + end + end + begin + sp_values_ = convert(Tensor{Any}, sp_values_) + begin + end + end + begin + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + begin + end + end + begin + dense_ = convert(Tensor{Any}, dense_) + begin + end + end + begin + (sp_values_, dense_) = tf.tf_promote(sp_values_, dense_) + end + end + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + tf.add_input(desc, dense_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + desc = tf.EagerOp("SparseDenseCwiseAdd") + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + dense_ = convert(tf.EagerTensor, dense_) + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + begin + tf.add_input(desc, dense_) + end + end + begin + end + begin + desc["T"] = tf.data_type(sp_values_) + end + begin + desc["T"] = tf.data_type(dense_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_dense_cwise_add, [sp_indices_, sp_values_, sp_shape_, dense_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_dense_cwise_add(sp_indices_, sp_values_, sp_shape_, dense_; name=nothing) + if tf.in_eager_mode() + sparse_dense_cwise_add_eager(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + else + sparse_dense_cwise_add_graph(sp_indices_, sp_values_, sp_shape_, dense_; name=name) + end end - end + end end @@ -37979,40 +69185,78 @@ end """ begin - function bias_add_v1_graph(value_, bias_; name=nothing) - local desc - tf.with_op_name(name, "BiasAddV1") do - desc = tf.NodeDescription("BiasAddV1") - value_ = convert(Tensor{Any}, value_) - bias_ = convert(Tensor{Any}, bias_) - (value_, bias_) = tf.tf_promote(value_, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) + begin + function bias_add_v1_graph(value_, bias_; name=nothing) + local desc + tf.with_op_name(name, "BiasAddV1") do + desc = tf.NodeDescription("BiasAddV1") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + bias_ = convert(Tensor{Any}, bias_) + begin + end + end + begin + (value_, bias_) = tf.tf_promote(value_, bias_) + end + end + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, bias_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function 
bias_add_v1_eager(value_, bias_; name=nothing) - desc = tf.EagerOp("BiasAddV1") - value_ = convert(tf.EagerTensor, value_) - bias_ = convert(tf.EagerTensor, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) - desc["T"] = tf.data_type(value_) - desc["T"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bias_add_v1_eager(value_, bias_; name=nothing) + desc = tf.EagerOp("BiasAddV1") + value_ = convert(tf.EagerTensor, value_) + bias_ = convert(tf.EagerTensor, bias_) + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, bias_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + begin + desc["T"] = tf.data_type(bias_) + end + res = tf.execute(desc) + node = tf.TapeNode(bias_add_v1, [value_, bias_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_v1(value_, bias_; name=nothing) - if tf.in_eager_mode() - bias_add_v1_eager(value_, bias_; name=name) - else - bias_add_v1_graph(value_, bias_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add_v1(value_, bias_; name=nothing) + if tf.in_eager_mode() + bias_add_v1_eager(value_, bias_; name=name) + else + bias_add_v1_graph(value_, bias_; name=name) + end end - end + end end @@ -38022,35 +69266,63 @@ end """ begin - function invert_permutation_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "InvertPermutation") do - desc = tf.NodeDescription("InvertPermutation") - x_ = convert(Tensor{Int32}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function invert_permutation_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "InvertPermutation") do + desc = tf.NodeDescription("InvertPermutation") + begin + begin + x_ = convert(Tensor{Int32}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function invert_permutation_eager(x_; name=nothing) - desc = tf.EagerOp("InvertPermutation") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(invert_permutation, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function invert_permutation_eager(x_; name=nothing) + desc = tf.EagerOp("InvertPermutation") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(invert_permutation, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert_permutation(x_; name=nothing) - if tf.in_eager_mode() - invert_permutation_eager(x_; name=name) - else - invert_permutation_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function invert_permutation(x_; name=nothing) + if tf.in_eager_mode() + invert_permutation_eager(x_; name=name) + else + invert_permutation_graph(x_; name=name) + end end - end 
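# The `tf.@op function invert_permutation(...)` wrapper above is the dispatch
# template every op in this file instantiates: try eager execution first, fall
# back to graph construction. A minimal sketch of the shape (illustrative op
# name only):
#
#     tf.@op function some_op(x_; name=nothing)
#         if tf.in_eager_mode()
#             some_op_eager(x_; name=name)    # executes immediately, returns an EagerTensor
#         else
#             some_op_graph(x_; name=name)    # records a node, returns a graph Tensor
#         end
#     end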
+ end end @@ -38060,59 +69332,95 @@ end """ begin - function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - local desc - tf.with_op_name(name, "HashTableV2") do - desc = tf.NodeDescription("HashTableV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + begin + function hash_table_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + local desc + tf.with_op_name(name, "HashTableV2") do + desc = tf.NodeDescription("HashTableV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + desc = tf.EagerOp("HashTableV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) + if tf.in_eager_mode() + hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + else + hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) + end end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function hash_table_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - desc = tf.EagerOp("HashTableV2") - if container !== nothing - desc["container"] = 
Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(hash_table_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function hash_table_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing) - if tf.in_eager_mode() - hash_table_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - else - hash_table_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype) - end - end end @@ -38122,74 +69430,162 @@ end """ begin - function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "SparseApplyMomentum") do - desc = tf.NodeDescription("SparseApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("SparseApplyMomentum") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - momentum_ = convert(tf.EagerTensor, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(momentum_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, 
use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + begin + function sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "SparseApplyMomentum") do + desc = tf.NodeDescription("SparseApplyMomentum") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + (var_, accum_, lr_, grad_, momentum_) = tf.tf_promote(var_, accum_, lr_, grad_, momentum_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("SparseApplyMomentum") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = convert(tf.EagerTensor, momentum_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + 
end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end + end end @@ -38199,53 +69595,93 @@ end An op which feeds a single Tensor value into the computation. """ begin - function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "InfeedEnqueue") do - desc = tf.NodeDescription("InfeedEnqueue") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + begin + function infeed_enqueue_graph(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "InfeedEnqueue") do + desc = tf.NodeDescription("InfeedEnqueue") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + desc = tf.EagerOp("InfeedEnqueue") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + begin + desc["dtype"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + else + infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function infeed_enqueue_eager(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - desc = tf.EagerOp("InfeedEnqueue") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - 
desc["shape"] = Base.identity(shape) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - desc["dtype"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue, [input_], name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue(input_; name=nothing, dtype=nothing, shape=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - infeed_enqueue_eager(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - else - infeed_enqueue_graph(input_; name=name, dtype=dtype, shape=shape, device_ordinal=device_ordinal) - end - end end @@ -38255,58 +69691,124 @@ end """ begin - function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomUniformInt") do - desc = tf.NodeDescription("StatelessRandomUniformInt") - shape_ = convert(Tensor{Any}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - minval_ = convert(Tensor{Any}, minval_) - maxval_ = convert(Tensor{Any}, maxval_) - (minval_, maxval_) = tf.tf_promote(minval_, maxval_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomUniformInt") do + desc = tf.NodeDescription("StatelessRandomUniformInt") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + minval_ = convert(Tensor{Any}, minval_) + begin + end + end + begin + maxval_ = convert(Tensor{Any}, maxval_) + begin + end + end + begin + (minval_, maxval_) = tf.tf_promote(minval_, maxval_) + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (seed_,) = tf.tf_promote(seed_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, minval_) + end + begin + tf.add_input(desc, maxval_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomUniformInt") + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) + minval_ = convert(tf.EagerTensor, minval_) + maxval_ = convert(tf.EagerTensor, maxval_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, minval_) + end + begin + tf.add_input(desc, maxval_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + begin + desc["Tseed"] = tf.data_type(seed_) + end + begin + desc["dtype"] = tf.data_type(minval_) + end + begin + desc["dtype"] = tf.data_type(maxval_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=nothing, 
dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + else + stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) - desc = tf.EagerOp("StatelessRandomUniformInt") - shape_ = convert(tf.EagerTensor, shape_) - seed_ = convert(tf.EagerTensor, seed_) - minval_ = convert(tf.EagerTensor, minval_) - maxval_ = convert(tf.EagerTensor, maxval_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - tf.add_input(desc, minval_) - tf.add_input(desc, maxval_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - desc["Tseed"] = tf.data_type(seed_) - desc["dtype"] = tf.data_type(minval_) - desc["dtype"] = tf.data_type(maxval_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_random_uniform_int, [shape_, seed_, minval_, maxval_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_uniform_int(shape_, seed_, minval_, maxval_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_uniform_int_eager(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) - else - stateless_random_uniform_int_graph(shape_, seed_, minval_, maxval_; name=name, dtype=dtype) - end - end end @@ -38316,65 +69818,113 @@ end Sends the named tensor from send_device to recv_device. 
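For example (device strings and the incarnation value are illustrative; real
values come from the cluster definition):

    _send(tensor; tensor_name="t0",
          send_device="/job:worker/replica:0/task:0/cpu:0",
          send_device_incarnation=1,
          recv_device="/job:worker/replica:0/task:1/cpu:0")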
""" begin - function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_Send") do - desc = tf.NodeDescription("_Send") - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + begin + function _send_graph(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_Send") do + desc = tf.NodeDescription("_Send") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_Send") + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, 
client_terminated=client_terminated) + end end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _send_eager(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - desc = tf.EagerOp("_Send") - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tensor_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(_send, [tensor_], name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _send(tensor_; name=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _send_eager(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _send_graph(tensor_; name=name, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - end - end end @@ -38384,69 +69934,133 @@ end Load embedding parameters for a single table. 
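For example (table and shard values are illustrative only; the four arguments
are the Float32 parameter, accumulator, update, and gradient-accumulator
tensors for the table, all of matching shape):

    load_tpu_embedding_adadelta_parameters_grad_accum_debug(
        params, accums, updates, grad_accums;
        table_name="table0", num_shards=1, shard_id=0)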
""" begin - function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - updates_ = convert(Tensor{Float32}, updates_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + updates_ = convert(Tensor{Float32}, updates_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, updates_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + updates_ = convert(tf.EagerTensor, updates_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, updates_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], 
name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - updates_ = convert(tf.EagerTensor, updates_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, updates_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adadelta_parameters_grad_accum_debug, [parameters_, accumulators_, updates_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adadelta_parameters_grad_accum_debug(parameters_, accumulators_, updates_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(parameters_, accumulators_, updates_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -38456,67 +70070,119 @@ end """ begin - function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - 
tf.with_op_name(name, "MapPeek") do - desc = tf.NodeDescription("MapPeek") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapPeek") do + desc = tf.NodeDescription("MapPeek") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapPeek") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = 
tf.EagerOp("MapPeek") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -38526,47 +70192,99 @@ end """ begin - function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) - local desc - tf.with_op_name(name, "WriteScalarSummary") do - desc = tf.NodeDescription("WriteScalarSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, value_) - end - tf.Tensor(tf.Operation(desc)) - end - function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) - desc = tf.EagerOp("WriteScalarSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tag_ = convert(tf.EagerTensor, tag_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, value_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) - if tf.in_eager_mode() - write_scalar_summary_eager(writer_, step_, tag_, value_; name=name) - else - write_scalar_summary_graph(writer_, step_, tag_, value_; name=name) + begin + function write_scalar_summary_graph(writer_, step_, tag_, value_; name=nothing) + local desc + tf.with_op_name(name, "WriteScalarSummary") do + desc = tf.NodeDescription("WriteScalarSummary") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + 
tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_scalar_summary_eager(writer_, step_, tag_, value_; name=nothing) + desc = tf.EagerOp("WriteScalarSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, value_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(write_scalar_summary, [writer_, step_, tag_, value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_scalar_summary(writer_, step_, tag_, value_; name=nothing) + if tf.in_eager_mode() + write_scalar_summary_eager(writer_, step_, tag_, value_; name=name) + else + write_scalar_summary_graph(writer_, step_, tag_, value_; name=name) + end end - end + end end @@ -38576,68 +70294,114 @@ end """ begin - function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapUnstageNoKey") do - desc = tf.NodeDescription("OrderedMapUnstageNoKey") - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function ordered_map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapUnstageNoKey") do + desc = tf.NodeDescription("OrderedMapUnstageNoKey") + begin + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapUnstageNoKey") + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) 
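+ # Each optional attribute is written only when the caller passes a
+ # non-nothing value; attrs left unset fall back to the defaults registered
+ # in the op definition, so the wrapper never has to restate them.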
+ end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ordered_map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapUnstageNoKey") - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -38647,88 +70411,212 @@ end """ begin - function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyCenteredRMSProp") do - desc = tf.NodeDescription("SparseApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, 
momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyCenteredRMSProp") - var_ = convert(tf.EagerTensor, var_) - mg_ = convert(tf.EagerTensor, mg_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(mg_) - desc["T"] = tf.data_type(ms_) - desc["T"] = tf.data_type(mom_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyCenteredRMSProp") do + desc = tf.NodeDescription("SparseApplyCenteredRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + mg_ = convert(Tensor{Any}, mg_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin 
+ rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyCenteredRMSProp") + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(mg_) + end + begin + desc["T"] = tf.data_type(ms_) + end + begin + desc["T"] = tf.data_type(mom_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + 
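+ # Every generated op ships as this triple: a _graph builder, an _eager
+ # executor, and a thin dispatcher keyed on tf.in_eager_mode(). A minimal
+ # hand-written sketch of the same dispatch, with hypothetical names:
+ # myop(x; name=nothing) = tf.in_eager_mode() ? myop_eager(x; name=name) : myop_graph(x; name=name)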
else + sparse_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -38738,70 +70626,136 @@ end """ begin - function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropInputV2") do - desc = tf.NodeDescription("Conv3DBackpropInputV2") - input_sizes_ = convert(Tensor{Int32}, input_sizes_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) - (input_sizes_,) = tf.tf_promote(input_sizes_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) + begin + function conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropInputV2") do + desc = tf.NodeDescription("Conv3DBackpropInputV2") + begin + begin + input_sizes_ = convert(Tensor{Int32}, input_sizes_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (filter_, out_backprop_) = tf.tf_promote(filter_, out_backprop_) + end + begin + (input_sizes_,) = tf.tf_promote(input_sizes_) + end + end + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropInputV2") + input_sizes_ = convert(tf.EagerTensor, input_sizes_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_sizes_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["Tshape"] = tf.data_type(input_sizes_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + 
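+ # Eager mode has no graph-level type inference, so the op's type attrs
+ # ("T", "Tshape") are filled in from the runtime dtypes of the inputs
+ # via tf.data_type immediately before tf.execute runs the kernel.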
end + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv3DBackpropInputV2") - input_sizes_ = convert(tf.EagerTensor, input_sizes_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_sizes_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tshape"] = tf.data_type(input_sizes_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_input_v2, [input_sizes_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_input_v2(input_sizes_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_input_v2_eager(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - conv3d_backprop_input_v2_graph(input_sizes_, filter_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -38811,58 +70765,92 @@ end Retrieve embedding parameters for a single table. 
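This op has two outputs, so the generated wrappers return a vector of tensors: the graph path collects tf.Tensor(op, out_idx) for out_idx = 1:2, and the eager path returns res whole rather than res[1].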
""" begin - function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingProximalAdagradParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingProximalAdagradParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingProximalAdagradParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - 
desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_proximal_adagrad_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_proximal_adagrad_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_proximal_adagrad_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_proximal_adagrad_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -38872,47 +70860,83 @@ end """ begin - function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "RandomShuffle") do - desc = tf.NodeDescription("RandomShuffle") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function random_shuffle_graph(value_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomShuffle") do + desc = tf.NodeDescription("RandomShuffle") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("RandomShuffle") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2) + else + random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - tf.Tensor(tf.Operation(desc)) end - function random_shuffle_eager(value_; name=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("RandomShuffle") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(random_shuffle, [value_], name=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_shuffle(value_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_shuffle_eager(value_; name=name, seed=seed, seed2=seed2) - else - random_shuffle_graph(value_; name=name, seed=seed, seed2=seed2) - end - end end @@ -38922,74 +70946,124 @@ end """ begin - function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "UniformCandidateSampler") do - desc = tf.NodeDescription("UniformCandidateSampler") - true_classes_ = convert(Tensor{Int64}, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) + begin + function uniform_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "UniformCandidateSampler") do + desc = tf.NodeDescription("UniformCandidateSampler") + begin + begin + true_classes_ = convert(Tensor{Int64}, true_classes_) + begin + end + end + end + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("UniformCandidateSampler") + true_classes_ = convert(tf.EagerTensor, true_classes_) + begin + begin + tf.add_input(desc, true_classes_) + end + end + begin + begin + if num_true !== nothing + desc["num_true"] = Base.Int(num_true) + end + end + begin + if num_sampled !== nothing + desc["num_sampled"] = Base.Int(num_sampled) + end + end + begin + if unique !== nothing + desc["unique"] = Base.Bool(unique) + end + end + begin + if range_max !== nothing + desc["range_max"] = Base.Int(range_max) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, 
seed=nothing, seed2=nothing) + if tf.in_eager_mode() + uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + else + uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) + end end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function uniform_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("UniformCandidateSampler") - true_classes_ = convert(tf.EagerTensor, true_classes_) - tf.add_input(desc, true_classes_) - if num_true !== nothing - desc["num_true"] = Base.Int(num_true) - end - if num_sampled !== nothing - desc["num_sampled"] = Base.Int(num_sampled) - end - if unique !== nothing - desc["unique"] = Base.Bool(unique) - end - if range_max !== nothing - desc["range_max"] = Base.Int(range_max) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(uniform_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function uniform_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - uniform_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - else - uniform_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2) - end - end end @@ -38999,47 +71073,99 @@ end """ begin - function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySplitV2") do - desc = tf.NodeDescription("TensorArraySplitV2") - handle_ = convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - lengths_ = convert(Tensor{Int64}, lengths_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySplitV2") - handle_ = convert(tf.EagerTensor, handle_) - value_ = convert(tf.EagerTensor, value_) - lengths_ = convert(tf.EagerTensor, lengths_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, lengths_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, 
flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name) - else - tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name) + begin + function tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySplitV2") do + desc = tf.NodeDescription("TensorArraySplitV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + lengths_ = convert(Tensor{Int64}, lengths_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySplitV2") + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + lengths_ = convert(tf.EagerTensor, lengths_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, lengths_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_split_v2, [handle_, value_, lengths_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_split_v2(handle_, value_, lengths_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_split_v2_eager(handle_, value_, lengths_, flow_in_; name=name) + else + tensor_array_split_v2_graph(handle_, value_, lengths_, flow_in_; name=name) + end end - end + end end @@ -39049,130 +71175,236 @@ end """ begin - function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - local desc - tf.with_op_name(name, "MutableDenseHashTableV2") do - desc = tf.NodeDescription("MutableDenseHashTableV2") - empty_key_ = convert(Tensor{Any}, empty_key_) - deleted_key_ = convert(Tensor{Any}, deleted_key_) - (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_) - tf.add_input(desc, empty_key_) - tf.add_input(desc, deleted_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, 
max_load_factor=nothing) + local desc + tf.with_op_name(name, "MutableDenseHashTableV2") do + desc = tf.NodeDescription("MutableDenseHashTableV2") + begin + begin + empty_key_ = convert(Tensor{Any}, empty_key_) + begin + end + end + begin + deleted_key_ = convert(Tensor{Any}, deleted_key_) + begin + end + end + begin + (empty_key_, deleted_key_) = tf.tf_promote(empty_key_, deleted_key_) + end + end + begin + begin + tf.add_input(desc, empty_key_) + end + begin + tf.add_input(desc, deleted_key_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + begin + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + end + begin + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + desc = tf.EagerOp("MutableDenseHashTableV2") + empty_key_ = convert(tf.EagerTensor, empty_key_) + deleted_key_ = convert(tf.EagerTensor, deleted_key_) + begin + begin + tf.add_input(desc, empty_key_) + end + begin + tf.add_input(desc, deleted_key_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + begin + if initial_num_buckets !== nothing + desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + end + end + begin + if max_load_factor !== nothing + desc["max_load_factor"] = Base.identity(max_load_factor) + end + end + end + begin + desc["key_dtype"] = tf.data_type(empty_key_) + end + begin + desc["key_dtype"] = tf.data_type(deleted_key_) + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, 
use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) + if tf.in_eager_mode() + mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + else + mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + end end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) + end +end + + +""" + draw_bounding_boxes(images, boxes) + + +""" +begin + begin + function draw_bounding_boxes_graph(images_, boxes_; name=nothing) + local desc + tf.with_op_name(name, "DrawBoundingBoxes") do + desc = tf.NodeDescription("DrawBoundingBoxes") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, boxes_) + end + end + begin + end end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) + tf.Tensor(tf.Operation(desc)) + end + end + begin + function draw_bounding_boxes_eager(images_, boxes_; name=nothing) + desc = tf.EagerOp("DrawBoundingBoxes") + images_ = convert(tf.EagerTensor, images_) + boxes_ = convert(tf.EagerTensor, boxes_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, boxes_) + end end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) + begin end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) + begin + desc["T"] = tf.data_type(images_) end - if max_load_factor !== nothing - desc["max_load_factor"] = Base.identity(max_load_factor) + res = tf.execute(desc) + node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] end end - tf.Tensor(tf.Operation(desc)) end - function mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - desc = tf.EagerOp("MutableDenseHashTableV2") - empty_key_ = convert(tf.EagerTensor, empty_key_) - deleted_key_ = convert(tf.EagerTensor, deleted_key_) - tf.add_input(desc, empty_key_) - tf.add_input(desc, deleted_key_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = 
Base.identity(value_shape) - end - if initial_num_buckets !== nothing - desc["initial_num_buckets"] = Base.Int(initial_num_buckets) - end - if max_load_factor !== nothing - desc["max_load_factor"] = Base.identity(max_load_factor) - end - desc["key_dtype"] = tf.data_type(empty_key_) - desc["key_dtype"] = tf.data_type(deleted_key_) - res = tf.execute(desc) - node = tf.TapeNode(mutable_dense_hash_table_v2, [empty_key_, deleted_key_], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_dense_hash_table_v2(empty_key_, deleted_key_; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, initial_num_buckets=nothing, max_load_factor=nothing) - if tf.in_eager_mode() - mutable_dense_hash_table_v2_eager(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) - else - mutable_dense_hash_table_v2_graph(empty_key_, deleted_key_; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape, initial_num_buckets=initial_num_buckets, max_load_factor=max_load_factor) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) + if tf.in_eager_mode() + draw_bounding_boxes_eager(images_, boxes_; name=name) + else + draw_bounding_boxes_graph(images_, boxes_; name=name) + end end - end -end - - -""" - draw_bounding_boxes(images, boxes) - - -""" -begin - function draw_bounding_boxes_graph(images_, boxes_; name=nothing) - local desc - tf.with_op_name(name, "DrawBoundingBoxes") do - desc = tf.NodeDescription("DrawBoundingBoxes") - images_ = convert(Tensor{Float32}, images_) - boxes_ = convert(Tensor{Float32}, boxes_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, boxes_) - end - tf.Tensor(tf.Operation(desc)) end - function draw_bounding_boxes_eager(images_, boxes_; name=nothing) - desc = tf.EagerOp("DrawBoundingBoxes") - images_ = convert(tf.EagerTensor, images_) - boxes_ = convert(tf.EagerTensor, boxes_) - tf.add_input(desc, images_) - tf.add_input(desc, boxes_) - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(draw_bounding_boxes, [images_, boxes_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function draw_bounding_boxes(images_, boxes_; name=nothing) - if tf.in_eager_mode() - draw_bounding_boxes_eager(images_, boxes_; name=name) - else - draw_bounding_boxes_graph(images_, boxes_; name=name) - end - end end @@ -39182,73 +71414,167 @@ end """ begin - function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyProximalAdagrad") do - desc = 
tf.NodeDescription("SparseApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyProximalAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) - else - sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + begin + function sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyProximalAdagrad") do + desc = tf.NodeDescription("SparseApplyProximalAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, 
accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyProximalAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + else + sparse_apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -39258,53 +71584,101 @@ end """ begin - function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "RangeDataset") do - desc = tf.NodeDescription("RangeDataset") - start_ = convert(Tensor{Int64}, start_) - stop_ = convert(Tensor{Int64}, stop_) - step_ = convert(Tensor{Int64}, step_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, step_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function range_dataset_graph(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "RangeDataset") do + desc = tf.NodeDescription("RangeDataset") + begin + begin + start_ = convert(Tensor{Int64}, start_) + begin + end + end + begin + stop_ = convert(Tensor{Int64}, stop_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + end + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, stop_) + end + begin + tf.add_input(desc, step_) + end + end + begin + begin + if output_types !== nothing + 
desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("RangeDataset") + start_ = convert(tf.EagerTensor, start_) + stop_ = convert(tf.EagerTensor, stop_) + step_ = convert(tf.EagerTensor, step_) + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, stop_) + end + begin + tf.add_input(desc, step_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) + else + range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function range_dataset_eager(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("RangeDataset") - start_ = convert(tf.EagerTensor, start_) - stop_ = convert(tf.EagerTensor, stop_) - step_ = convert(tf.EagerTensor, step_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, step_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(range_dataset, [start_, stop_, step_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range_dataset(start_, stop_, step_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - range_dataset_eager(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) - else - range_dataset_graph(start_, stop_, step_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -39314,37 +71688,69 @@ end """ begin - function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) - local desc - tf.with_op_name(name, "ReaderRestoreStateV2") do - desc = tf.NodeDescription("ReaderRestoreStateV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - state_ = convert(Tensor{String}, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) + begin + function reader_restore_state_v2_graph(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreStateV2") do + desc = tf.NodeDescription("ReaderRestoreStateV2") + 
begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + begin + state_ = convert(Tensor{String}, state_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, state_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing) - desc = tf.EagerOp("ReaderRestoreStateV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - state_ = convert(tf.EagerTensor, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) - res = tf.execute(desc) - node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_restore_state_v2_eager(reader_handle_, state_; name=nothing) + desc = tf.EagerOp("ReaderRestoreStateV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + state_ = convert(tf.EagerTensor, state_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, state_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_restore_state_v2, [reader_handle_, state_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing) - if tf.in_eager_mode() - reader_restore_state_v2_eager(reader_handle_, state_; name=name) - else - reader_restore_state_v2_graph(reader_handle_, state_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state_v2(reader_handle_, state_; name=nothing) + if tf.in_eager_mode() + reader_restore_state_v2_eager(reader_handle_, state_; name=name) + else + reader_restore_state_v2_graph(reader_handle_, state_; name=name) + end end - end + end end @@ -39354,50 +71760,92 @@ end """ begin - function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) - local desc - tf.with_op_name(name, "TopKV2") do - desc = tf.NodeDescription("TopKV2") - input_ = convert(Tensor{Any}, input_) - k_ = convert(Tensor{Int32}, k_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, k_) - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) + begin + function top_kv2_graph(input_, k_; name=nothing, sorted=nothing) + local desc + tf.with_op_name(name, "TopKV2") do + desc = tf.NodeDescription("TopKV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + k_ = convert(Tensor{Int32}, k_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, k_) + end + end + begin + begin + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function top_kv2_eager(input_, k_; name=nothing, sorted=nothing) + desc = tf.EagerOp("TopKV2") + input_ = convert(tf.EagerTensor, input_) + k_ = convert(tf.EagerTensor, k_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, k_) + end + end + begin + begin + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + end + end + begin 
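+                              # Eager ops get no graph-level type inference, so the "T" attribute is
+                              # filled in here from the runtime dtype of the input handle (hedged
+                              # annotation, not part of the generated file). Because TopKV2 has two
+                              # outputs (values and indices), tf.execute returns a vector of handles
+                              # and the function returns `res` whole rather than `res[1]`.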
+ desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing) + if tf.in_eager_mode() + top_kv2_eager(input_, k_; name=name, sorted=sorted) + else + top_kv2_graph(input_, k_; name=name, sorted=sorted) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function top_kv2_eager(input_, k_; name=nothing, sorted=nothing) - desc = tf.EagerOp("TopKV2") - input_ = convert(tf.EagerTensor, input_) - k_ = convert(tf.EagerTensor, k_) - tf.add_input(desc, input_) - tf.add_input(desc, k_) - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(top_kv2, [input_, k_], name=nothing, sorted=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_kv2(input_, k_; name=nothing, sorted=nothing) - if tf.in_eager_mode() - top_kv2_eager(input_, k_; name=name, sorted=sorted) - else - top_kv2_graph(input_, k_; name=name, sorted=sorted) - end - end end @@ -39407,35 +71855,63 @@ end """ begin - function atanh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Atanh") do - desc = tf.NodeDescription("Atanh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function atanh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Atanh") do + desc = tf.NodeDescription("Atanh") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function atanh_eager(x_; name=nothing) - desc = tf.EagerOp("Atanh") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(atanh, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function atanh_eager(x_; name=nothing) + desc = tf.EagerOp("Atanh") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(atanh, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atanh(x_; name=nothing) - if tf.in_eager_mode() - atanh_eager(x_; name=name) - else - atanh_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function atanh(x_; name=nothing) + if tf.in_eager_mode() + atanh_eager(x_; name=name) + else + atanh_graph(x_; name=name) + end end - end + end end @@ -39445,35 +71921,63 @@ end """ begin - function debug_gradient_identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DebugGradientIdentity") do - desc = tf.NodeDescription("DebugGradientIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + 
begin + function debug_gradient_identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DebugGradientIdentity") do + desc = tf.NodeDescription("DebugGradientIdentity") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function debug_gradient_identity_eager(input_; name=nothing) - desc = tf.EagerOp("DebugGradientIdentity") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function debug_gradient_identity_eager(input_; name=nothing) + desc = tf.EagerOp("DebugGradientIdentity") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(debug_gradient_identity, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_identity(input_; name=nothing) - if tf.in_eager_mode() - debug_gradient_identity_eager(input_; name=name) - else - debug_gradient_identity_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_identity(input_; name=nothing) + if tf.in_eager_mode() + debug_gradient_identity_eager(input_; name=name) + else + debug_gradient_identity_graph(input_; name=name) + end end - end + end end @@ -39483,52 +71987,106 @@ end """ begin - function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) - local desc - tf.with_op_name(name, "SparseAddGrad") do - desc = tf.NodeDescription("SparseAddGrad") - backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) - a_indices_ = convert(Tensor{Int64}, a_indices_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - sum_indices_ = convert(Tensor{Int64}, sum_indices_) - (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, sum_indices_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) - desc = tf.EagerOp("SparseAddGrad") - backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) - a_indices_ = convert(tf.EagerTensor, a_indices_) - b_indices_ = convert(tf.EagerTensor, b_indices_) - sum_indices_ = convert(tf.EagerTensor, sum_indices_) - tf.add_input(desc, backprop_val_grad_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, sum_indices_) - desc["T"] = tf.data_type(backprop_val_grad_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) - if tf.in_eager_mode() - sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) - else - sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + begin + function sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + local desc + tf.with_op_name(name, "SparseAddGrad") do + desc = tf.NodeDescription("SparseAddGrad") + begin + begin + backprop_val_grad_ = convert(Tensor{Any}, backprop_val_grad_) + begin + end + end + begin + a_indices_ = convert(Tensor{Int64}, a_indices_) + begin + end + end + begin + b_indices_ = convert(Tensor{Int64}, b_indices_) + begin + end + end + begin + sum_indices_ = convert(Tensor{Int64}, sum_indices_) + begin + end + end + begin + (backprop_val_grad_,) = tf.tf_promote(backprop_val_grad_) + end + end + begin + begin + tf.add_input(desc, backprop_val_grad_) + end + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, sum_indices_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + desc = tf.EagerOp("SparseAddGrad") + backprop_val_grad_ = convert(tf.EagerTensor, backprop_val_grad_) + a_indices_ = convert(tf.EagerTensor, a_indices_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + sum_indices_ = convert(tf.EagerTensor, sum_indices_) + begin + begin + tf.add_input(desc, backprop_val_grad_) + end + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, sum_indices_) + end + end + begin + end + begin + desc["T"] = tf.data_type(backprop_val_grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_add_grad, [backprop_val_grad_, a_indices_, b_indices_, sum_indices_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_add_grad(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=nothing) + if tf.in_eager_mode() + sparse_add_grad_eager(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + else + sparse_add_grad_graph(backprop_val_grad_, a_indices_, b_indices_, sum_indices_; name=name) + end end - end + end end @@ -39538,52 +72096,104 @@ end """ begin - function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterAdd") do - desc = tf.NodeDescription("ResourceScatterAdd") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_add_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterAdd") do + desc = tf.NodeDescription("ResourceScatterAdd") + begin + begin + resource_ 
= convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterAdd") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_add_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterAdd") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_add, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_add(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_add_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_add_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -39593,35 +72203,63 @@ end """ begin - function ceil_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Ceil") do - desc = tf.NodeDescription("Ceil") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function ceil_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Ceil") do + desc = tf.NodeDescription("Ceil") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin 
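+                          # tf_promote unifies the element types of the numeric inputs so the
+                          # node's "T" attribute stays consistent across them (clarifying comment
+                          # only, not in the generated file). For a unary op like Ceil it
+                          # essentially just re-wraps the single input.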
+ (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ceil_eager(x_; name=nothing) - desc = tf.EagerOp("Ceil") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(ceil, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ceil_eager(x_; name=nothing) + desc = tf.EagerOp("Ceil") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(ceil, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ceil(x_; name=nothing) - if tf.in_eager_mode() - ceil_eager(x_; name=name) - else - ceil_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ceil(x_; name=nothing) + if tf.in_eager_mode() + ceil_eager(x_; name=name) + else + ceil_graph(x_; name=name) + end end - end + end end @@ -39631,47 +72269,91 @@ end """ begin - function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "Save") do - desc = tf.NodeDescription("Save") - filename_ = convert(Tensor{String}, filename_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) + begin + function save_graph(filename_, tensor_names_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "Save") do + desc = tf.NodeDescription("Save") + begin + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + begin + tensor_names_ = convert(Tensor{String}, tensor_names_) + begin + end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + end + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing) + desc = tf.EagerOp("Save") + filename_ = convert(tf.EagerTensor, filename_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing) + if tf.in_eager_mode() + save_eager(filename_, tensor_names_, data_; name=name, T=T) + else + save_graph(filename_, tensor_names_, data_; name=name, T=T) + end 
end - end - tf.Tensor(tf.Operation(desc)) - end - function save_eager(filename_, tensor_names_, data_; name=nothing, T=nothing) - desc = tf.EagerOp("Save") - filename_ = convert(tf.EagerTensor, filename_) - tensor_names_ = convert(tf.EagerTensor, tensor_names_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - res = tf.execute(desc) - node = tf.TapeNode(save, [filename_, tensor_names_, data_], name=nothing, T=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save(filename_, tensor_names_, data_; name=nothing, T=nothing) - if tf.in_eager_mode() - save_eager(filename_, tensor_names_, data_; name=name, T=T) - else - save_graph(filename_, tensor_names_, data_; name=name, T=T) - end - end end @@ -39681,58 +72363,92 @@ end Retrieve embedding parameters for a single table. """ begin - function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingCenteredRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingCenteredRMSPropParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, 
shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingCenteredRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_centered_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_centered_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_centered_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_centered_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -39742,58 +72458,116 @@ end """ begin - function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "QuantizedConcat") do - desc = tf.NodeDescription("QuantizedConcat") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - values_ = [convert(Tensor{Any}, x) for x = values_] - input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_] - input_maxes_ = [convert(Tensor{Float32}, x) for x = input_maxes_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - tf.add_input(desc, input_mins_) - tf.add_input(desc, input_maxes_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "QuantizedConcat") do + desc = tf.NodeDescription("QuantizedConcat") + begin + begin + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + input_mins_ = [convert(Tensor{Float32}, x) for x = input_mins_] + begin + end + end + begin + input_maxes_ = [convert(Tensor{Float32}, x) for x = input_maxes_] + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, input_mins_) + end + begin 
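+                              # values_, input_mins_ and input_maxes_ are list-valued inputs
+                              # (arrays of tensors, converted above with comprehensions); the
+                              # optional "N" attribute below records how many tensors are being
+                              # concatenated. Explanatory comment, not part of the generated file.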
+ tf.add_input(desc, input_maxes_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + desc = tf.EagerOp("QuantizedConcat") + concat_dim_ = convert(tf.EagerTensor, concat_dim_) + values_ = convert(tf.EagerTensor, values_) + input_mins_ = convert(tf.EagerTensor, input_mins_) + input_maxes_ = convert(tf.EagerTensor, input_maxes_) + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, input_mins_) + end + begin + tf.add_input(desc, input_maxes_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) + if tf.in_eager_mode() + quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) + else + quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) - desc = tf.EagerOp("QuantizedConcat") - concat_dim_ = convert(tf.EagerTensor, concat_dim_) - values_ = convert(tf.EagerTensor, values_) - input_mins_ = convert(tf.EagerTensor, input_mins_) - input_maxes_ = convert(tf.EagerTensor, input_maxes_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, values_) - tf.add_input(desc, input_mins_) - tf.add_input(desc, input_maxes_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_concat, [concat_dim_, values_, input_mins_, input_maxes_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_concat(concat_dim_, values_, input_mins_, input_maxes_; name=nothing, N=nothing) - if tf.in_eager_mode() - quantized_concat_eager(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) - else - quantized_concat_graph(concat_dim_, values_, input_mins_, input_maxes_; name=name, N=N) - end - end end @@ -39803,35 +72577,63 @@ end """ begin - function zeros_like_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "ZerosLike") do - desc = tf.NodeDescription("ZerosLike") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function zeros_like_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "ZerosLike") do + desc = tf.NodeDescription("ZerosLike") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + 
tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function zeros_like_eager(x_; name=nothing) - desc = tf.EagerOp("ZerosLike") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(zeros_like, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function zeros_like_eager(x_; name=nothing) + desc = tf.EagerOp("ZerosLike") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(zeros_like, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeros_like(x_; name=nothing) - if tf.in_eager_mode() - zeros_like_eager(x_; name=name) - else - zeros_like_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeros_like(x_; name=nothing) + if tf.in_eager_mode() + zeros_like_eager(x_; name=name) + else + zeros_like_graph(x_; name=name) + end end - end + end end @@ -39841,76 +72643,130 @@ end """ begin - function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "FractionalAvgPool") do - desc = tf.NodeDescription("FractionalAvgPool") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) + begin + function fractional_avg_pool_graph(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPool") do + desc = tf.NodeDescription("FractionalAvgPool") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if pooling_ratio !== nothing + desc["pooling_ratio"] = map(Base.identity, pooling_ratio) + end + end + begin + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + end + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + begin + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("FractionalAvgPool") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + begin + if pooling_ratio !== nothing + desc["pooling_ratio"] 
= map(Base.identity, pooling_ratio) + end + end + begin + if pseudo_random !== nothing + desc["pseudo_random"] = Base.Bool(pseudo_random) + end + end + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + begin + if deterministic !== nothing + desc["deterministic"] = Base.Bool(deterministic) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + else + fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) + end end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fractional_avg_pool_eager(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("FractionalAvgPool") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - if pooling_ratio !== nothing - desc["pooling_ratio"] = map(Base.identity, pooling_ratio) - end - if pseudo_random !== nothing - desc["pseudo_random"] = Base.Bool(pseudo_random) - end - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - if deterministic !== nothing - desc["deterministic"] = Base.Bool(deterministic) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(fractional_avg_pool, [value_], name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool(value_; name=nothing, pooling_ratio=nothing, pseudo_random=nothing, overlapping=nothing, deterministic=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - fractional_avg_pool_eager(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2) - else - fractional_avg_pool_graph(value_; name=name, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random, overlapping=overlapping, 
deterministic=deterministic, seed=seed, seed2=seed2) - end - end end @@ -39920,62 +72776,136 @@ end """ begin - function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) - local desc - tf.with_op_name(name, "EditDistance") do - desc = tf.NodeDescription("EditDistance") - hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_) - hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_) - hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_) - truth_indices_ = convert(Tensor{Int64}, truth_indices_) - truth_values_ = convert(Tensor{Any}, truth_values_) - truth_shape_ = convert(Tensor{Int64}, truth_shape_) - (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_) - tf.add_input(desc, hypothesis_indices_) - tf.add_input(desc, hypothesis_values_) - tf.add_input(desc, hypothesis_shape_) - tf.add_input(desc, truth_indices_) - tf.add_input(desc, truth_values_) - tf.add_input(desc, truth_shape_) - if normalize !== nothing - desc["normalize"] = Base.Bool(normalize) + begin + function edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + local desc + tf.with_op_name(name, "EditDistance") do + desc = tf.NodeDescription("EditDistance") + begin + begin + hypothesis_indices_ = convert(Tensor{Int64}, hypothesis_indices_) + begin + end + end + begin + hypothesis_values_ = convert(Tensor{Any}, hypothesis_values_) + begin + end + end + begin + hypothesis_shape_ = convert(Tensor{Int64}, hypothesis_shape_) + begin + end + end + begin + truth_indices_ = convert(Tensor{Int64}, truth_indices_) + begin + end + end + begin + truth_values_ = convert(Tensor{Any}, truth_values_) + begin + end + end + begin + truth_shape_ = convert(Tensor{Int64}, truth_shape_) + begin + end + end + begin + (hypothesis_values_, truth_values_) = tf.tf_promote(hypothesis_values_, truth_values_) + end + end + begin + begin + tf.add_input(desc, hypothesis_indices_) + end + begin + tf.add_input(desc, hypothesis_values_) + end + begin + tf.add_input(desc, hypothesis_shape_) + end + begin + tf.add_input(desc, truth_indices_) + end + begin + tf.add_input(desc, truth_values_) + end + begin + tf.add_input(desc, truth_shape_) + end + end + begin + begin + if normalize !== nothing + desc["normalize"] = Base.Bool(normalize) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + desc = tf.EagerOp("EditDistance") + hypothesis_indices_ = convert(tf.EagerTensor, hypothesis_indices_) + hypothesis_values_ = convert(tf.EagerTensor, hypothesis_values_) + hypothesis_shape_ = convert(tf.EagerTensor, hypothesis_shape_) + truth_indices_ = convert(tf.EagerTensor, truth_indices_) + truth_values_ = convert(tf.EagerTensor, truth_values_) + truth_shape_ = convert(tf.EagerTensor, truth_shape_) + begin + begin + tf.add_input(desc, hypothesis_indices_) + end + begin + tf.add_input(desc, hypothesis_values_) + end + begin + tf.add_input(desc, hypothesis_shape_) + end + begin + tf.add_input(desc, truth_indices_) + end + begin + tf.add_input(desc, truth_values_) + end + begin + tf.add_input(desc, truth_shape_) + end + end + begin + begin + if normalize !== nothing + desc["normalize"] = Base.Bool(normalize) + end + end + 
end + begin + desc["T"] = tf.data_type(hypothesis_values_) + end + begin + desc["T"] = tf.data_type(truth_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) + if tf.in_eager_mode() + edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) + else + edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) + end end - end - tf.Tensor(tf.Operation(desc)) end - function edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) - desc = tf.EagerOp("EditDistance") - hypothesis_indices_ = convert(tf.EagerTensor, hypothesis_indices_) - hypothesis_values_ = convert(tf.EagerTensor, hypothesis_values_) - hypothesis_shape_ = convert(tf.EagerTensor, hypothesis_shape_) - truth_indices_ = convert(tf.EagerTensor, truth_indices_) - truth_values_ = convert(tf.EagerTensor, truth_values_) - truth_shape_ = convert(tf.EagerTensor, truth_shape_) - tf.add_input(desc, hypothesis_indices_) - tf.add_input(desc, hypothesis_values_) - tf.add_input(desc, hypothesis_shape_) - tf.add_input(desc, truth_indices_) - tf.add_input(desc, truth_values_) - tf.add_input(desc, truth_shape_) - if normalize !== nothing - desc["normalize"] = Base.Bool(normalize) - end - desc["T"] = tf.data_type(hypothesis_values_) - desc["T"] = tf.data_type(truth_values_) - res = tf.execute(desc) - node = tf.TapeNode(edit_distance, [hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_], name=nothing, normalize=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function edit_distance(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=nothing, normalize=nothing) - if tf.in_eager_mode() - edit_distance_eager(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) - else - edit_distance_graph(hypothesis_indices_, hypothesis_values_, hypothesis_shape_, truth_indices_, truth_values_, truth_shape_; name=name, normalize=normalize) - end - end end @@ -39985,52 +72915,98 @@ end """ begin - function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "UniqueV2") do - desc = tf.NodeDescription("UniqueV2") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int64}, axis_) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) + begin + function unique_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueV2") do + desc = tf.NodeDescription("UniqueV2") + 
begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + axis_ = convert(Tensor{Int64}, axis_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueV2") + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Taxis"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_v2_eager(x_, axis_; name=name, out_idx=out_idx) + else + unique_v2_graph(x_, axis_; name=name, out_idx=out_idx) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function unique_v2_eager(x_, axis_; name=nothing, out_idx=nothing) - desc = tf.EagerOp("UniqueV2") - x_ = convert(tf.EagerTensor, x_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - desc["T"] = tf.data_type(x_) - desc["Taxis"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(unique_v2, [x_, axis_], name=nothing, out_idx=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_v2(x_, axis_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_v2_eager(x_, axis_; name=name, out_idx=out_idx) - else - unique_v2_graph(x_, axis_; name=name, out_idx=out_idx) - end - end end @@ -40040,69 +73016,133 @@ end """ begin - function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantizeV2") do - desc = tf.NodeDescription("QuantizeAndDequantizeV2") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Any}, input_min_) - input_max_ = convert(Tensor{Any}, input_max_) - (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) + begin + function quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantizeV2") do + desc = tf.NodeDescription("QuantizeAndDequantizeV2") + 
begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_min_ = convert(Tensor{Any}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Any}, input_max_) + begin + end + end + begin + (input_, input_min_, input_max_) = tf.tf_promote(input_, input_min_, input_max_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + begin + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + desc = tf.EagerOp("QuantizeAndDequantizeV2") + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + begin + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(input_min_) + end + begin + desc["T"] = tf.data_type(input_max_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) + else + quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) + end end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) - end - end - tf.Tensor(tf.Operation(desc)) end - function quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) - desc = tf.EagerOp("QuantizeAndDequantizeV2") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) 
- tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(input_min_) - desc["T"] = tf.data_type(input_max_) - res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize_v2, [input_, input_min_, input_max_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize_v2(input_, input_min_, input_max_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, round_mode=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_v2_eager(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) - else - quantize_and_dequantize_v2_graph(input_, input_min_, input_max_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode) - end - end end @@ -40112,65 +73152,113 @@ end """ begin - function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - local desc - tf.with_op_name(name, "QuantizeAndDequantize") do - desc = tf.NodeDescription("QuantizeAndDequantize") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) + begin + function quantize_and_dequantize_graph(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + local desc + tf.with_op_name(name, "QuantizeAndDequantize") do + desc = tf.NodeDescription("QuantizeAndDequantize") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) + end + end + begin + if input_min !== nothing + desc["input_min"] = Base.identity(input_min) + end + end + begin + if input_max !== nothing + desc["input_max"] = Base.identity(input_max) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + desc = tf.EagerOp("QuantizeAndDequantize") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if signed_input !== nothing + desc["signed_input"] = Base.Bool(signed_input) + end + end + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if range_given !== nothing + desc["range_given"] = Base.Bool(range_given) 
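+                                  # Optional keyword attributes are written into the op description
+                                  # only when the caller supplies them; otherwise TensorFlow's
+                                  # registered defaults apply (hedged note, not in the generated file).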
+ end + end + begin + if input_min !== nothing + desc["input_min"] = Base.identity(input_min) + end + end + begin + if input_max !== nothing + desc["input_max"] = Base.identity(input_max) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) + if tf.in_eager_mode() + quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) + else + quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) + end end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if input_min !== nothing - desc["input_min"] = Base.identity(input_min) - end - if input_max !== nothing - desc["input_max"] = Base.identity(input_max) - end - end - tf.Tensor(tf.Operation(desc)) - end - function quantize_and_dequantize_eager(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - desc = tf.EagerOp("QuantizeAndDequantize") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if signed_input !== nothing - desc["signed_input"] = Base.Bool(signed_input) - end - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if range_given !== nothing - desc["range_given"] = Base.Bool(range_given) - end - if input_min !== nothing - desc["input_min"] = Base.identity(input_min) - end - if input_max !== nothing - desc["input_max"] = Base.identity(input_max) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(quantize_and_dequantize, [input_], name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_and_dequantize(input_; name=nothing, signed_input=nothing, num_bits=nothing, range_given=nothing, input_min=nothing, input_max=nothing) - if tf.in_eager_mode() - quantize_and_dequantize_eager(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) - else - quantize_and_dequantize_graph(input_; name=name, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max) - end - end end @@ -40180,44 +73268,74 @@ end """ begin - function tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPopBack") do - desc = tf.NodeDescription("TensorListPopBack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function 
tensor_list_pop_back_graph(input_handle_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPopBack") do + desc = tf.NodeDescription("TensorListPopBack") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function tensor_list_pop_back_eager(input_handle_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListPopBack") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_pop_back, [input_handle_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function tensor_list_pop_back_eager(input_handle_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPopBack") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_pop_back, [input_handle_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype) - else - tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_pop_back(input_handle_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_pop_back_eager(input_handle_; name=name, element_dtype=element_dtype) + else + tensor_list_pop_back_graph(input_handle_; name=name, element_dtype=element_dtype) + end end - end + end end @@ -40227,59 +73345,103 @@ end Debug NaN Value Counter Op """ begin - function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - local desc - tf.with_op_name(name, "DebugNanCount") do - desc = tf.NodeDescription("DebugNanCount") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) + begin + function debug_nan_count_graph(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + local desc + tf.with_op_name(name, "DebugNanCount") do + desc = tf.NodeDescription("DebugNanCount") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + 
end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + end + begin + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + desc = tf.EagerOp("DebugNanCount") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if device_name !== nothing + desc["device_name"] = Base.String(device_name) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_urls !== nothing + desc["debug_urls"] = map(Base.identity, debug_urls) + end + end + begin + if gated_grpc !== nothing + desc["gated_grpc"] = Base.Bool(gated_grpc) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) + if tf.in_eager_mode() + debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + else + debug_nan_count_graph(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) + end end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - end - tf.Tensor(tf.Operation(desc)) - end - function debug_nan_count_eager(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - desc = tf.EagerOp("DebugNanCount") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if device_name !== nothing - desc["device_name"] = Base.String(device_name) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_urls !== nothing - desc["debug_urls"] = map(Base.identity, debug_urls) - end - if gated_grpc !== nothing - desc["gated_grpc"] = Base.Bool(gated_grpc) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(debug_nan_count, [input_], name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_nan_count(input_; name=nothing, device_name=nothing, tensor_name=nothing, debug_urls=nothing, gated_grpc=nothing) - if tf.in_eager_mode() - debug_nan_count_eager(input_; name=name, device_name=device_name, tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - else - debug_nan_count_graph(input_; name=name, device_name=device_name, 
tensor_name=tensor_name, debug_urls=debug_urls, gated_grpc=gated_grpc) - end - end end @@ -40289,75 +73451,175 @@ end """ begin - function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAdagradDA") do - desc = tf.NodeDescription("ApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyAdagradDA") - var_ = convert(tf.EagerTensor, var_) - gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) - grad_ = convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - global_step_ = convert(tf.EagerTensor, global_step_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(gradient_accumulator_) - desc["T"] = tf.data_type(gradient_squared_accumulator_) - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - res = tf.execute(desc) - node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + begin + function apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, 
lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdagradDA") do + desc = tf.NodeDescription("ApplyAdagradDA") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + begin + end + end + begin + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + global_step_ = convert(Tensor{Int64}, global_step_) + begin + end + end + begin + (var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) = tf.tf_promote(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, gradient_accumulator_) + end + begin + tf.add_input(desc, gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdagradDA") + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, gradient_accumulator_) + end + begin + tf.add_input(desc, gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(gradient_accumulator_) + end + begin + desc["T"] = tf.data_type(gradient_squared_accumulator_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=nothing, 
use_locking=nothing) + if tf.in_eager_mode() + apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end + end end @@ -40367,64 +73629,118 @@ end """ begin - function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNative") do - desc = tf.NodeDescription("DepthwiseConv2dNative") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function depthwise_conv2d_native_graph(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNative") do + desc = tf.NodeDescription("DepthwiseConv2dNative") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("DepthwiseConv2dNative") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + 
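# NOTE (hedged usage sketch; assumes eager mode is active and that Julia arrays
# convert to tf.EagerTensor exactly as the wrappers above do internally -- the
# values and shapes are illustrative only):
#
#     x = convert(tf.EagerTensor, randn(Float32, 1, 28, 28, 3))  # NHWC batch of one RGB image
#     w = convert(tf.EagerTensor, randn(Float32, 3, 3, 3, 1))    # 3x3 filter, depth multiplier 1
#     y = depthwise_conv2d_native(x, w; strides=[1, 1, 1, 1], padding="SAME")
#
# The `strides`/`padding`/`data_format`/`dilations` keywords default to
# `nothing` and are only written into the op description when supplied, so
# TensorFlow's own attribute defaults apply otherwise.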
depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) end - function depthwise_conv2d_native_eager(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("DepthwiseConv2dNative") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native, [input_, filter_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native(input_, filter_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_eager(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_graph(input_, filter_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -40434,33 +73750,57 @@ end """ begin - function serialize_iterator_graph(resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "SerializeIterator") do - desc = tf.NodeDescription("SerializeIterator") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - tf.add_input(desc, resource_handle_) + begin + function serialize_iterator_graph(resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "SerializeIterator") do + desc = tf.NodeDescription("SerializeIterator") + begin + begin + resource_handle_ = convert(Tensor{Any}, resource_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, resource_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function serialize_iterator_eager(resource_handle_; name=nothing) - desc = tf.EagerOp("SerializeIterator") - resource_handle_ = convert(tf.EagerTensor, resource_handle_) - tf.add_input(desc, resource_handle_) - res = tf.execute(desc) - node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function serialize_iterator_eager(resource_handle_; name=nothing) + desc = tf.EagerOp("SerializeIterator") + resource_handle_ = convert(tf.EagerTensor, resource_handle_) + begin + begin + tf.add_input(desc, resource_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(serialize_iterator, [resource_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end 
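# NOTE (toy sketch under assumed names; the real tape types live elsewhere in
# this PR and differ in detail): each `*_eager` function above ends the same
# way -- it wraps the forward call in a tf.TapeNode and registers it against the
# first output with tf.add_node, so the eager runtime can later walk the tape
# when computing gradients. The bookkeeping amounts to roughly this:
struct SketchTapeNode
    op::Function         # the Julia-level op wrapper, e.g. serialize_iterator
    inputs::Vector{Any}  # the eager tensors the op consumed
end
sketch_tape = IdDict{Any,SketchTapeNode}()  # output handle => node that produced it
function sketch_record!(out, op, inputs)
    sketch_tape[out] = SketchTapeNode(op, inputs)
    return out
end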
end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_iterator(resource_handle_; name=nothing) - if tf.in_eager_mode() - serialize_iterator_eager(resource_handle_; name=name) - else - serialize_iterator_graph(resource_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function serialize_iterator(resource_handle_; name=nothing) + if tf.in_eager_mode() + serialize_iterator_eager(resource_handle_; name=name) + else + serialize_iterator_graph(resource_handle_; name=name) + end end - end + end end @@ -40470,33 +73810,57 @@ end """ begin - function dataset_to_graph_graph(input_dataset_; name=nothing) - local desc - tf.with_op_name(name, "DatasetToGraph") do - desc = tf.NodeDescription("DatasetToGraph") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) + begin + function dataset_to_graph_graph(input_dataset_; name=nothing) + local desc + tf.with_op_name(name, "DatasetToGraph") do + desc = tf.NodeDescription("DatasetToGraph") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function dataset_to_graph_eager(input_dataset_; name=nothing) - desc = tf.EagerOp("DatasetToGraph") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - res = tf.execute(desc) - node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function dataset_to_graph_eager(input_dataset_; name=nothing) + desc = tf.EagerOp("DatasetToGraph") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(dataset_to_graph, [input_dataset_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing) - if tf.in_eager_mode() - dataset_to_graph_eager(input_dataset_; name=name) - else - dataset_to_graph_graph(input_dataset_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dataset_to_graph(input_dataset_; name=nothing) + if tf.in_eager_mode() + dataset_to_graph_eager(input_dataset_; name=name) + else + dataset_to_graph_graph(input_dataset_; name=name) + end end - end + end end @@ -40506,52 +73870,90 @@ end """ begin - function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) - local desc - tf.with_op_name(name, "TopK") do - desc = tf.NodeDescription("TopK") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if k !== nothing - desc["k"] = Base.Int(k) + begin + function top_k_graph(input_; name=nothing, k=nothing, sorted=nothing) + local desc + tf.with_op_name(name, "TopK") do + desc = tf.NodeDescription("TopK") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if k !== nothing + desc["k"] = Base.Int(k) + end + end + begin + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + 
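# NOTE: TopK is a two-output op (values and indices). That is why the graph
# builder below collects tf.Tensor(op, out_idx) for out_idx = 1:2 into a vector,
# and why the eager path returns the whole `res` vector rather than `res[1]`.
# A hedged usage sketch (eager mode, illustrative values):
#
#     values, indices = top_k(convert(tf.EagerTensor, [5, 1, 9, 3]); k=2, sorted=true)
#     # values  -> the two largest entries, [9, 5]
#     # indices -> their positions, [2, 0] (TensorFlow indices are 0-based)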
end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing) + desc = tf.EagerOp("TopK") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if k !== nothing + desc["k"] = Base.Int(k) + end + end + begin + if sorted !== nothing + desc["sorted"] = Base.Bool(sorted) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing) + if tf.in_eager_mode() + top_k_eager(input_; name=name, k=k, sorted=sorted) + else + top_k_graph(input_; name=name, k=k, sorted=sorted) + end end - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function top_k_eager(input_; name=nothing, k=nothing, sorted=nothing) - desc = tf.EagerOp("TopK") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if k !== nothing - desc["k"] = Base.Int(k) - end - if sorted !== nothing - desc["sorted"] = Base.Bool(sorted) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(top_k, [input_], name=nothing, k=nothing, sorted=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function top_k(input_; name=nothing, k=nothing, sorted=nothing) - if tf.in_eager_mode() - top_k_eager(input_; name=name, k=k, sorted=sorted) - else - top_k_graph(input_; name=name, k=k, sorted=sorted) - end - end end @@ -40561,78 +73963,184 @@ end """ begin - function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyFtrlV2") do - desc = tf.NodeDescription("ResourceApplyFtrlV2") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyFtrlV2") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = 
convert(tf.EagerTensor, grad_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, l2_shrinkage_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(l2_shrinkage_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) - else - resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + begin + function resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyFtrlV2") do + desc = tf.NodeDescription("ResourceApplyFtrlV2") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + l2_shrinkage_ = convert(Tensor{Any}, l2_shrinkage_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) = tf.tf_promote(grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyFtrlV2") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + lr_ = 
convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + l2_shrinkage_ = convert(tf.EagerTensor, l2_shrinkage_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, l2_shrinkage_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(l2_shrinkage_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_ftrl_v2, [var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_ftrl_v2(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_ftrl_v2_eager(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + else + resource_apply_ftrl_v2_graph(var_, accum_, linear_, grad_, lr_, l1_, l2_, l2_shrinkage_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -40642,45 +74150,77 @@ end Replacement node for NcclBroadcast. 
""" begin - function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "_NcclBroadcastRecv") do - desc = tf.NodeDescription("_NcclBroadcastRecv") - shape_ = convert(Tensor{Int32}, shape_) - tf.add_input(desc, shape_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function _nccl_broadcast_recv_graph(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "_NcclBroadcastRecv") do + desc = tf.NodeDescription("_NcclBroadcastRecv") + begin + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + end + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + desc = tf.EagerOp("_NcclBroadcastRecv") + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if num_devices !== nothing + desc["num_devices"] = Base.Int(num_devices) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) + if tf.in_eager_mode() + _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name) + else + _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _nccl_broadcast_recv_eager(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - desc = tf.EagerOp("_NcclBroadcastRecv") - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, shape_) - if num_devices !== nothing - desc["num_devices"] = Base.Int(num_devices) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(_nccl_broadcast_recv, [shape_], name=nothing, num_devices=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _nccl_broadcast_recv(shape_; name=nothing, num_devices=nothing, shared_name=nothing) - if tf.in_eager_mode() - _nccl_broadcast_recv_eager(shape_; name=name, num_devices=num_devices, shared_name=shared_name) - else - _nccl_broadcast_recv_graph(shape_; name=name, num_devices=num_devices, shared_name=shared_name) - end - end end @@ -40690,33 +74230,57 @@ end """ begin - function queue_is_closed_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "QueueIsClosed") do - desc = tf.NodeDescription("QueueIsClosed") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function queue_is_closed_graph(handle_; 
name=nothing) + local desc + tf.with_op_name(name, "QueueIsClosed") do + desc = tf.NodeDescription("QueueIsClosed") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function queue_is_closed_eager(handle_; name=nothing) - desc = tf.EagerOp("QueueIsClosed") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function queue_is_closed_eager(handle_; name=nothing) + desc = tf.EagerOp("QueueIsClosed") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(queue_is_closed, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed(handle_; name=nothing) - if tf.in_eager_mode() - queue_is_closed_eager(handle_; name=name) - else - queue_is_closed_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed(handle_; name=nothing) + if tf.in_eager_mode() + queue_is_closed_eager(handle_; name=name) + else + queue_is_closed_graph(handle_; name=name) + end end - end + end end @@ -40726,63 +74290,123 @@ end """ begin - function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ShuffleDataset") do - desc = tf.NodeDescription("ShuffleDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - seed_ = convert(Tensor{Int64}, seed_) - seed2_ = convert(Tensor{Int64}, seed2_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if reshuffle_each_iteration !== nothing - desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ShuffleDataset") do + desc = tf.NodeDescription("ShuffleDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + seed2_ = convert(Tensor{Int64}, seed2_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + end + begin + begin + if reshuffle_each_iteration !== nothing + desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + 
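# NOTE (hedged usage sketch; `input_ds` is a hypothetical stand-in for an
# existing dataset handle, and the attr values are illustrative): ShuffleDataset
# follows TensorFlow's two-seed convention -- the op is only deterministic when
# (seed, seed2) are not both zero.
#
#     ds = shuffle_dataset(input_ds, 1024, 42, 7;
#                          reshuffle_each_iteration=true,
#                          output_types=[Float32],
#                          output_shapes=[[28, 28]])
#
# buffer_size (1024 here) bounds how many elements the shuffle buffer holds,
# trading memory for shuffle quality.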
desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ShuffleDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + seed_ = convert(tf.EagerTensor, seed_) + seed2_ = convert(tf.EagerTensor, seed2_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, buffer_size_) + end + begin + tf.add_input(desc, seed_) + end + begin + tf.add_input(desc, seed2_) + end + end + begin + begin + if reshuffle_each_iteration !== nothing + desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) + else + shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ShuffleDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - seed_ = convert(tf.EagerTensor, seed_) - seed2_ = convert(tf.EagerTensor, seed2_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, buffer_size_) - tf.add_input(desc, seed_) - tf.add_input(desc, seed2_) - if reshuffle_each_iteration !== nothing - desc["reshuffle_each_iteration"] = Base.Bool(reshuffle_each_iteration) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(shuffle_dataset, [input_dataset_, buffer_size_, seed_, seed2_], name=nothing, reshuffle_each_iteration=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function shuffle_dataset(input_dataset_, buffer_size_, seed_, seed2_; name=nothing, reshuffle_each_iteration=nothing, 
output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - shuffle_dataset_eager(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) - else - shuffle_dataset_graph(input_dataset_, buffer_size_, seed_, seed2_; name=name, reshuffle_each_iteration=reshuffle_each_iteration, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -40792,46 +74416,80 @@ end """ begin - function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "DeserializeSparse") do - desc = tf.NodeDescription("DeserializeSparse") - serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) - (serialized_sparse_,) = tf.tf_promote(serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function deserialize_sparse_graph(serialized_sparse_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "DeserializeSparse") do + desc = tf.NodeDescription("DeserializeSparse") + begin + begin + serialized_sparse_ = convert(Tensor{String}, serialized_sparse_) + begin + end + end + begin + (serialized_sparse_,) = tf.tf_promote(serialized_sparse_) + end + end + begin + begin + tf.add_input(desc, serialized_sparse_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) - desc = tf.EagerOp("DeserializeSparse") - serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) - tf.add_input(desc, serialized_sparse_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tserialized"] = tf.data_type(serialized_sparse_) - res = tf.execute(desc) - node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function deserialize_sparse_eager(serialized_sparse_; name=nothing, dtype=nothing) + desc = tf.EagerOp("DeserializeSparse") + serialized_sparse_ = convert(tf.EagerTensor, serialized_sparse_) + begin + begin + tf.add_input(desc, serialized_sparse_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tserialized"] = tf.data_type(serialized_sparse_) + end + res = tf.execute(desc) + node = tf.TapeNode(deserialize_sparse, [serialized_sparse_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype) - else - deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function deserialize_sparse(serialized_sparse_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + deserialize_sparse_eager(serialized_sparse_; name=name, dtype=dtype) + else + 
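# NOTE: DeserializeSparse is a three-output op (indices, values, dense_shape):
# its graph builder collects out_idx = 1:3, and the eager path returns the full
# `res` vector so callers can destructure it. Its "Tserialized" attr is inferred
# from the serialized input via tf.data_type. Hedged sketch, with `serialized`
# standing in for a tensor produced by a matching serialize op:
#
#     indices, values, dense_shape = deserialize_sparse(serialized; dtype=Float64)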
deserialize_sparse_graph(serialized_sparse_; name=name, dtype=dtype) + end end - end + end end @@ -40841,59 +74499,95 @@ end """ begin - function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PriorityQueueV2") do - desc = tf.NodeDescription("PriorityQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function priority_queue_v2_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PriorityQueueV2") do + desc = tf.NodeDescription("PriorityQueueV2") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function priority_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PriorityQueueV2") + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function priority_queue_v2_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("PriorityQueueV2") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - 
desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(priority_queue_v2, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function priority_queue_v2(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - priority_queue_v2_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - priority_queue_v2_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -40903,35 +74597,55 @@ end A graph node which represents an argument to a function. """ begin - function _device_arg_graph(; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_DeviceArg") do - desc = tf.NodeDescription("_DeviceArg") - if index !== nothing - desc["index"] = Base.Int(index) + begin + function _device_arg_graph(; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceArg") do + desc = tf.NodeDescription("_DeviceArg") + begin + end + begin + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _device_arg_eager(; name=nothing, index=nothing) - desc = tf.EagerOp("_DeviceArg") - if index !== nothing - desc["index"] = Base.Int(index) - end - res = tf.execute(desc) - node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _device_arg_eager(; name=nothing, index=nothing) + desc = tf.EagerOp("_DeviceArg") + begin + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_device_arg, [], name=nothing, index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_arg(; name=nothing, index=nothing) - if tf.in_eager_mode() - _device_arg_eager(; name=name, index=index) - else - _device_arg_graph(; name=name, index=index) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_arg(; name=nothing, index=nothing) + if tf.in_eager_mode() + _device_arg_eager(; name=name, index=index) + else + _device_arg_graph(; name=name, index=index) + end end - end + end end @@ -40941,53 +74655,93 @@ end """ begin - function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TruncatedNormal") do - desc = tf.NodeDescription("TruncatedNormal") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function truncated_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + 
local desc + tf.with_op_name(name, "TruncatedNormal") do + desc = tf.NodeDescription("TruncatedNormal") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = tf.EagerOp("TruncatedNormal") + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function truncated_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - desc = tf.EagerOp("TruncatedNormal") - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(truncated_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncated_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - truncated_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - truncated_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - end - end end @@ -40997,43 +74751,79 @@ end """ begin - function tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "TensorForestTreePredict") do - desc = tf.NodeDescription("TensorForestTreePredict") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - dense_features_ = convert(Tensor{Float32}, dense_features_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, dense_features_) - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) + begin + function 
tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "TensorForestTreePredict") do + desc = tf.NodeDescription("TensorForestTreePredict") + begin + begin + tree_handle_ = convert(Tensor{Any}, tree_handle_) + begin + end + end + begin + dense_features_ = convert(Tensor{Float32}, dense_features_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_handle_) + end + begin + tf.add_input(desc, dense_features_) + end + end + begin + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + desc = tf.EagerOp("TensorForestTreePredict") + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + dense_features_ = convert(tf.EagerTensor, dense_features_) + begin + begin + tf.add_input(desc, tree_handle_) + end + begin + tf.add_input(desc, dense_features_) + end + end + begin + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) + else + tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - desc = tf.EagerOp("TensorForestTreePredict") - tree_handle_ = convert(tf.EagerTensor, tree_handle_) - dense_features_ = convert(tf.EagerTensor, dense_features_) - tf.add_input(desc, tree_handle_) - tf.add_input(desc, dense_features_) - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_predict, [tree_handle_, dense_features_], name=nothing, logits_dimension=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_predict(tree_handle_, dense_features_; name=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - tensor_forest_tree_predict_eager(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) - else - tensor_forest_tree_predict_graph(tree_handle_, dense_features_; name=name, logits_dimension=logits_dimension) - end - end end @@ -41043,45 +74833,77 @@ end """ begin - function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - local desc - tf.with_op_name(name, "StackV2") do - desc = tf.NodeDescription("StackV2") - max_size_ = convert(Tensor{Int32}, max_size_) - tf.add_input(desc, max_size_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) + begin + function stack_v2_graph(max_size_; name=nothing, elem_type=nothing, 
stack_name=nothing) + local desc + tf.with_op_name(name, "StackV2") do + desc = tf.NodeDescription("StackV2") + begin + begin + max_size_ = convert(Tensor{Int32}, max_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, max_size_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + begin + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + desc = tf.EagerOp("StackV2") + max_size_ = convert(tf.EagerTensor, max_size_) + begin + begin + tf.add_input(desc, max_size_) + end + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + begin + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) + if tf.in_eager_mode() + stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) + else + stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) + end end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stack_v2_eager(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - desc = tf.EagerOp("StackV2") - max_size_ = convert(tf.EagerTensor, max_size_) - tf.add_input(desc, max_size_) - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) - end - res = tf.execute(desc) - node = tf.TapeNode(stack_v2, [max_size_], name=nothing, elem_type=nothing, stack_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_v2(max_size_; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.in_eager_mode() - stack_v2_eager(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) - else - stack_v2_graph(max_size_; name=name, elem_type=elem_type, stack_name=stack_name) - end - end end @@ -41091,33 +74913,57 @@ end """ begin - function accumulator_num_accumulated_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "AccumulatorNumAccumulated") do - desc = tf.NodeDescription("AccumulatorNumAccumulated") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function accumulator_num_accumulated_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "AccumulatorNumAccumulated") do + desc = tf.NodeDescription("AccumulatorNumAccumulated") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function accumulator_num_accumulated_eager(handle_; name=nothing) - desc = tf.EagerOp("AccumulatorNumAccumulated") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res 
= tf.execute(desc) - node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function accumulator_num_accumulated_eager(handle_; name=nothing) + desc = tf.EagerOp("AccumulatorNumAccumulated") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(accumulator_num_accumulated, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing) - if tf.in_eager_mode() - accumulator_num_accumulated_eager(handle_; name=name) - else - accumulator_num_accumulated_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_num_accumulated(handle_; name=nothing) + if tf.in_eager_mode() + accumulator_num_accumulated_eager(handle_; name=name) + else + accumulator_num_accumulated_graph(handle_; name=name) + end end - end + end end @@ -41127,33 +74973,57 @@ end """ begin - function reader_reset_v2_graph(reader_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderResetV2") do - desc = tf.NodeDescription("ReaderResetV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - tf.add_input(desc, reader_handle_) + begin + function reader_reset_v2_graph(reader_handle_; name=nothing) + local desc + tf.with_op_name(name, "ReaderResetV2") do + desc = tf.NodeDescription("ReaderResetV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_reset_v2_eager(reader_handle_; name=nothing) - desc = tf.EagerOp("ReaderResetV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - tf.add_input(desc, reader_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_reset_v2_eager(reader_handle_; name=nothing) + desc = tf.EagerOp("ReaderResetV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_reset_v2, [reader_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing) - if tf.in_eager_mode() - reader_reset_v2_eager(reader_handle_; name=name) - else - reader_reset_v2_graph(reader_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_reset_v2(reader_handle_; name=nothing) + if tf.in_eager_mode() + reader_reset_v2_eager(reader_handle_; name=name) + else + reader_reset_v2_graph(reader_handle_; name=name) + end end - end + end end @@ -41163,71 +75033,163 @@ end """ begin - function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAddSign") do - desc = tf.NodeDescription("ApplyAddSign") - var_ = 
convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - alpha_ = convert(Tensor{Any}, alpha_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyAddSign") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - lr_ = convert(tf.EagerTensor, lr_) - alpha_ = convert(tf.EagerTensor, alpha_) - sign_decay_ = convert(tf.EagerTensor, sign_decay_) - beta_ = convert(tf.EagerTensor, beta_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(sign_decay_) - desc["T"] = tf.data_type(beta_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + begin + function apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAddSign") do + desc = tf.NodeDescription("ApplyAddSign") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + sign_decay_ = convert(Tensor{Any}, sign_decay_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = 
Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAddSign") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + alpha_ = convert(tf.EagerTensor, alpha_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(sign_decay_) + end + begin + desc["T"] = tf.data_type(beta_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -41237,58 +75199,92 @@ end Retrieve embedding parameters for a single table. 
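
The graph version returns a vector of four tensors (see the `for out_idx = 1:4` loop below). A minimal usage sketch; the table name and shard values are illustrative assumptions, not taken from this file:

    params, ms, mom, grad_accum = retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(
        table_name="table0", num_shards=1, shard_id=0)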
""" begin - function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if 
table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_rms_prop_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -41298,35 +75294,63 @@ end """ begin - function rint_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Rint") do - desc = tf.NodeDescription("Rint") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function rint_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Rint") do + desc = tf.NodeDescription("Rint") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rint_eager(x_; name=nothing) - desc = tf.EagerOp("Rint") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(rint, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rint_eager(x_; name=nothing) + desc = tf.EagerOp("Rint") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(rint, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rint(x_; name=nothing) - if tf.in_eager_mode() - rint_eager(x_; name=name) - else - rint_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rint(x_; name=nothing) + if tf.in_eager_mode() + rint_eager(x_; name=name) + else + rint_graph(x_; name=name) + end end - end + end end @@ -41336,58 +75360,92 @@ end Retrieve embedding parameters for a single table. 
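
As with the other generated wrappers, the exported function dispatches to the `_eager` or `_graph` variant via `tf.in_eager_mode()` and likewise yields four tensors. A usage sketch (the shard arguments are illustrative assumptions):

    # retrieve all four state tensors for shard 0 of an already-configured table
    params, accum, updates, grad_accum = retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(
        table_name="table0", num_shards=1, shard_id=0)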
""" begin - function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - 
if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adadelta_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -41397,59 +75455,111 @@ end """ begin - function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - local desc - tf.with_op_name(name, "ExtractGlimpse") do - desc = tf.NodeDescription("ExtractGlimpse") - input_ = convert(Tensor{Float32}, input_) - size_ = convert(Tensor{Int32}, size_) - offsets_ = convert(Tensor{Float32}, offsets_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, offsets_) - if centered !== nothing - desc["centered"] = Base.Bool(centered) - end - if normalized !== nothing - desc["normalized"] = Base.Bool(normalized) + begin + function extract_glimpse_graph(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) + local desc + tf.with_op_name(name, "ExtractGlimpse") do + desc = tf.NodeDescription("ExtractGlimpse") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + offsets_ = convert(Tensor{Float32}, offsets_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, offsets_) + end + end + begin + begin + if centered !== nothing + desc["centered"] = Base.Bool(centered) + end + end + begin + if normalized !== nothing + desc["normalized"] = Base.Bool(normalized) + end + end + begin + if uniform_noise !== nothing + desc["uniform_noise"] = Base.Bool(uniform_noise) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) + desc = tf.EagerOp("ExtractGlimpse") + input_ = convert(tf.EagerTensor, input_) + size_ = convert(tf.EagerTensor, size_) + offsets_ = convert(tf.EagerTensor, offsets_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, offsets_) + end + end + begin + begin + if centered !== nothing + desc["centered"] = Base.Bool(centered) + end + end + begin + if normalized !== nothing + desc["normalized"] = Base.Bool(normalized) + end + end + begin + if uniform_noise !== nothing + desc["uniform_noise"] = Base.Bool(uniform_noise) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(extract_glimpse, 
[input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) + if tf.in_eager_mode() + extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) + else + extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) + end end - if uniform_noise !== nothing - desc["uniform_noise"] = Base.Bool(uniform_noise) - end - end - tf.Tensor(tf.Operation(desc)) - end - function extract_glimpse_eager(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - desc = tf.EagerOp("ExtractGlimpse") - input_ = convert(tf.EagerTensor, input_) - size_ = convert(tf.EagerTensor, size_) - offsets_ = convert(tf.EagerTensor, offsets_) - tf.add_input(desc, input_) - tf.add_input(desc, size_) - tf.add_input(desc, offsets_) - if centered !== nothing - desc["centered"] = Base.Bool(centered) - end - if normalized !== nothing - desc["normalized"] = Base.Bool(normalized) - end - if uniform_noise !== nothing - desc["uniform_noise"] = Base.Bool(uniform_noise) - end - res = tf.execute(desc) - node = tf.TapeNode(extract_glimpse, [input_, size_, offsets_], name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function extract_glimpse(input_, size_, offsets_; name=nothing, centered=nothing, normalized=nothing, uniform_noise=nothing) - if tf.in_eager_mode() - extract_glimpse_eager(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) - else - extract_glimpse_graph(input_, size_, offsets_; name=name, centered=centered, normalized=normalized, uniform_noise=uniform_noise) - end - end end @@ -41459,45 +75569,77 @@ end """ begin - function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) - local desc - tf.with_op_name(name, "StringToHashBucketStrong") do - desc = tf.NodeDescription("StringToHashBucketStrong") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if key !== nothing - desc["key"] = map(Base.identity, key) + begin + function string_to_hash_bucket_strong_graph(input_; name=nothing, num_buckets=nothing, key=nothing) + local desc + tf.with_op_name(name, "StringToHashBucketStrong") do + desc = tf.NodeDescription("StringToHashBucketStrong") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if key !== nothing + desc["key"] = map(Base.identity, key) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing) + desc = tf.EagerOp("StringToHashBucketStrong") + input_ = convert(tf.EagerTensor, input_) + begin + begin + 
tf.add_input(desc, input_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if key !== nothing + desc["key"] = map(Base.identity, key) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key) + else + string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function string_to_hash_bucket_strong_eager(input_; name=nothing, num_buckets=nothing, key=nothing) - desc = tf.EagerOp("StringToHashBucketStrong") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if key !== nothing - desc["key"] = map(Base.identity, key) - end - res = tf.execute(desc) - node = tf.TapeNode(string_to_hash_bucket_strong, [input_], name=nothing, num_buckets=nothing, key=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket_strong(input_; name=nothing, num_buckets=nothing, key=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_strong_eager(input_; name=name, num_buckets=num_buckets, key=key) - else - string_to_hash_bucket_strong_graph(input_; name=name, num_buckets=num_buckets, key=key) - end - end end @@ -41507,59 +75649,95 @@ end """ begin - function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OneShotIterator") do - desc = tf.NodeDescription("OneShotIterator") - if dataset_factory !== nothing - desc["dataset_factory"] = Base.identity(dataset_factory) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function one_shot_iterator_graph(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OneShotIterator") do + desc = tf.NodeDescription("OneShotIterator") + begin + end + begin + end + begin + begin + if dataset_factory !== nothing + desc["dataset_factory"] = Base.identity(dataset_factory) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function one_shot_iterator_eager(; name=nothing, dataset_factory=nothing, output_types=nothing, 
output_shapes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OneShotIterator") + begin + end + begin + begin + if dataset_factory !== nothing + desc["dataset_factory"] = Base.identity(dataset_factory) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) + else + one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function one_shot_iterator_eager(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OneShotIterator") - if dataset_factory !== nothing - desc["dataset_factory"] = Base.identity(dataset_factory) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(one_shot_iterator, [], name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function one_shot_iterator(; name=nothing, dataset_factory=nothing, output_types=nothing, output_shapes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - one_shot_iterator_eager(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) - else - one_shot_iterator_graph(; name=name, dataset_factory=dataset_factory, output_types=output_types, output_shapes=output_shapes, container=container, shared_name=shared_name) - end - end end @@ -41569,72 +75747,156 @@ end """ begin - function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyMomentum") do - desc = 
tf.NodeDescription("ResourceSparseApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) + begin + function resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyMomentum") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceSparseApplyMomentum") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = convert(tf.EagerTensor, momentum_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, 
use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ResourceSparseApplyMomentum") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - momentum_ = convert(tf.EagerTensor, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(momentum_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_sparse_apply_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_sparse_apply_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - end - end end @@ -41644,51 +75906,103 @@ end """ begin - function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - local desc - tf.with_op_name(name, "SaveSlices") do - desc = tf.NodeDescription("SaveSlices") - filename_ = convert(Tensor{String}, filename_) - tensor_names_ = convert(Tensor{String}, tensor_names_) - shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_) - data_ = [convert(Tensor{Any}, x) for x = data_] - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shapes_and_slices_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) + begin + function save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + local desc + tf.with_op_name(name, "SaveSlices") do + desc = tf.NodeDescription("SaveSlices") + begin + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + begin + tensor_names_ = convert(Tensor{String}, tensor_names_) + begin + end + end + begin 
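+ # shapes_and_slices_: one spec string per saved tensor; an empty spec saves the whole tensor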
+ shapes_and_slices_ = convert(Tensor{String}, shapes_and_slices_) + begin + end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + end + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shapes_and_slices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + desc = tf.EagerOp("SaveSlices") + filename_ = convert(tf.EagerTensor, filename_) + tensor_names_ = convert(tf.EagerTensor, tensor_names_) + shapes_and_slices_ = convert(tf.EagerTensor, shapes_and_slices_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, tensor_names_) + end + begin + tf.add_input(desc, shapes_and_slices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) + if tf.in_eager_mode() + save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) + else + save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) + end end - end - tf.Tensor(tf.Operation(desc)) end - function save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - desc = tf.EagerOp("SaveSlices") - filename_ = convert(tf.EagerTensor, filename_) - tensor_names_ = convert(tf.EagerTensor, tensor_names_) - shapes_and_slices_ = convert(tf.EagerTensor, shapes_and_slices_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, filename_) - tf.add_input(desc, tensor_names_) - tf.add_input(desc, shapes_and_slices_) - tf.add_input(desc, data_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - res = tf.execute(desc) - node = tf.TapeNode(save_slices, [filename_, tensor_names_, shapes_and_slices_, data_], name=nothing, T=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function save_slices(filename_, tensor_names_, shapes_and_slices_, data_; name=nothing, T=nothing) - if tf.in_eager_mode() - save_slices_eager(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) - else - save_slices_graph(filename_, tensor_names_, shapes_and_slices_, data_; name=name, T=T) - end - end end @@ -41698,33 +76012,57 @@ end """ begin - function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalDatasetCardinality") do - desc = tf.NodeDescription("ExperimentalDatasetCardinality") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tf.add_input(desc, input_dataset_) + begin + function experimental_dataset_cardinality_graph(input_dataset_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalDatasetCardinality") do + desc = 
tf.NodeDescription("ExperimentalDatasetCardinality") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) - desc = tf.EagerOp("ExperimentalDatasetCardinality") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tf.add_input(desc, input_dataset_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_dataset_cardinality_eager(input_dataset_; name=nothing) + desc = tf.EagerOp("ExperimentalDatasetCardinality") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + begin + begin + tf.add_input(desc, input_dataset_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_dataset_cardinality, [input_dataset_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing) - if tf.in_eager_mode() - experimental_dataset_cardinality_eager(input_dataset_; name=name) - else - experimental_dataset_cardinality_graph(input_dataset_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dataset_cardinality(input_dataset_; name=nothing) + if tf.in_eager_mode() + experimental_dataset_cardinality_eager(input_dataset_; name=name) + else + experimental_dataset_cardinality_graph(input_dataset_; name=name) + end end - end + end end @@ -41734,79 +76072,155 @@ end """ begin - function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - local desc - tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do - desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - batch_size_ = convert(Tensor{Int64}, batch_size_) - num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) + begin + function experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + local desc + tf.with_op_name(name, "ExperimentalNumaMapAndBatchDataset") do + desc = tf.NodeDescription("ExperimentalNumaMapAndBatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + 
end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + num_parallel_calls_ = convert(Tensor{Int64}, num_parallel_calls_) + begin + end + end + begin + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, num_parallel_calls_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + begin + if preserve_cardinality !== nothing + desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) + if tf.in_eager_mode() + experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, 
preserve_cardinality=preserve_cardinality) + else + experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) + end end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - desc = tf.EagerOp("ExperimentalNumaMapAndBatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - num_parallel_calls_ = convert(tf.EagerTensor, num_parallel_calls_) - drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, num_parallel_calls_) - tf.add_input(desc, drop_remainder_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - if preserve_cardinality !== nothing - desc["preserve_cardinality"] = Base.Bool(preserve_cardinality) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_numa_map_and_batch_dataset, [input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_numa_map_and_batch_dataset(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, preserve_cardinality=nothing) - if tf.in_eager_mode() - experimental_numa_map_and_batch_dataset_eager(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - else - experimental_numa_map_and_batch_dataset_graph(input_dataset_, other_arguments_, batch_size_, num_parallel_calls_, drop_remainder_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes, preserve_cardinality=preserve_cardinality) - end - end end @@ -41816,35 +76230,63 @@ end """ begin - function is_finite_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "IsFinite") do - desc = tf.NodeDescription("IsFinite") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function is_finite_graph(x_; name=nothing) + local 
desc + tf.with_op_name(name, "IsFinite") do + desc = tf.NodeDescription("IsFinite") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_finite_eager(x_; name=nothing) - desc = tf.EagerOp("IsFinite") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(is_finite, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_finite_eager(x_; name=nothing) + desc = tf.EagerOp("IsFinite") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(is_finite, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_finite(x_; name=nothing) - if tf.in_eager_mode() - is_finite_eager(x_; name=name) - else - is_finite_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_finite(x_; name=nothing) + if tf.in_eager_mode() + is_finite_eager(x_; name=name) + else + is_finite_graph(x_; name=name) + end end - end + end end @@ -41854,57 +76296,105 @@ end An Op to exchange data across TPU replicas. On each replica, the input is """ begin - function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - local desc - tf.with_op_name(name, "AllToAll") do - desc = tf.NodeDescription("AllToAll") - input_ = convert(Tensor{Any}, input_) - group_assignment_ = convert(Tensor{Int32}, group_assignment_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - if concat_dimension !== nothing - desc["concat_dimension"] = Base.Int(concat_dimension) + begin + function all_to_all_graph(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + local desc + tf.with_op_name(name, "AllToAll") do + desc = tf.NodeDescription("AllToAll") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, group_assignment_) + end + end + begin + begin + if concat_dimension !== nothing + desc["concat_dimension"] = Base.Int(concat_dimension) + end + end + begin + if split_dimension !== nothing + desc["split_dimension"] = Base.Int(split_dimension) + end + end + begin + if split_count !== nothing + desc["split_count"] = Base.Int(split_count) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + desc = tf.EagerOp("AllToAll") + input_ = convert(tf.EagerTensor, input_) + group_assignment_ = convert(tf.EagerTensor, group_assignment_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, group_assignment_) + end + end + begin + begin + if concat_dimension 
!== nothing + desc["concat_dimension"] = Base.Int(concat_dimension) + end + end + begin + if split_dimension !== nothing + desc["split_dimension"] = Base.Int(split_dimension) + end + end + begin + if split_count !== nothing + desc["split_count"] = Base.Int(split_count) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) + if tf.in_eager_mode() + all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + else + all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) + end end - if split_dimension !== nothing - desc["split_dimension"] = Base.Int(split_dimension) - end - if split_count !== nothing - desc["split_count"] = Base.Int(split_count) - end - end - tf.Tensor(tf.Operation(desc)) - end - function all_to_all_eager(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - desc = tf.EagerOp("AllToAll") - input_ = convert(tf.EagerTensor, input_) - group_assignment_ = convert(tf.EagerTensor, group_assignment_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - if concat_dimension !== nothing - desc["concat_dimension"] = Base.Int(concat_dimension) - end - if split_dimension !== nothing - desc["split_dimension"] = Base.Int(split_dimension) - end - if split_count !== nothing - desc["split_count"] = Base.Int(split_count) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(all_to_all, [input_, group_assignment_], name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function all_to_all(input_, group_assignment_; name=nothing, concat_dimension=nothing, split_dimension=nothing, split_count=nothing) - if tf.in_eager_mode() - all_to_all_eager(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) - else - all_to_all_graph(input_, group_assignment_; name=name, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count) - end - end end @@ -41914,56 +76404,94 @@ end """ begin - function take_many_sparse_from_tensors_map_graph(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TakeManySparseFromTensorsMap") do - desc = tf.NodeDescription("TakeManySparseFromTensorsMap") - sparse_handles_ = convert(Tensor{Int64}, sparse_handles_) - tf.add_input(desc, sparse_handles_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function take_many_sparse_from_tensors_map_graph(sparse_handles_; 
name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TakeManySparseFromTensorsMap") do + desc = tf.NodeDescription("TakeManySparseFromTensorsMap") + begin + begin + sparse_handles_ = convert(Tensor{Int64}, sparse_handles_) + begin + end + end + end + begin + begin + tf.add_input(desc, sparse_handles_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TakeManySparseFromTensorsMap") + sparse_handles_ = convert(tf.EagerTensor, sparse_handles_) + begin + begin + tf.add_input(desc, sparse_handles_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + else + take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function take_many_sparse_from_tensors_map_eager(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("TakeManySparseFromTensorsMap") - sparse_handles_ = convert(tf.EagerTensor, sparse_handles_) - tf.add_input(desc, sparse_handles_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(take_many_sparse_from_tensors_map, [sparse_handles_], name=nothing, dtype=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function take_many_sparse_from_tensors_map(sparse_handles_; name=nothing, dtype=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - take_many_sparse_from_tensors_map_eager(sparse_handles_; name=name, dtype=dtype, container=container, shared_name=shared_name) - else - take_many_sparse_from_tensors_map_graph(sparse_handles_; name=name, dtype=dtype, container=container, 
shared_name=shared_name) - end - end end @@ -41973,35 +76501,63 @@ end """ begin - function batch_matrix_diag_part_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixDiagPart") do - desc = tf.NodeDescription("BatchMatrixDiagPart") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function batch_matrix_diag_part_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixDiagPart") do + desc = tf.NodeDescription("BatchMatrixDiagPart") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_matrix_diag_part_eager(input_; name=nothing) - desc = tf.EagerOp("BatchMatrixDiagPart") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_matrix_diag_part_eager(input_; name=nothing) + desc = tf.EagerOp("BatchMatrixDiagPart") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_diag_part, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag_part(input_; name=nothing) - if tf.in_eager_mode() - batch_matrix_diag_part_eager(input_; name=name) - else - batch_matrix_diag_part_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_diag_part(input_; name=nothing) + if tf.in_eager_mode() + batch_matrix_diag_part_eager(input_; name=name) + else + batch_matrix_diag_part_graph(input_; name=name) + end end - end + end end @@ -42011,49 +76567,105 @@ end """ begin - function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "FixedLengthRecordDataset") do - desc = tf.NodeDescription("FixedLengthRecordDataset") - filenames_ = convert(Tensor{String}, filenames_) - header_bytes_ = convert(Tensor{Int64}, header_bytes_) - record_bytes_ = convert(Tensor{Int64}, record_bytes_) - footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) - buffer_size_ = convert(Tensor{Int64}, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - end - tf.Tensor(tf.Operation(desc)) - end - function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - desc = tf.EagerOp("FixedLengthRecordDataset") - filenames_ = convert(tf.EagerTensor, filenames_) - header_bytes_ = convert(tf.EagerTensor, header_bytes_) - record_bytes_ = convert(tf.EagerTensor, record_bytes_) - footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) - buffer_size_ = convert(tf.EagerTensor, buffer_size_) - tf.add_input(desc, filenames_) - tf.add_input(desc, header_bytes_) - 
tf.add_input(desc, record_bytes_) - tf.add_input(desc, footer_bytes_) - tf.add_input(desc, buffer_size_) - res = tf.execute(desc) - node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) - if tf.in_eager_mode() - fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) - else - fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + begin + function fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + local desc + tf.with_op_name(name, "FixedLengthRecordDataset") do + desc = tf.NodeDescription("FixedLengthRecordDataset") + begin + begin + filenames_ = convert(Tensor{String}, filenames_) + begin + end + end + begin + header_bytes_ = convert(Tensor{Int64}, header_bytes_) + begin + end + end + begin + record_bytes_ = convert(Tensor{Int64}, record_bytes_) + begin + end + end + begin + footer_bytes_ = convert(Tensor{Int64}, footer_bytes_) + begin + end + end + begin + buffer_size_ = convert(Tensor{Int64}, buffer_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, header_bytes_) + end + begin + tf.add_input(desc, record_bytes_) + end + begin + tf.add_input(desc, footer_bytes_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + desc = tf.EagerOp("FixedLengthRecordDataset") + filenames_ = convert(tf.EagerTensor, filenames_) + header_bytes_ = convert(tf.EagerTensor, header_bytes_) + record_bytes_ = convert(tf.EagerTensor, record_bytes_) + footer_bytes_ = convert(tf.EagerTensor, footer_bytes_) + buffer_size_ = convert(tf.EagerTensor, buffer_size_) + begin + begin + tf.add_input(desc, filenames_) + end + begin + tf.add_input(desc, header_bytes_) + end + begin + tf.add_input(desc, record_bytes_) + end + begin + tf.add_input(desc, footer_bytes_) + end + begin + tf.add_input(desc, buffer_size_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(fixed_length_record_dataset, [filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fixed_length_record_dataset(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=nothing) + if tf.in_eager_mode() + fixed_length_record_dataset_eager(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + else + fixed_length_record_dataset_graph(filenames_, header_bytes_, record_bytes_, footer_bytes_, buffer_size_; name=name) + end end - end + end end @@ -42063,45 +76675,85 @@ end """ begin - function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) - local desc - tf.with_op_name(name, "StackPush") do - desc = tf.NodeDescription("StackPush") - handle_ = 
convert(Tensor{String}, handle_) - elem_ = convert(Tensor{Any}, elem_) - (elem_,) = tf.tf_promote(elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) + begin + function stack_push_graph(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPush") do + desc = tf.NodeDescription("StackPush") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + elem_ = convert(Tensor{Any}, elem_) + begin + end + end + begin + (elem_,) = tf.tf_promote(elem_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, elem_) + end + end + begin + begin + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) + desc = tf.EagerOp("StackPush") + handle_ = convert(tf.EagerTensor, handle_) + elem_ = convert(tf.EagerTensor, elem_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, elem_) + end + end + begin + begin + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + end + begin + desc["T"] = tf.data_type(elem_) + end + res = tf.execute(desc) + node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.in_eager_mode() + stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stack_push_eager(handle_, elem_; name=nothing, swap_memory=nothing) - desc = tf.EagerOp("StackPush") - handle_ = convert(tf.EagerTensor, handle_) - elem_ = convert(tf.EagerTensor, elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) - end - desc["T"] = tf.data_type(elem_) - res = tf.execute(desc) - node = tf.TapeNode(stack_push, [handle_, elem_], name=nothing, swap_memory=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.in_eager_mode() - stack_push_eager(handle_, elem_; name=name, swap_memory=swap_memory) - else - stack_push_graph(handle_, elem_; name=name, swap_memory=swap_memory) - end - end end @@ -42111,41 +76763,65 @@ end """ begin - function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "PlaceholderV2") do - desc = tf.NodeDescription("PlaceholderV2") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function placeholder_v2_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "PlaceholderV2") do + desc = tf.NodeDescription("PlaceholderV2") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = 
Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function placeholder_v2_eager(; name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("PlaceholderV2") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function placeholder_v2_eager(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("PlaceholderV2") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(placeholder_v2, [], name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) - else - placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function placeholder_v2(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + placeholder_v2_eager(; name=name, dtype=dtype, shape=shape) + else + placeholder_v2_graph(; name=name, dtype=dtype, shape=shape) + end end - end + end end @@ -42155,41 +76831,81 @@ end """ begin - function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorInit") do - desc = tf.NodeDescription("MultiDeviceIteratorInit") - dataset_ = convert(Tensor{Any}, dataset_) - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_) - tf.add_input(desc, dataset_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, max_buffer_size_) - end - tf.Tensor(tf.Operation(desc)) - end - function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - desc = tf.EagerOp("MultiDeviceIteratorInit") - dataset_ = convert(tf.EagerTensor, dataset_) - multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) - max_buffer_size_ = convert(tf.EagerTensor, max_buffer_size_) - tf.add_input(desc, dataset_) - tf.add_input(desc, multi_device_iterator_) - tf.add_input(desc, max_buffer_size_) - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) - if tf.in_eager_mode() - multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) - else - multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + begin + function multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + local desc + 
tf.with_op_name(name, "MultiDeviceIteratorInit") do + desc = tf.NodeDescription("MultiDeviceIteratorInit") + begin + begin + dataset_ = convert(Tensor{Any}, dataset_) + begin + end + end + begin + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + begin + end + end + begin + max_buffer_size_ = convert(Tensor{Int64}, max_buffer_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, multi_device_iterator_) + end + begin + tf.add_input(desc, max_buffer_size_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + desc = tf.EagerOp("MultiDeviceIteratorInit") + dataset_ = convert(tf.EagerTensor, dataset_) + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) + max_buffer_size_ = convert(tf.EagerTensor, max_buffer_size_) + begin + begin + tf.add_input(desc, dataset_) + end + begin + tf.add_input(desc, multi_device_iterator_) + end + begin + tf.add_input(desc, max_buffer_size_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_init, [dataset_, multi_device_iterator_, max_buffer_size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_init(dataset_, multi_device_iterator_, max_buffer_size_; name=nothing) + if tf.in_eager_mode() + multi_device_iterator_init_eager(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + else + multi_device_iterator_init_graph(dataset_, multi_device_iterator_, max_buffer_size_; name=name) + end end - end + end end @@ -42199,41 +76915,81 @@ end Re-configures the GCS block cache with the new configuration values. 
""" begin - function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) - local desc - tf.with_op_name(name, "GcsConfigureBlockCache") do - desc = tf.NodeDescription("GcsConfigureBlockCache") - max_cache_size_ = convert(Tensor{Any}, max_cache_size_) - block_size_ = convert(Tensor{Any}, block_size_) - max_staleness_ = convert(Tensor{Any}, max_staleness_) - tf.add_input(desc, max_cache_size_) - tf.add_input(desc, block_size_) - tf.add_input(desc, max_staleness_) - end - tf.Tensor(tf.Operation(desc)) - end - function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) - desc = tf.EagerOp("GcsConfigureBlockCache") - max_cache_size_ = convert(tf.EagerTensor, max_cache_size_) - block_size_ = convert(tf.EagerTensor, block_size_) - max_staleness_ = convert(tf.EagerTensor, max_staleness_) - tf.add_input(desc, max_cache_size_) - tf.add_input(desc, block_size_) - tf.add_input(desc, max_staleness_) - res = tf.execute(desc) - node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) - if tf.in_eager_mode() - gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) - else - gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) + begin + function gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureBlockCache") do + desc = tf.NodeDescription("GcsConfigureBlockCache") + begin + begin + max_cache_size_ = convert(Tensor{Any}, max_cache_size_) + begin + end + end + begin + block_size_ = convert(Tensor{Any}, block_size_) + begin + end + end + begin + max_staleness_ = convert(Tensor{Any}, max_staleness_) + begin + end + end + end + begin + begin + tf.add_input(desc, max_cache_size_) + end + begin + tf.add_input(desc, block_size_) + end + begin + tf.add_input(desc, max_staleness_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=nothing) + desc = tf.EagerOp("GcsConfigureBlockCache") + max_cache_size_ = convert(tf.EagerTensor, max_cache_size_) + block_size_ = convert(tf.EagerTensor, block_size_) + max_staleness_ = convert(tf.EagerTensor, max_staleness_) + begin + begin + tf.add_input(desc, max_cache_size_) + end + begin + tf.add_input(desc, block_size_) + end + begin + tf.add_input(desc, max_staleness_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(gcs_configure_block_cache, [max_cache_size_, block_size_, max_staleness_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_block_cache(max_cache_size_, block_size_, max_staleness_; name=nothing) + if tf.in_eager_mode() + gcs_configure_block_cache_eager(max_cache_size_, block_size_, max_staleness_; name=name) + else + gcs_configure_block_cache_graph(max_cache_size_, block_size_, max_staleness_; name=name) + end end - end + end end @@ -42243,45 +76999,77 @@ end """ begin - function queue_dequeue_v2_graph(handle_; 
name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueV2") do - desc = tf.NodeDescription("QueueDequeueV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function queue_dequeue_v2_graph(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueV2") do + desc = tf.NodeDescription("QueueDequeueV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) - end - function queue_dequeue_v2_eager(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeueV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_v2, [handle_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_v2(handle_; name=nothing, component_types=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - queue_dequeue_v2_eager(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_v2_graph(handle_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -42291,41 +77079,81 @@ end """ begin - function transpose_graph(x_, perm_; name=nothing) - local desc - tf.with_op_name(name, "Transpose") do - desc = tf.NodeDescription("Transpose") - x_ = convert(Tensor{Any}, x_) - perm_ = convert(Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = 
tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) + begin + function transpose_graph(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "Transpose") do + desc = tf.NodeDescription("Transpose") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + perm_ = convert(Tensor{Int32}, perm_) + begin + end + end + begin + (perm_,) = tf.tf_promote(perm_) + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, perm_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function transpose_eager(x_, perm_; name=nothing) - desc = tf.EagerOp("Transpose") - x_ = convert(tf.EagerTensor, x_) - perm_ = convert(tf.EagerTensor, perm_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - desc["T"] = tf.data_type(x_) - desc["Tperm"] = tf.data_type(perm_) - res = tf.execute(desc) - node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function transpose_eager(x_, perm_; name=nothing) + desc = tf.EagerOp("Transpose") + x_ = convert(tf.EagerTensor, x_) + perm_ = convert(tf.EagerTensor, perm_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, perm_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Tperm"] = tf.data_type(perm_) + end + res = tf.execute(desc) + node = tf.TapeNode(transpose, [x_, perm_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function transpose(x_, perm_; name=nothing) - if tf.in_eager_mode() - transpose_eager(x_, perm_; name=name) - else - transpose_graph(x_, perm_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function transpose(x_, perm_; name=nothing) + if tf.in_eager_mode() + transpose_eager(x_, perm_; name=name) + else + transpose_graph(x_, perm_; name=name) + end end - end + end end @@ -42335,58 +77163,92 @@ end Retrieve embedding parameters for a single table. 
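A minimal sketch of calling the wrapper below in eager mode (hypothetical
values; the three returned tensors are taken to be the parameters, ms, and mom
accumulators of the TensorFlow op, but that naming is an assumption, since the
wrapper itself simply returns the op's three outputs):

    # hypothetical call; pass either table_id or table_name
    params, ms, mom = retrieve_tpu_embedding_rms_prop_parameters(
        table_id=0, num_shards=1, shard_id=0)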
""" begin - function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function retrieve_tpu_embedding_rms_prop_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingRMSPropParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_rms_prop_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingRMSPropParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - 
end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_rms_prop_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_rms_prop_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_rms_prop_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_rms_prop_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -42396,35 +77258,63 @@ end """ begin - function ifft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "IFFT") do - desc = tf.NodeDescription("IFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - tf.Tensor(tf.Operation(desc)) - end - function ifft_eager(input_; name=nothing) - desc = tf.EagerOp("IFFT") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(ifft, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ifft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "IFFT") do + desc = tf.NodeDescription("IFFT") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft(input_; name=nothing) - if tf.in_eager_mode() - ifft_eager(input_; name=name) - else - ifft_graph(input_; name=name) + begin + function ifft_eager(input_; name=nothing) + desc = tf.EagerOp("IFFT") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(ifft, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] end end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ifft(input_; name=nothing) + if tf.in_eager_mode() + ifft_eager(input_; name=name) + else + ifft_graph(input_; name=name) + end + end + end end @@ -42434,52 +77324,112 @@ end """ begin - function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentSumWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, 
num_segments_)
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
-        desc = tf.EagerOp("SparseSegmentSumWithNumSegments")
-        data_ = convert(tf.EagerTensor, data_)
-        indices_ = convert(tf.EagerTensor, indices_)
-        segment_ids_ = convert(tf.EagerTensor, segment_ids_)
-        num_segments_ = convert(tf.EagerTensor, num_segments_)
-        tf.add_input(desc, data_)
-        tf.add_input(desc, indices_)
-        tf.add_input(desc, segment_ids_)
-        tf.add_input(desc, num_segments_)
-        desc["T"] = tf.data_type(data_)
-        desc["Tidx"] = tf.data_type(indices_)
-        desc["Tnumsegments"] = tf.data_type(num_segments_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
-        if tf.in_eager_mode()
-            sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
-        else
-            sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
-        end
-    end
+    begin
+        function sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing)
+            local desc
+            tf.with_op_name(name, "SparseSegmentSumWithNumSegments") do
+                desc = tf.NodeDescription("SparseSegmentSumWithNumSegments")
+                begin
+                    begin
+                        data_ = convert(Tensor{Any}, data_)
+                        begin
+                        end
+                    end
+                    begin
+                        indices_ = convert(Tensor{Int32}, indices_)
+                        begin
+                            indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
+                        end
+                    end
+                    begin
+                        segment_ids_ = convert(Tensor{Int32}, segment_ids_)
+                        begin
+                        end
+                    end
+                    begin
+                        num_segments_ = convert(Tensor{Int32}, num_segments_)
+                        begin
+                        end
+                    end
+                    begin
+                        (num_segments_,) = tf.tf_promote(num_segments_)
+                    end
+                    begin
+                        (data_,) = tf.tf_promote(data_)
+                    end
+                    begin
+                        (indices_,) = tf.tf_promote(indices_)
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, data_)
+                    end
+                    begin
+                        tf.add_input(desc, indices_)
+                    end
+                    begin
+                        tf.add_input(desc, segment_ids_)
+                    end
+                    begin
+                        tf.add_input(desc, num_segments_)
+                    end
+                end
+                begin
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing)
+            desc = tf.EagerOp("SparseSegmentSumWithNumSegments")
+            data_ = convert(tf.EagerTensor, data_)
+            indices_ = convert(tf.EagerTensor, indices_)
+            segment_ids_ = convert(tf.EagerTensor, segment_ids_)
+            num_segments_ = convert(tf.EagerTensor, num_segments_)
+            begin
+                begin
+                    tf.add_input(desc, data_)
+                end
+                begin
+                    tf.add_input(desc, indices_)
+                end
+                begin
+                    tf.add_input(desc, segment_ids_)
+                end
+                begin
+                    tf.add_input(desc, num_segments_)
+                end
+            end
+            begin
+            end
+            begin
+                desc["T"] = tf.data_type(data_)
+            end
+            begin
+                desc["Tidx"] = tf.data_type(indices_)
+            end
+            begin
+                desc["Tnumsegments"] = tf.data_type(num_segments_)
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(sparse_segment_sum_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sum_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing)
+                if tf.in_eager_mode()
+                    sparse_segment_sum_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name)
+                else
+                    sparse_segment_sum_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name)
+                end
+            end
+    end
 end
@@ -42489,33 +77439,57 @@ end
 """
 begin
-    function queue_is_closed_v2_graph(handle_; name=nothing)
-        local desc
-        tf.with_op_name(name, "QueueIsClosedV2") do
-            desc = tf.NodeDescription("QueueIsClosedV2")
-            handle_ = convert(Tensor{Any}, handle_)
-            tf.add_input(desc, handle_)
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function queue_is_closed_v2_eager(handle_; name=nothing)
-        desc = tf.EagerOp("QueueIsClosedV2")
-        handle_ = convert(tf.EagerTensor, handle_)
-        tf.add_input(desc, handle_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed_v2(handle_; name=nothing)
-        if tf.in_eager_mode()
-            queue_is_closed_v2_eager(handle_; name=name)
-        else
-            queue_is_closed_v2_graph(handle_; name=name)
-        end
-    end
+    begin
+        function queue_is_closed_v2_graph(handle_; name=nothing)
+            local desc
+            tf.with_op_name(name, "QueueIsClosedV2") do
+                desc = tf.NodeDescription("QueueIsClosedV2")
+                begin
+                    begin
+                        handle_ = convert(Tensor{Any}, handle_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, handle_)
+                    end
+                end
+                begin
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function queue_is_closed_v2_eager(handle_; name=nothing)
+            desc = tf.EagerOp("QueueIsClosedV2")
+            handle_ = convert(tf.EagerTensor, handle_)
+            begin
+                begin
+                    tf.add_input(desc, handle_)
+                end
+            end
+            begin
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(queue_is_closed_v2, [handle_], name=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_is_closed_v2(handle_; name=nothing)
+                if tf.in_eager_mode()
+                    queue_is_closed_v2_eager(handle_; name=name)
+                else
+                    queue_is_closed_v2_graph(handle_; name=name)
+                end
+            end
+    end
 end
@@ -42525,74 +77499,156 @@ end
 """
 begin
-    function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
-        local desc
-        tf.with_op_name(name, "ParameterizedTruncatedNormal") do
-            desc = tf.NodeDescription("ParameterizedTruncatedNormal")
-            shape_ = convert(Tensor{Any}, shape_)
-            means_ = convert(Tensor{Any}, means_)
-            stdevs_ = convert(Tensor{Any}, stdevs_)
-            minvals_ = convert(Tensor{Any}, minvals_)
-            maxvals_ = convert(Tensor{Any}, maxvals_)
-            (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_)
-            (shape_,) = tf.tf_promote(shape_)
-            tf.add_input(desc, shape_)
-            tf.add_input(desc, means_)
-            tf.add_input(desc, stdevs_)
-            tf.add_input(desc, minvals_)
-            tf.add_input(desc, maxvals_)
-            if seed !== nothing
-                desc["seed"] = Base.Int(seed)
-            end
-            if seed2 !== nothing
-                desc["seed2"] = Base.Int(seed2)
-            end
-            if dtype !== nothing
-                desc["dtype"] = Base.identity(dtype)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
-        desc = tf.EagerOp("ParameterizedTruncatedNormal")
-        shape_ = convert(tf.EagerTensor, shape_)
-        means_ = convert(tf.EagerTensor, means_)
-        stdevs_ = convert(tf.EagerTensor, stdevs_)
-        minvals_ = convert(tf.EagerTensor, minvals_)
-        maxvals_ = convert(tf.EagerTensor, maxvals_)
-        tf.add_input(desc, shape_)
-        tf.add_input(desc, means_)
-        tf.add_input(desc, stdevs_)
-        tf.add_input(desc, minvals_)
-        tf.add_input(desc, maxvals_)
-        if seed !== nothing
-            desc["seed"] = Base.Int(seed)
-        end
-        if seed2 !== nothing
-            desc["seed2"] = Base.Int(seed2)
-        end
-        if dtype !== nothing
-            desc["dtype"] = Base.identity(dtype)
-        end
-        desc["T"] = tf.data_type(shape_)
-        desc["dtype"] = tf.data_type(means_)
-        desc["dtype"] = tf.data_type(stdevs_)
-        desc["dtype"] = tf.data_type(minvals_)
-        desc["dtype"] = tf.data_type(maxvals_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
-        if tf.in_eager_mode()
-            parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
-        else
-            parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
-        end
-    end
+    begin
+        function parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+            local desc
+            tf.with_op_name(name, "ParameterizedTruncatedNormal") do
+                desc = tf.NodeDescription("ParameterizedTruncatedNormal")
+                begin
+                    begin
+                        shape_ = convert(Tensor{Any}, shape_)
+                        begin
+                        end
+                    end
+                    begin
+                        means_ = convert(Tensor{Any}, means_)
+                        begin
+                        end
+                    end
+                    begin
+                        stdevs_ = convert(Tensor{Any}, stdevs_)
+                        begin
+                        end
+                    end
+                    begin
+                        minvals_ = convert(Tensor{Any}, minvals_)
+                        begin
+                        end
+                    end
+                    begin
+                        maxvals_ = convert(Tensor{Any}, maxvals_)
+                        begin
+                        end
+                    end
+                    begin
+                        (means_, stdevs_, minvals_, maxvals_) = tf.tf_promote(means_, stdevs_, minvals_, maxvals_)
+                    end
+                    begin
+                        (shape_,) = tf.tf_promote(shape_)
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, shape_)
+                    end
+                    begin
+                        tf.add_input(desc, means_)
+                    end
+                    begin
+                        tf.add_input(desc, stdevs_)
+                    end
+                    begin
+                        tf.add_input(desc, minvals_)
+                    end
+                    begin
+                        tf.add_input(desc, maxvals_)
+                    end
+                end
+                begin
+                    begin
+                        if seed !== nothing
+                            desc["seed"] = Base.Int(seed)
+                        end
+                    end
+                    begin
+                        if seed2 !== nothing
+                            desc["seed2"] = Base.Int(seed2)
+                        end
+                    end
+                    begin
+                        if dtype !== nothing
+                            desc["dtype"] = Base.identity(dtype)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+            desc = tf.EagerOp("ParameterizedTruncatedNormal")
+            shape_ = convert(tf.EagerTensor, shape_)
+            means_ = convert(tf.EagerTensor, means_)
+            stdevs_ = convert(tf.EagerTensor, stdevs_)
+            minvals_ = convert(tf.EagerTensor, minvals_)
+            maxvals_ = convert(tf.EagerTensor, maxvals_)
+            begin
+                begin
+                    tf.add_input(desc, shape_)
+                end
+                begin
+                    tf.add_input(desc, means_)
+                end
+                begin
+                    tf.add_input(desc, stdevs_)
+                end
+                begin
+                    tf.add_input(desc, minvals_)
+                end
+                begin
+                    tf.add_input(desc, maxvals_)
+                end
+            end
+            begin
+                begin
+                    if seed !== nothing
+                        desc["seed"] = Base.Int(seed)
+                    end
+                end
+                begin
+                    if seed2 !== nothing
+                        desc["seed2"] = Base.Int(seed2)
+                    end
+                end
+                begin
+                    if dtype !== nothing
+                        desc["dtype"] = Base.identity(dtype)
+                    end
+                end
+            end
+            begin
+                desc["T"] = tf.data_type(shape_)
+            end
+            begin
+                desc["dtype"] = tf.data_type(means_)
+            end
+            begin
+                desc["dtype"] = tf.data_type(stdevs_)
+            end
+            begin
+                desc["dtype"] = tf.data_type(minvals_)
+            end
+            begin
+                desc["dtype"] = tf.data_type(maxvals_)
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(parameterized_truncated_normal, [shape_, means_, stdevs_, minvals_, maxvals_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parameterized_truncated_normal(shape_, means_, stdevs_, minvals_, maxvals_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing)
+                if tf.in_eager_mode()
+                    parameterized_truncated_normal_eager(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+                else
+                    parameterized_truncated_normal_graph(shape_, means_, stdevs_, minvals_, maxvals_; name=name, seed=seed, seed2=seed2, dtype=dtype)
+                end
+            end
+    end
 end
@@ -42602,35 +77658,63 @@ end
 """
 begin
-    function diag_part_graph(input_; name=nothing)
-        local desc
-        tf.with_op_name(name, "DiagPart") do
-            desc = tf.NodeDescription("DiagPart")
-            input_ = convert(Tensor{Any}, input_)
-            (input_,) = tf.tf_promote(input_)
-            tf.add_input(desc, input_)
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function diag_part_eager(input_; name=nothing)
-        desc = tf.EagerOp("DiagPart")
-        input_ = convert(tf.EagerTensor, input_)
-        tf.add_input(desc, input_)
-        desc["T"] = tf.data_type(input_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(diag_part, [input_], name=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag_part(input_; name=nothing)
-        if tf.in_eager_mode()
-            diag_part_eager(input_; name=name)
-        else
-            diag_part_graph(input_; name=name)
-        end
-    end
+    begin
+        function diag_part_graph(input_; name=nothing)
+            local desc
+            tf.with_op_name(name, "DiagPart") do
+                desc = tf.NodeDescription("DiagPart")
+                begin
+                    begin
+                        input_ = convert(Tensor{Any}, input_)
+                        begin
+                        end
+                    end
+                    begin
+                        (input_,) = tf.tf_promote(input_)
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, input_)
+                    end
+                end
+                begin
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function diag_part_eager(input_; name=nothing)
+            desc = tf.EagerOp("DiagPart")
+            input_ = convert(tf.EagerTensor, input_)
+            begin
+                begin
+                    tf.add_input(desc, input_)
+                end
+            end
+            begin
+            end
+            begin
+                desc["T"] = tf.data_type(input_)
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(diag_part, [input_], name=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag_part(input_; name=nothing)
+                if tf.in_eager_mode()
+                    diag_part_eager(input_; name=name)
+                else
+                    diag_part_graph(input_; name=name)
+                end
+            end
+    end
 end
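[All of the regenerated wrappers above follow the same dispatch pattern: the tf.@op entry point takes the _eager path when tf.in_eager_mode() is true and the _graph path otherwise. A minimal usage sketch for diag_part, assuming the generated functions are reachable under TensorFlow.Ops and that constant lifts Julia arrays (neither is shown in this patch):

    using TensorFlow
    const tf = TensorFlow
    # Graph mode builds a DiagPart node; eager mode would instead run it
    # immediately through diag_part_eager and record a TapeNode for gradients.
    x = tf.constant([1.0 2.0; 3.0 4.0])
    d = tf.Ops.diag_part(x)  # 1-D tensor holding the diagonal, [1.0, 4.0]
]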
@@ -42640,47 +77724,91 @@ end
 """
 begin
-    function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
-        local desc
-        tf.with_op_name(name, "RegexReplace") do
-            desc = tf.NodeDescription("RegexReplace")
-            input_ = convert(Tensor{String}, input_)
-            pattern_ = convert(Tensor{String}, pattern_)
-            rewrite_ = convert(Tensor{String}, rewrite_)
-            tf.add_input(desc, input_)
-            tf.add_input(desc, pattern_)
-            tf.add_input(desc, rewrite_)
-            if replace_global !== nothing
-                desc["replace_global"] = Base.Bool(replace_global)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
-        desc = tf.EagerOp("RegexReplace")
-        input_ = convert(tf.EagerTensor, input_)
-        pattern_ = convert(tf.EagerTensor, pattern_)
-        rewrite_ = convert(tf.EagerTensor, rewrite_)
-        tf.add_input(desc, input_)
-        tf.add_input(desc, pattern_)
-        tf.add_input(desc, rewrite_)
-        if replace_global !== nothing
-            desc["replace_global"] = Base.Bool(replace_global)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
-        if tf.in_eager_mode()
-            regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
-        else
-            regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
-        end
-    end
+    begin
+        function regex_replace_graph(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+            local desc
+            tf.with_op_name(name, "RegexReplace") do
+                desc = tf.NodeDescription("RegexReplace")
+                begin
+                    begin
+                        input_ = convert(Tensor{String}, input_)
+                        begin
+                        end
+                    end
+                    begin
+                        pattern_ = convert(Tensor{String}, pattern_)
+                        begin
+                        end
+                    end
+                    begin
+                        rewrite_ = convert(Tensor{String}, rewrite_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, input_)
+                    end
+                    begin
+                        tf.add_input(desc, pattern_)
+                    end
+                    begin
+                        tf.add_input(desc, rewrite_)
+                    end
+                end
+                begin
+                    begin
+                        if replace_global !== nothing
+                            desc["replace_global"] = Base.Bool(replace_global)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function regex_replace_eager(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+            desc = tf.EagerOp("RegexReplace")
+            input_ = convert(tf.EagerTensor, input_)
+            pattern_ = convert(tf.EagerTensor, pattern_)
+            rewrite_ = convert(tf.EagerTensor, rewrite_)
+            begin
+                begin
+                    tf.add_input(desc, input_)
+                end
+                begin
+                    tf.add_input(desc, pattern_)
+                end
+                begin
+                    tf.add_input(desc, rewrite_)
+                end
+            end
+            begin
+                begin
+                    if replace_global !== nothing
+                        desc["replace_global"] = Base.Bool(replace_global)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(regex_replace, [input_, pattern_, rewrite_], name=nothing, replace_global=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function regex_replace(input_, pattern_, rewrite_; name=nothing, replace_global=nothing)
+                if tf.in_eager_mode()
+                    regex_replace_eager(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
+                else
+                    regex_replace_graph(input_, pattern_, rewrite_; name=name, replace_global=replace_global)
+                end
+            end
+    end
 end
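[The regex_replace wrapper takes three string tensors; plain Julia strings should also work because the wrapper converts its arguments itself (convert(Tensor{String}, ...) in graph mode, convert(tf.EagerTensor, ...) in eager mode). A hedged sketch, reusing the tf.Ops assumption from the previous note:

    # replace_global=true rewrites every match rather than only the first;
    # the attribute is only set on the op when it is passed explicitly.
    s = tf.Ops.regex_replace("a-b-c", "-", "_"; replace_global=true)
]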
@@ -42690,63 +77818,129 @@ end
 """
 begin
-    function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
-        local desc
-        tf.with_op_name(name, "SparseTensorDenseMatMul") do
-            desc = tf.NodeDescription("SparseTensorDenseMatMul")
-            a_indices_ = convert(Tensor{Int64}, a_indices_)
-            a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1)
-            a_values_ = convert(Tensor{Any}, a_values_)
-            a_shape_ = convert(Tensor{Int64}, a_shape_)
-            b_ = convert(Tensor{Any}, b_)
-            (a_values_, b_) = tf.tf_promote(a_values_, b_)
-            (a_indices_,) = tf.tf_promote(a_indices_)
-            tf.add_input(desc, a_indices_)
-            tf.add_input(desc, a_values_)
-            tf.add_input(desc, a_shape_)
-            tf.add_input(desc, b_)
-            if adjoint_a !== nothing
-                desc["adjoint_a"] = Base.Bool(adjoint_a)
-            end
-            if adjoint_b !== nothing
-                desc["adjoint_b"] = Base.Bool(adjoint_b)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
-        desc = tf.EagerOp("SparseTensorDenseMatMul")
-        a_indices_ = convert(tf.EagerTensor, a_indices_)
-        a_values_ = convert(tf.EagerTensor, a_values_)
-        a_shape_ = convert(tf.EagerTensor, a_shape_)
-        b_ = convert(tf.EagerTensor, b_)
-        tf.add_input(desc, a_indices_)
-        tf.add_input(desc, a_values_)
-        tf.add_input(desc, a_shape_)
-        tf.add_input(desc, b_)
-        if adjoint_a !== nothing
-            desc["adjoint_a"] = Base.Bool(adjoint_a)
-        end
-        if adjoint_b !== nothing
-            desc["adjoint_b"] = Base.Bool(adjoint_b)
-        end
-        desc["Tindices"] = tf.data_type(a_indices_)
-        desc["T"] = tf.data_type(a_values_)
-        desc["T"] = tf.data_type(b_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
-        if tf.in_eager_mode()
-            sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
-        else
-            sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
-        end
-    end
+    begin
+        function sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+            local desc
+            tf.with_op_name(name, "SparseTensorDenseMatMul") do
+                desc = tf.NodeDescription("SparseTensorDenseMatMul")
+                begin
+                    begin
+                        a_indices_ = convert(Tensor{Int64}, a_indices_)
+                        begin
+                            a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1)
+                        end
+                    end
+                    begin
+                        a_values_ = convert(Tensor{Any}, a_values_)
+                        begin
+                        end
+                    end
+                    begin
+                        a_shape_ = convert(Tensor{Int64}, a_shape_)
+                        begin
+                        end
+                    end
+                    begin
+                        b_ = convert(Tensor{Any}, b_)
+                        begin
+                        end
+                    end
+                    begin
+                        (a_values_, b_) = tf.tf_promote(a_values_, b_)
+                    end
+                    begin
+                        (a_indices_,) = tf.tf_promote(a_indices_)
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, a_indices_)
+                    end
+                    begin
+                        tf.add_input(desc, a_values_)
+                    end
+                    begin
+                        tf.add_input(desc, a_shape_)
+                    end
+                    begin
+                        tf.add_input(desc, b_)
+                    end
+                end
+                begin
+                    begin
+                        if adjoint_a !== nothing
+                            desc["adjoint_a"] = Base.Bool(adjoint_a)
+                        end
+                    end
+                    begin
+                        if adjoint_b !== nothing
+                            desc["adjoint_b"] = Base.Bool(adjoint_b)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+            desc = tf.EagerOp("SparseTensorDenseMatMul")
+            a_indices_ = convert(tf.EagerTensor, a_indices_)
+            a_values_ = convert(tf.EagerTensor, a_values_)
+            a_shape_ = convert(tf.EagerTensor, a_shape_)
+            b_ = convert(tf.EagerTensor, b_)
+            begin
+                begin
+                    tf.add_input(desc, a_indices_)
+                end
+                begin
+                    tf.add_input(desc, a_values_)
+                end
+                begin
+                    tf.add_input(desc, a_shape_)
+                end
+                begin
+                    tf.add_input(desc, b_)
+                end
+            end
+            begin
+                begin
+                    if adjoint_a !== nothing
+                        desc["adjoint_a"] = Base.Bool(adjoint_a)
+                    end
+                end
+                begin
+                    if adjoint_b !== nothing
+                        desc["adjoint_b"] = Base.Bool(adjoint_b)
+                    end
+                end
+            end
+            begin
+                desc["Tindices"] = tf.data_type(a_indices_)
+            end
+            begin
+                desc["T"] = tf.data_type(a_values_)
+            end
+            begin
+                desc["T"] = tf.data_type(b_)
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(sparse_tensor_dense_mat_mul, [a_indices_, a_values_, a_shape_, b_], name=nothing, adjoint_a=nothing, adjoint_b=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_mat_mul(a_indices_, a_values_, a_shape_, b_; name=nothing, adjoint_a=nothing, adjoint_b=nothing)
+                if tf.in_eager_mode()
+                    sparse_tensor_dense_mat_mul_eager(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                else
+                    sparse_tensor_dense_mat_mul_graph(a_indices_, a_values_, a_shape_, b_; name=name, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
+                end
+            end
+    end
 end
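[Note an asymmetry visible in the hunk above: the graph wrapper shifts a_indices_ by one (a_indices_ - 1) to map Julia's 1-based indices onto the kernel's 0-based layout, while the eager wrapper passes the indices through unchanged, so eager callers would have to supply 0-based indices themselves. A sketch of the graph-mode call, under the same tf.Ops assumption:

    # A = [2 0; 0 3] stored as COO (1-based indices here; the graph wrapper
    # shifts them internally), multiplied by a dense 2x2 identity matrix.
    a_indices = [1 1; 2 2]
    a_values  = [2.0, 3.0]
    a_shape   = [2, 2]
    b = [1.0 0.0; 0.0 1.0]
    y = tf.Ops.sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b)
]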
@@ -42756,67 +77950,119 @@ end
 """
 begin
-    function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
-        local desc
-        tf.with_op_name(name, "MapDefun") do
-            desc = tf.NodeDescription("MapDefun")
-            arguments_ = [convert(Tensor{Any}, x) for x = arguments_]
-            captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_]
-            tf.add_input(desc, arguments_)
-            tf.add_input(desc, captured_inputs_)
-            if Targuments !== nothing
-                desc["Targuments"] = map(Base.identity, Targuments)
-            end
-            if Tcaptured !== nothing
-                desc["Tcaptured"] = map(Base.identity, Tcaptured)
-            end
-            if output_types !== nothing
-                desc["output_types"] = map(Base.identity, output_types)
-            end
-            if output_shapes !== nothing
-                desc["output_shapes"] = map(Base.identity, output_shapes)
-            end
-            if f !== nothing
-                desc["f"] = Base.identity(f)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
-        desc = tf.EagerOp("MapDefun")
-        arguments_ = convert(tf.EagerTensor, arguments_)
-        captured_inputs_ = convert(tf.EagerTensor, captured_inputs_)
-        tf.add_input(desc, arguments_)
-        tf.add_input(desc, captured_inputs_)
-        if Targuments !== nothing
-            desc["Targuments"] = map(Base.identity, Targuments)
-        end
-        if Tcaptured !== nothing
-            desc["Tcaptured"] = map(Base.identity, Tcaptured)
-        end
-        if output_types !== nothing
-            desc["output_types"] = map(Base.identity, output_types)
-        end
-        if output_shapes !== nothing
-            desc["output_shapes"] = map(Base.identity, output_shapes)
-        end
-        if f !== nothing
-            desc["f"] = Base.identity(f)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
-        if tf.in_eager_mode()
-            map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
-        else
-            map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
-        end
-    end
+    begin
+        function map_defun_graph(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+            local desc
+            tf.with_op_name(name, "MapDefun") do
+                desc = tf.NodeDescription("MapDefun")
+                begin
+                    begin
+                        arguments_ = [convert(Tensor{Any}, x) for x = arguments_]
+                        begin
+                        end
+                    end
+                    begin
+                        captured_inputs_ = [convert(Tensor{Any}, x) for x = captured_inputs_]
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, arguments_)
+                    end
+                    begin
+                        tf.add_input(desc, captured_inputs_)
+                    end
+                end
+                begin
+                    begin
+                        if Targuments !== nothing
+                            desc["Targuments"] = map(Base.identity, Targuments)
+                        end
+                    end
+                    begin
+                        if Tcaptured !== nothing
+                            desc["Tcaptured"] = map(Base.identity, Tcaptured)
+                        end
+                    end
+                    begin
+                        if output_types !== nothing
+                            desc["output_types"] = map(Base.identity, output_types)
+                        end
+                    end
+                    begin
+                        if output_shapes !== nothing
+                            desc["output_shapes"] = map(Base.identity, output_shapes)
+                        end
+                    end
+                    begin
+                        if f !== nothing
+                            desc["f"] = Base.identity(f)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function map_defun_eager(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+            desc = tf.EagerOp("MapDefun")
+            arguments_ = convert(tf.EagerTensor, arguments_)
+            captured_inputs_ = convert(tf.EagerTensor, captured_inputs_)
+            begin
+                begin
+                    tf.add_input(desc, arguments_)
+                end
+                begin
+                    tf.add_input(desc, captured_inputs_)
+                end
+            end
+            begin
+                begin
+                    if Targuments !== nothing
+                        desc["Targuments"] = map(Base.identity, Targuments)
+                    end
+                end
+                begin
+                    if Tcaptured !== nothing
+                        desc["Tcaptured"] = map(Base.identity, Tcaptured)
+                    end
+                end
+                begin
+                    if output_types !== nothing
+                        desc["output_types"] = map(Base.identity, output_types)
+                    end
+                end
+                begin
+                    if output_shapes !== nothing
+                        desc["output_shapes"] = map(Base.identity, output_shapes)
+                    end
+                end
+                begin
+                    if f !== nothing
+                        desc["f"] = Base.identity(f)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(map_defun, [arguments_, captured_inputs_], name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_defun(arguments_, captured_inputs_; name=nothing, Targuments=nothing, Tcaptured=nothing, output_types=nothing, output_shapes=nothing, f=nothing)
+                if tf.in_eager_mode()
+                    map_defun_eager(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
+                else
+                    map_defun_graph(arguments_, captured_inputs_; name=name, Targuments=Targuments, Tcaptured=Tcaptured, output_types=output_types, output_shapes=output_shapes, f=f)
+                end
+            end
+    end
 end
@@ -42826,74 +78072,124 @@ end
 """
 begin
-    function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
-        local desc
-        tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do
-            desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler")
-            true_classes_ = convert(Tensor{Int64}, true_classes_)
-            tf.add_input(desc, true_classes_)
-            if num_true !== nothing
-                desc["num_true"] = Base.Int(num_true)
-            end
-            if num_sampled !== nothing
-                desc["num_sampled"] = Base.Int(num_sampled)
-            end
-            if unique !== nothing
-                desc["unique"] = Base.Bool(unique)
-            end
-            if range_max !== nothing
-                desc["range_max"] = Base.Int(range_max)
-            end
-            if seed !== nothing
-                desc["seed"] = Base.Int(seed)
-            end
-            if seed2 !== nothing
-                desc["seed2"] = Base.Int(seed2)
-            end
-        end
-        out = tf.Tensor[]
-        op = tf.Operation(desc)
-        for out_idx = 1:3
-            push!(out, tf.Tensor(op, out_idx))
-        end
-        out
-    end
-    function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
-        desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler")
-        true_classes_ = convert(tf.EagerTensor, true_classes_)
-        tf.add_input(desc, true_classes_)
-        if num_true !== nothing
-            desc["num_true"] = Base.Int(num_true)
-        end
-        if num_sampled !== nothing
-            desc["num_sampled"] = Base.Int(num_sampled)
-        end
-        if unique !== nothing
-            desc["unique"] = Base.Bool(unique)
-        end
-        if range_max !== nothing
-            desc["range_max"] = Base.Int(range_max)
-        end
-        if seed !== nothing
-            desc["seed"] = Base.Int(seed)
-        end
-        if seed2 !== nothing
-            desc["seed2"] = Base.Int(seed2)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
-        if tf.in_eager_mode()
-            thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
-        else
-            thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
-        end
-    end
+    begin
+        function thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+            local desc
+            tf.with_op_name(name, "ThreadUnsafeUnigramCandidateSampler") do
+                desc = tf.NodeDescription("ThreadUnsafeUnigramCandidateSampler")
+                begin
+                    begin
+                        true_classes_ = convert(Tensor{Int64}, true_classes_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, true_classes_)
+                    end
+                end
+                begin
+                    begin
+                        if num_true !== nothing
+                            desc["num_true"] = Base.Int(num_true)
+                        end
+                    end
+                    begin
+                        if num_sampled !== nothing
+                            desc["num_sampled"] = Base.Int(num_sampled)
+                        end
+                    end
+                    begin
+                        if unique !== nothing
+                            desc["unique"] = Base.Bool(unique)
+                        end
+                    end
+                    begin
+                        if range_max !== nothing
+                            desc["range_max"] = Base.Int(range_max)
+                        end
+                    end
+                    begin
+                        if seed !== nothing
+                            desc["seed"] = Base.Int(seed)
+                        end
+                    end
+                    begin
+                        if seed2 !== nothing
+                            desc["seed2"] = Base.Int(seed2)
+                        end
+                    end
+                end
+            end
+            begin
+                out = tf.Tensor[]
+                op = tf.Operation(desc)
+                for out_idx = 1:3
+                    push!(out, tf.Tensor(op, out_idx))
+                end
+                out
+            end
+        end
+    end
+    begin
+        function thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+            desc = tf.EagerOp("ThreadUnsafeUnigramCandidateSampler")
+            true_classes_ = convert(tf.EagerTensor, true_classes_)
+            begin
+                begin
+                    tf.add_input(desc, true_classes_)
+                end
+            end
+            begin
+                begin
+                    if num_true !== nothing
+                        desc["num_true"] = Base.Int(num_true)
+                    end
+                end
+                begin
+                    if num_sampled !== nothing
+                        desc["num_sampled"] = Base.Int(num_sampled)
+                    end
+                end
+                begin
+                    if unique !== nothing
+                        desc["unique"] = Base.Bool(unique)
+                    end
+                end
+                begin
+                    if range_max !== nothing
+                        desc["range_max"] = Base.Int(range_max)
+                    end
+                end
+                begin
+                    if seed !== nothing
+                        desc["seed"] = Base.Int(seed)
+                    end
+                end
+                begin
+                    if seed2 !== nothing
+                        desc["seed2"] = Base.Int(seed2)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(thread_unsafe_unigram_candidate_sampler, [true_classes_], name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function thread_unsafe_unigram_candidate_sampler(true_classes_; name=nothing, num_true=nothing, num_sampled=nothing, unique=nothing, range_max=nothing, seed=nothing, seed2=nothing)
+                if tf.in_eager_mode()
+                    thread_unsafe_unigram_candidate_sampler_eager(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+                else
+                    thread_unsafe_unigram_candidate_sampler_graph(true_classes_; name=name, num_true=num_true, num_sampled=num_sampled, unique=unique, range_max=range_max, seed=seed, seed2=seed2)
+                end
+            end
+    end
 end
@@ -42903,58 +78199,92 @@ end
 Retrieve embedding parameters for a single table.
""" begin - function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = 
Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adam_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adam_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -42964,47 +78294,83 @@ end """ begin - function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) - local desc - tf.with_op_name(name, "ParallelConcat") do - desc = tf.NodeDescription("ParallelConcat") - values_ = [convert(Tensor{Any}, x) for x = values_] - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function parallel_concat_graph(values_; name=nothing, N=nothing, shape=nothing) + local desc + tf.with_op_name(name, "ParallelConcat") do + desc = tf.NodeDescription("ParallelConcat") + begin + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) + desc = tf.EagerOp("ParallelConcat") + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) + if tf.in_eager_mode() + parallel_concat_eager(values_; name=name, N=N, shape=shape) + else + parallel_concat_graph(values_; name=name, N=N, shape=shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function parallel_concat_eager(values_; name=nothing, N=nothing, shape=nothing) - desc = tf.EagerOp("ParallelConcat") - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, values_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = 
tf.TapeNode(parallel_concat, [values_], name=nothing, N=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function parallel_concat(values_; name=nothing, N=nothing, shape=nothing) - if tf.in_eager_mode() - parallel_concat_eager(values_; name=name, N=N, shape=shape) - else - parallel_concat_graph(values_; name=name, N=N, shape=shape) - end - end end @@ -43014,45 +78380,93 @@ end """ begin - function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableFindV2") do - desc = tf.NodeDescription("LookupTableFindV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - default_value_ = convert(Tensor{Any}, default_value_) - (keys_,) = tf.tf_promote(keys_) - (default_value_,) = tf.tf_promote(default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) - desc = tf.EagerOp("LookupTableFindV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - default_value_ = convert(tf.EagerTensor, default_value_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, default_value_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(default_value_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_find_v2, [table_handle_, keys_, default_value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_find_v2(table_handle_, keys_, default_value_; name=nothing) - if tf.in_eager_mode() - lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=name) - else - lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=name) + begin + function lookup_table_find_v2_graph(table_handle_, keys_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableFindV2") do + desc = tf.NodeDescription("LookupTableFindV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + default_value_ = convert(Tensor{Any}, default_value_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (default_value_,) = tf.tf_promote(default_value_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_find_v2_eager(table_handle_, keys_, default_value_; name=nothing) + desc = tf.EagerOp("LookupTableFindV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + default_value_ = convert(tf.EagerTensor, default_value_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(default_value_) + end + res = tf.execute(desc) + node = 
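[A sketch of lookup_table_find_v2, again assuming tf.Ops is reachable and with table_handle standing in for a handle produced by one of the table constructors, which sit outside this hunk. The default_value_ argument doubles as the source of the Tout attribute, so its element type has to match the table's value type:

    # Keys missing from the table come back as the default value (-1 here).
    vals = tf.Ops.lookup_table_find_v2(table_handle, ["a", "b"], -1)
]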
@@ -43062,37 +78476,69 @@ end
 """
 begin
-    function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing)
-        local desc
-        tf.with_op_name(name, "TensorForestTreeDeserialize") do
-            desc = tf.NodeDescription("TensorForestTreeDeserialize")
-            tree_handle_ = convert(Tensor{Any}, tree_handle_)
-            tree_config_ = convert(Tensor{String}, tree_config_)
-            tf.add_input(desc, tree_handle_)
-            tf.add_input(desc, tree_config_)
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing)
-        desc = tf.EagerOp("TensorForestTreeDeserialize")
-        tree_handle_ = convert(tf.EagerTensor, tree_handle_)
-        tree_config_ = convert(tf.EagerTensor, tree_config_)
-        tf.add_input(desc, tree_handle_)
-        tf.add_input(desc, tree_config_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing)
-        if tf.in_eager_mode()
-            tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name)
-        else
-            tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name)
-        end
-    end
+    begin
+        function tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=nothing)
+            local desc
+            tf.with_op_name(name, "TensorForestTreeDeserialize") do
+                desc = tf.NodeDescription("TensorForestTreeDeserialize")
+                begin
+                    begin
+                        tree_handle_ = convert(Tensor{Any}, tree_handle_)
+                        begin
+                        end
+                    end
+                    begin
+                        tree_config_ = convert(Tensor{String}, tree_config_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, tree_handle_)
+                    end
+                    begin
+                        tf.add_input(desc, tree_config_)
+                    end
+                end
+                begin
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=nothing)
+            desc = tf.EagerOp("TensorForestTreeDeserialize")
+            tree_handle_ = convert(tf.EagerTensor, tree_handle_)
+            tree_config_ = convert(tf.EagerTensor, tree_config_)
+            begin
+                begin
+                    tf.add_input(desc, tree_handle_)
+                end
+                begin
+                    tf.add_input(desc, tree_config_)
+                end
+            end
+            begin
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(tensor_forest_tree_deserialize, [tree_handle_, tree_config_], name=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_deserialize(tree_handle_, tree_config_; name=nothing)
+                if tf.in_eager_mode()
+                    tensor_forest_tree_deserialize_eager(tree_handle_, tree_config_; name=name)
+                else
+                    tensor_forest_tree_deserialize_graph(tree_handle_, tree_config_; name=name)
+                end
+            end
+    end
 end
@@ -43102,58 +78548,92 @@ end
 Retrieve embedding parameters for a single table.
 """
 begin
-    function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        local desc
-        tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do
-            desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters")
-            if table_id !== nothing
-                desc["table_id"] = Base.Int(table_id)
-            end
-            if table_name !== nothing
-                desc["table_name"] = Base.String(table_name)
-            end
-            if num_shards !== nothing
-                desc["num_shards"] = Base.Int(num_shards)
-            end
-            if shard_id !== nothing
-                desc["shard_id"] = Base.Int(shard_id)
-            end
-        end
-        out = tf.Tensor[]
-        op = tf.Operation(desc)
-        for out_idx = 1:2
-            push!(out, tf.Tensor(op, out_idx))
-        end
-        out
-    end
-    function retrieve_tpu_embedding_momentum_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters")
-        if table_id !== nothing
-            desc["table_id"] = Base.Int(table_id)
-        end
-        if table_name !== nothing
-            desc["table_name"] = Base.String(table_name)
-        end
-        if num_shards !== nothing
-            desc["num_shards"] = Base.Int(num_shards)
-        end
-        if shard_id !== nothing
-            desc["shard_id"] = Base.Int(shard_id)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
-        if tf.in_eager_mode()
-            retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
-        else
-            retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
-        end
-    end
+    begin
+        function retrieve_tpu_embedding_momentum_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+            local desc
+            tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParameters") do
+                desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParameters")
+                begin
+                end
+                begin
+                end
+                begin
+                    begin
+                        if table_id !== nothing
+                            desc["table_id"] = Base.Int(table_id)
+                        end
+                    end
+                    begin
+                        if table_name !== nothing
+                            desc["table_name"] = Base.String(table_name)
+                        end
+                    end
+                    begin
+                        if num_shards !== nothing
+                            desc["num_shards"] = Base.Int(num_shards)
+                        end
+                    end
+                    begin
+                        if shard_id !== nothing
+                            desc["shard_id"] = Base.Int(shard_id)
+                        end
+                    end
+                end
+            end
+            begin
+                out = tf.Tensor[]
+                op = tf.Operation(desc)
+                for out_idx = 1:2
+                    push!(out, tf.Tensor(op, out_idx))
+                end
+                out
+            end
+        end
+    end
+    begin
+        function retrieve_tpu_embedding_momentum_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+            desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParameters")
+            begin
+            end
+            begin
+                begin
+                    if table_id !== nothing
+                        desc["table_id"] = Base.Int(table_id)
+                    end
+                end
+                begin
+                    if table_name !== nothing
+                        desc["table_name"] = Base.String(table_name)
+                    end
+                end
+                begin
+                    if num_shards !== nothing
+                        desc["num_shards"] = Base.Int(num_shards)
+                    end
+                end
+                begin
+                    if shard_id !== nothing
+                        desc["shard_id"] = Base.Int(shard_id)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing)
+                if tf.in_eager_mode()
+                    retrieve_tpu_embedding_momentum_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+                else
+                    retrieve_tpu_embedding_momentum_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id)
+                end
+            end
+    end
 end
@@ -43163,57 +78643,97 @@ end
 """
 begin
-    function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
-        local desc
-        tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do
-            desc = tf.NodeDescription("FakeQuantWithMinMaxArgs")
-            inputs_ = convert(Tensor{Float32}, inputs_)
-            tf.add_input(desc, inputs_)
-            if min !== nothing
-                desc["min"] = Base.identity(min)
-            end
-            if max !== nothing
-                desc["max"] = Base.identity(max)
-            end
-            if num_bits !== nothing
-                desc["num_bits"] = Base.Int(num_bits)
-            end
-            if narrow_range !== nothing
-                desc["narrow_range"] = Base.Bool(narrow_range)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
-        desc = tf.EagerOp("FakeQuantWithMinMaxArgs")
-        inputs_ = convert(tf.EagerTensor, inputs_)
-        tf.add_input(desc, inputs_)
-        if min !== nothing
-            desc["min"] = Base.identity(min)
-        end
-        if max !== nothing
-            desc["max"] = Base.identity(max)
-        end
-        if num_bits !== nothing
-            desc["num_bits"] = Base.Int(num_bits)
-        end
-        if narrow_range !== nothing
-            desc["narrow_range"] = Base.Bool(narrow_range)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
-        if tf.in_eager_mode()
-            fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
-        else
-            fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
-        end
-    end
+    begin
+        function fake_quant_with_min_max_args_graph(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+            local desc
+            tf.with_op_name(name, "FakeQuantWithMinMaxArgs") do
+                desc = tf.NodeDescription("FakeQuantWithMinMaxArgs")
+                begin
+                    begin
+                        inputs_ = convert(Tensor{Float32}, inputs_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, inputs_)
+                    end
+                end
+                begin
+                    begin
+                        if min !== nothing
+                            desc["min"] = Base.identity(min)
+                        end
+                    end
+                    begin
+                        if max !== nothing
+                            desc["max"] = Base.identity(max)
+                        end
+                    end
+                    begin
+                        if num_bits !== nothing
+                            desc["num_bits"] = Base.Int(num_bits)
+                        end
+                    end
+                    begin
+                        if narrow_range !== nothing
+                            desc["narrow_range"] = Base.Bool(narrow_range)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function fake_quant_with_min_max_args_eager(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+            desc = tf.EagerOp("FakeQuantWithMinMaxArgs")
+            inputs_ = convert(tf.EagerTensor, inputs_)
+            begin
+                begin
+                    tf.add_input(desc, inputs_)
+                end
+            end
+            begin
+                begin
+                    if min !== nothing
+                        desc["min"] = Base.identity(min)
+                    end
+                end
+                begin
+                    if max !== nothing
+                        desc["max"] = Base.identity(max)
+                    end
+                end
+                begin
+                    if num_bits !== nothing
+                        desc["num_bits"] = Base.Int(num_bits)
+                    end
+                end
+                begin
+                    if narrow_range !== nothing
+                        desc["narrow_range"] = Base.Bool(narrow_range)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(fake_quant_with_min_max_args, [inputs_], name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_args(inputs_; name=nothing, min=nothing, max=nothing, num_bits=nothing, narrow_range=nothing)
+                if tf.in_eager_mode()
+                    fake_quant_with_min_max_args_eager(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+                else
+                    fake_quant_with_min_max_args_graph(inputs_; name=name, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range)
+                end
+            end
+    end
 end
@@ -43223,50 +78743,100 @@ end
 """
 begin
-    function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
-        local desc
-        tf.with_op_name(name, "ResourceApplyGradientDescent") do
-            desc = tf.NodeDescription("ResourceApplyGradientDescent")
-            var_ = convert(Tensor{Any}, var_)
-            alpha_ = convert(Tensor{Any}, alpha_)
-            delta_ = convert(Tensor{Any}, delta_)
-            (alpha_, delta_) = tf.tf_promote(alpha_, delta_)
-            tf.add_input(desc, var_)
-            tf.add_input(desc, alpha_)
-            tf.add_input(desc, delta_)
-            if use_locking !== nothing
-                desc["use_locking"] = Base.Bool(use_locking)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing)
-        desc = tf.EagerOp("ResourceApplyGradientDescent")
-        var_ = convert(tf.EagerTensor, var_)
-        alpha_ = convert(tf.EagerTensor, alpha_)
-        delta_ = convert(tf.EagerTensor, delta_)
-        tf.add_input(desc, var_)
-        tf.add_input(desc, alpha_)
-        tf.add_input(desc, delta_)
-        if use_locking !== nothing
-            desc["use_locking"] = Base.Bool(use_locking)
-        end
-        desc["T"] = tf.data_type(alpha_)
-        desc["T"] = tf.data_type(delta_)
-        res = tf.execute(desc)
-        node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing)
-        if tf.in_eager_mode()
-            resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking)
-        else
-            resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking)
-        end
-    end
+    begin
+        function resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+            local desc
+            tf.with_op_name(name, "ResourceApplyGradientDescent") do
+                desc = tf.NodeDescription("ResourceApplyGradientDescent")
+                begin
+                    begin
+                        var_ = convert(Tensor{Any}, var_)
+                        begin
+                        end
+                    end
+                    begin
+                        alpha_ = convert(Tensor{Any}, alpha_)
+                        begin
+                        end
+                    end
+                    begin
+                        delta_ = convert(Tensor{Any}, delta_)
+                        begin
+                        end
+                    end
+                    begin
+                        (alpha_, delta_) = tf.tf_promote(alpha_, delta_)
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, var_)
+                    end
+                    begin
+                        tf.add_input(desc, alpha_)
+                    end
+                    begin
+                        tf.add_input(desc, delta_)
+                    end
+                end
+                begin
+                    begin
+                        if use_locking !== nothing
+                            desc["use_locking"] = Base.Bool(use_locking)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+            desc = tf.EagerOp("ResourceApplyGradientDescent")
+            var_ = convert(tf.EagerTensor, var_)
+            alpha_ = convert(tf.EagerTensor, alpha_)
+            delta_ = convert(tf.EagerTensor, delta_)
+            begin
+                begin
+                    tf.add_input(desc, var_)
+                end
+                begin
+                    tf.add_input(desc, alpha_)
+                end
+                begin
+                    tf.add_input(desc, delta_)
+                end
+            end
+            begin
+                begin
+                    if use_locking !== nothing
+                        desc["use_locking"] = Base.Bool(use_locking)
+                    end
+                end
+            end
+            begin
+                desc["T"] = tf.data_type(alpha_)
+            end
+            begin
+                desc["T"] = tf.data_type(delta_)
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(resource_apply_gradient_descent, [var_, alpha_, delta_], name=nothing, use_locking=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_gradient_descent(var_, alpha_, delta_; name=nothing, use_locking=nothing)
+                if tf.in_eager_mode()
+                    resource_apply_gradient_descent_eager(var_, alpha_, delta_; name=name, use_locking=use_locking)
+                else
+                    resource_apply_gradient_descent_graph(var_, alpha_, delta_; name=name, use_locking=use_locking)
+                end
+            end
+    end
 end
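[resource_apply_gradient_descent is the in-place SGD update (var -= alpha * delta) on a resource variable. A hedged sketch, with var_handle and grad standing in for a variable handle and a gradient computed elsewhere:

    # alpha and delta are promoted to a common dtype by the wrapper;
    # use_locking stays unset unless passed explicitly.
    tf.Ops.resource_apply_gradient_descent(var_handle, 0.01, grad)
]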
@@ -43276,57 +78846,113 @@ end
 """
 begin
-    function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
-        local desc
-        tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do
-            desc = tf.NodeDescription("ExperimentalSlidingWindowDataset")
-            input_dataset_ = convert(Tensor{Any}, input_dataset_)
-            window_size_ = convert(Tensor{Int64}, window_size_)
-            window_shift_ = convert(Tensor{Int64}, window_shift_)
-            window_stride_ = convert(Tensor{Int64}, window_stride_)
-            tf.add_input(desc, input_dataset_)
-            tf.add_input(desc, window_size_)
-            tf.add_input(desc, window_shift_)
-            tf.add_input(desc, window_stride_)
-            if output_types !== nothing
-                desc["output_types"] = map(Base.identity, output_types)
-            end
-            if output_shapes !== nothing
-                desc["output_shapes"] = map(Base.identity, output_shapes)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
-        desc = tf.EagerOp("ExperimentalSlidingWindowDataset")
-        input_dataset_ = convert(tf.EagerTensor, input_dataset_)
-        window_size_ = convert(tf.EagerTensor, window_size_)
-        window_shift_ = convert(tf.EagerTensor, window_shift_)
-        window_stride_ = convert(tf.EagerTensor, window_stride_)
-        tf.add_input(desc, input_dataset_)
-        tf.add_input(desc, window_size_)
-        tf.add_input(desc, window_shift_)
-        tf.add_input(desc, window_stride_)
-        if output_types !== nothing
-            desc["output_types"] = map(Base.identity, output_types)
-        end
-        if output_shapes !== nothing
-            desc["output_shapes"] = map(Base.identity, output_shapes)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
-        if tf.in_eager_mode()
-            experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
-        else
-            experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
-        end
-    end
+    begin
+        function experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+            local desc
+            tf.with_op_name(name, "ExperimentalSlidingWindowDataset") do
+                desc = tf.NodeDescription("ExperimentalSlidingWindowDataset")
+                begin
+                    begin
+                        input_dataset_ = convert(Tensor{Any}, input_dataset_)
+                        begin
+                        end
+                    end
+                    begin
+                        window_size_ = convert(Tensor{Int64}, window_size_)
+                        begin
+                        end
+                    end
+                    begin
+                        window_shift_ = convert(Tensor{Int64}, window_shift_)
+                        begin
+                        end
+                    end
+                    begin
+                        window_stride_ = convert(Tensor{Int64}, window_stride_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, input_dataset_)
+                    end
+                    begin
+                        tf.add_input(desc, window_size_)
+                    end
+                    begin
+                        tf.add_input(desc, window_shift_)
+                    end
+                    begin
+                        tf.add_input(desc, window_stride_)
+                    end
+                end
+                begin
+                    begin
+                        if output_types !== nothing
+                            desc["output_types"] = map(Base.identity, output_types)
+                        end
+                    end
+                    begin
+                        if output_shapes !== nothing
+                            desc["output_shapes"] = map(Base.identity, output_shapes)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+            desc = tf.EagerOp("ExperimentalSlidingWindowDataset")
+            input_dataset_ = convert(tf.EagerTensor, input_dataset_)
+            window_size_ = convert(tf.EagerTensor, window_size_)
+            window_shift_ = convert(tf.EagerTensor, window_shift_)
+            window_stride_ = convert(tf.EagerTensor, window_stride_)
+            begin
+                begin
+                    tf.add_input(desc, input_dataset_)
+                end
+                begin
+                    tf.add_input(desc, window_size_)
+                end
+                begin
+                    tf.add_input(desc, window_shift_)
+                end
+                begin
+                    tf.add_input(desc, window_stride_)
+                end
+            end
+            begin
+                begin
+                    if output_types !== nothing
+                        desc["output_types"] = map(Base.identity, output_types)
+                    end
+                end
+                begin
+                    if output_shapes !== nothing
+                        desc["output_shapes"] = map(Base.identity, output_shapes)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(experimental_sliding_window_dataset, [input_dataset_, window_size_, window_shift_, window_stride_], name=nothing, output_types=nothing, output_shapes=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sliding_window_dataset(input_dataset_, window_size_, window_shift_, window_stride_; name=nothing, output_types=nothing, output_shapes=nothing)
+                if tf.in_eager_mode()
+                    experimental_sliding_window_dataset_eager(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
+                else
+                    experimental_sliding_window_dataset_graph(input_dataset_, window_size_, window_shift_, window_stride_; name=name, output_types=output_types, output_shapes=output_shapes)
+                end
+            end
+    end
 end
@@ -43336,45 +78962,77 @@ end
 """
 begin
-    function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
-        local desc
-        tf.with_op_name(name, "DecodeRaw") do
-            desc = tf.NodeDescription("DecodeRaw")
-            bytes_ = convert(Tensor{String}, bytes_)
-            tf.add_input(desc, bytes_)
-            if out_type !== nothing
-                desc["out_type"] = Base.identity(out_type)
-            end
-            if little_endian !== nothing
-                desc["little_endian"] = Base.Bool(little_endian)
-            end
-        end
-        tf.Tensor(tf.Operation(desc))
-    end
-    function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
-        desc = tf.EagerOp("DecodeRaw")
-        bytes_ = convert(tf.EagerTensor, bytes_)
-        tf.add_input(desc, bytes_)
-        if out_type !== nothing
-            desc["out_type"] = Base.identity(out_type)
-        end
-        if little_endian !== nothing
-            desc["little_endian"] = Base.Bool(little_endian)
-        end
-        res = tf.execute(desc)
-        node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing, res)
-        if length(res) >= 1
-            tf.add_node(res[1], node)
-            return res[1]
-        end
-    end
-    #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
-        if tf.in_eager_mode()
-            decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian)
-        else
-            decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian)
-        end
-    end
+    begin
+        function decode_raw_graph(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+            local desc
+            tf.with_op_name(name, "DecodeRaw") do
+                desc = tf.NodeDescription("DecodeRaw")
+                begin
+                    begin
+                        bytes_ = convert(Tensor{String}, bytes_)
+                        begin
+                        end
+                    end
+                end
+                begin
+                    begin
+                        tf.add_input(desc, bytes_)
+                    end
+                end
+                begin
+                    begin
+                        if out_type !== nothing
+                            desc["out_type"] = Base.identity(out_type)
+                        end
+                    end
+                    begin
+                        if little_endian !== nothing
+                            desc["little_endian"] = Base.Bool(little_endian)
+                        end
+                    end
+                end
+            end
+            tf.Tensor(tf.Operation(desc))
+        end
+    end
+    begin
+        function decode_raw_eager(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+            desc = tf.EagerOp("DecodeRaw")
+            bytes_ = convert(tf.EagerTensor, bytes_)
+            begin
+                begin
+                    tf.add_input(desc, bytes_)
+                end
+            end
+            begin
+                begin
+                    if out_type !== nothing
+                        desc["out_type"] = Base.identity(out_type)
+                    end
+                end
+                begin
+                    if little_endian !== nothing
+                        desc["little_endian"] = Base.Bool(little_endian)
+                    end
+                end
+            end
+            res = tf.execute(desc)
+            node = tf.TapeNode(decode_raw, [bytes_], name=nothing, out_type=nothing, little_endian=nothing, res)
+            if length(res) >= 1
+                tf.add_node(res[1], node)
+                return res[1]
+            end
+        end
+    end
+    begin
+        #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function decode_raw(bytes_; name=nothing, out_type=nothing, little_endian=nothing)
+                if tf.in_eager_mode()
+                    decode_raw_eager(bytes_; name=name, out_type=out_type, little_endian=little_endian)
+                else
+                    decode_raw_graph(bytes_; name=name, out_type=out_type, little_endian=little_endian)
+                end
+            end
+    end
 end
function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") - gradients_ = convert(Tensor{Float32}, gradients_) - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) + begin + function fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannelGradient") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannelGradient") + begin + begin + gradients_ = convert(Tensor{Float32}, gradients_) + begin + end + end + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + min_ = convert(Tensor{Float32}, min_) + begin + end + end + begin + max_ = convert(Tensor{Float32}, max_) + begin + end + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") + gradients_ = convert(tf.EagerTensor, gradients_) + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - end - out 
= tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannelGradient") - gradients_ = convert(tf.EagerTensor, gradients_) - inputs_ = convert(tf.EagerTensor, inputs_) - min_ = convert(tf.EagerTensor, min_) - max_ = convert(tf.EagerTensor, max_) - tf.add_input(desc, gradients_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel_gradient, [gradients_, inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel_gradient(gradients_, inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_per_channel_gradient_eager(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_per_channel_gradient_graph(gradients_, inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - end - end end @@ -43449,52 +79165,98 @@ end """ begin - function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "UniqueWithCountsV2") do - desc = tf.NodeDescription("UniqueWithCountsV2") - x_ = convert(Tensor{Any}, x_) - axis_ = convert(Tensor{Int64}, axis_) - (x_,) = tf.tf_promote(x_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) + begin + function unique_with_counts_v2_graph(x_, axis_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "UniqueWithCountsV2") do + desc = tf.NodeDescription("UniqueWithCountsV2") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + axis_ = convert(Tensor{Int64}, axis_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing) + desc = tf.EagerOp("UniqueWithCountsV2") + x_ = convert(tf.EagerTensor, x_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Taxis"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + 
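# Two details of the eager path are worth noting at this point: tf.execute
# returns a Vector of output handles, and the TapeNode registered via
# tf.add_node is what later lets the gradient tape trace back through this
# op. UniqueWithCountsV2 has three outputs (y, idx, count), so this wrapper
# returns the whole `res` vector, whereas single-output wrappers elsewhere
# in this file return only res[1].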
return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx) + else + unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function unique_with_counts_v2_eager(x_, axis_; name=nothing, out_idx=nothing) - desc = tf.EagerOp("UniqueWithCountsV2") - x_ = convert(tf.EagerTensor, x_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, x_) - tf.add_input(desc, axis_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - desc["T"] = tf.data_type(x_) - desc["Taxis"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(unique_with_counts_v2, [x_, axis_], name=nothing, out_idx=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique_with_counts_v2(x_, axis_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_with_counts_v2_eager(x_, axis_; name=name, out_idx=out_idx) - else - unique_with_counts_v2_graph(x_, axis_; name=name, out_idx=out_idx) - end - end end @@ -43504,49 +79266,89 @@ end """ begin - function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalSleepDataset") do - desc = tf.NodeDescription("ExperimentalSleepDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, sleep_microseconds_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalSleepDataset") do + desc = tf.NodeDescription("ExperimentalSleepDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + sleep_microseconds_ = convert(Tensor{Int64}, sleep_microseconds_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, sleep_microseconds_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalSleepDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + sleep_microseconds_ = convert(tf.EagerTensor, sleep_microseconds_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, sleep_microseconds_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + 
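# The `if <attr> !== nothing` guards above implement optional attributes:
# only keyword arguments the caller actually supplied are written into the
# op description, and anything left as `nothing` falls back to the default
# registered for the kernel.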
end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalSleepDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - sleep_microseconds_ = convert(tf.EagerTensor, sleep_microseconds_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, sleep_microseconds_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_sleep_dataset, [input_dataset_, sleep_microseconds_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_sleep_dataset(input_dataset_, sleep_microseconds_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_sleep_dataset_eager(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_sleep_dataset_graph(input_dataset_, sleep_microseconds_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -43556,46 +79358,80 @@ end Operator that connects the output of an N-way replicated TPU computation to N separate outputs. 
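Each of the `num_replicas` outputs corresponds to one replica, which is why the graph wrapper below collects `tf.Tensor(op, out_idx)` for `out_idx = 1:num_replicas` into a vector rather than returning a single tensor.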
""" begin - function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) - local desc - tf.with_op_name(name, "TPUReplicatedOutput") do - desc = tf.NodeDescription("TPUReplicatedOutput") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) + begin + function tpu_replicated_output_graph(input_; name=nothing, num_replicas=nothing) + local desc + tf.with_op_name(name, "TPUReplicatedOutput") do + desc = tf.NodeDescription("TPUReplicatedOutput") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_replicas + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing) + desc = tf.EagerOp("TPUReplicatedOutput") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if num_replicas !== nothing + desc["num_replicas"] = Base.Int(num_replicas) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) + if tf.in_eager_mode() + tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas) + else + tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_replicas - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tpu_replicated_output_eager(input_; name=nothing, num_replicas=nothing) - desc = tf.EagerOp("TPUReplicatedOutput") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if num_replicas !== nothing - desc["num_replicas"] = Base.Int(num_replicas) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(tpu_replicated_output, [input_], name=nothing, num_replicas=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tpu_replicated_output(input_; name=nothing, num_replicas=nothing) - if tf.in_eager_mode() - tpu_replicated_output_eager(input_; name=name, num_replicas=num_replicas) - else - tpu_replicated_output_graph(input_; name=name, num_replicas=num_replicas) - end - end end @@ -43605,46 +79441,88 @@ end """ begin - function lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "LowerBound") do - desc = tf.NodeDescription("LowerBound") - sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) - values_ = convert(Tensor{Any}, values_) - (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) - tf.add_input(desc, sorted_inputs_) - tf.add_input(desc, values_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function 
lower_bound_graph(sorted_inputs_, values_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "LowerBound") do + desc = tf.NodeDescription("LowerBound") + begin + begin + sorted_inputs_ = convert(Tensor{Any}, sorted_inputs_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (sorted_inputs_, values_) = tf.tf_promote(sorted_inputs_, values_) + end + end + begin + begin + tf.add_input(desc, sorted_inputs_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) + desc = tf.EagerOp("LowerBound") + sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, sorted_inputs_) + end + begin + tf.add_input(desc, values_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T"] = tf.data_type(sorted_inputs_) + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) + else + lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) + end end - end - tf.Tensor(tf.Operation(desc)) end - function lower_bound_eager(sorted_inputs_, values_; name=nothing, out_type=nothing) - desc = tf.EagerOp("LowerBound") - sorted_inputs_ = convert(tf.EagerTensor, sorted_inputs_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, sorted_inputs_) - tf.add_input(desc, values_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T"] = tf.data_type(sorted_inputs_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(lower_bound, [sorted_inputs_, values_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lower_bound(sorted_inputs_, values_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - lower_bound_eager(sorted_inputs_, values_; name=name, out_type=out_type) - else - lower_bound_graph(sorted_inputs_, values_; name=name, out_type=out_type) - end - end end @@ -43654,35 +79532,63 @@ end """ begin - function tan_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Tan") do - desc = tf.NodeDescription("Tan") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function tan_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Tan") do + desc = tf.NodeDescription("Tan") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tan_eager(x_; name=nothing) - desc = tf.EagerOp("Tan") - x_ 
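# In LowerBound above, both inputs share the single type attribute "T":
# tf.tf_promote unifies the element types of sorted_inputs_ and values_ on
# the graph path, while the eager path simply writes desc["T"] once per
# input (last write wins), relying on the inputs already sharing a dtype.
# The promotion is analogous to Julia's own, e.g.:
#     promote(1, 2.0) == (1.0, 2.0)   # Int64 lifted to Float64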
= convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(tan, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tan_eager(x_; name=nothing) + desc = tf.EagerOp("Tan") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(tan, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tan(x_; name=nothing) - if tf.in_eager_mode() - tan_eager(x_; name=name) - else - tan_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tan(x_; name=nothing) + if tf.in_eager_mode() + tan_eager(x_; name=name) + else + tan_graph(x_; name=name) + end end - end + end end @@ -43692,53 +79598,93 @@ end """ begin - function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - local desc - tf.with_op_name(name, "Enter") do - desc = tf.NodeDescription("Enter") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) + begin + function enter_graph(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + local desc + tf.with_op_name(name, "Enter") do + desc = tf.NodeDescription("Enter") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + begin + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + end + begin + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + end + begin + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + desc = tf.EagerOp("Enter") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + begin + if frame_name !== nothing + desc["frame_name"] = Base.String(frame_name) + end + end + begin + if is_constant !== nothing + desc["is_constant"] = Base.Bool(is_constant) + end + end + begin + if parallel_iterations !== nothing + desc["parallel_iterations"] = Base.Int(parallel_iterations) + end + end + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) + if tf.in_eager_mode() + enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + else + enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) + end end - if 
is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) - end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function enter_eager(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - desc = tf.EagerOp("Enter") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - if frame_name !== nothing - desc["frame_name"] = Base.String(frame_name) - end - if is_constant !== nothing - desc["is_constant"] = Base.Bool(is_constant) - end - if parallel_iterations !== nothing - desc["parallel_iterations"] = Base.Int(parallel_iterations) - end - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(enter, [data_], name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enter(data_; name=nothing, frame_name=nothing, is_constant=nothing, parallel_iterations=nothing) - if tf.in_eager_mode() - enter_eager(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) - else - enter_graph(data_; name=name, frame_name=frame_name, is_constant=is_constant, parallel_iterations=parallel_iterations) - end - end end @@ -43748,51 +79694,87 @@ end An op which feeds multiple Tensor values into the computation as an XLA tuple. """ begin - function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "InfeedEnqueueTuple") do - desc = tf.NodeDescription("InfeedEnqueueTuple") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + begin + function infeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "InfeedEnqueueTuple") do + desc = tf.NodeDescription("InfeedEnqueueTuple") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + desc = tf.EagerOp("InfeedEnqueueTuple") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, 
device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + else + infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function infeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - desc = tf.EagerOp("InfeedEnqueueTuple") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(infeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing, shapes=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - infeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) - else - infeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes, shapes=shapes, device_ordinal=device_ordinal) - end - end end @@ -43802,35 +79784,63 @@ end """ begin - function square_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Square") do - desc = tf.NodeDescription("Square") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function square_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Square") do + desc = tf.NodeDescription("Square") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function square_eager(x_; name=nothing) - desc = tf.EagerOp("Square") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(square, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function square_eager(x_; name=nothing) + desc = tf.EagerOp("Square") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(square, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function square(x_; name=nothing) - if tf.in_eager_mode() - square_eager(x_; name=name) - else - square_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function square(x_; name=nothing) + if tf.in_eager_mode() + square_eager(x_; name=name) + else + square_graph(x_; name=name) + end end - 
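# One asymmetry is visible in InfeedEnqueueTuple above: the graph builder
# converts its list-valued input element by element
# ([convert(Tensor{Any}, x) for x = inputs_]), while the eager builder calls
# convert(tf.EagerTensor, inputs_) on the whole collection, so the eager
# path works only insofar as that convert method accepts an array of values.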
end + end end @@ -43840,33 +79850,57 @@ end An op that informs a host of the global ids of all the of TPUs in the """ begin - function _set_global_tpu_array_graph(topology_; name=nothing) - local desc - tf.with_op_name(name, "_SetGlobalTPUArray") do - desc = tf.NodeDescription("_SetGlobalTPUArray") - topology_ = convert(Tensor{String}, topology_) - tf.add_input(desc, topology_) + begin + function _set_global_tpu_array_graph(topology_; name=nothing) + local desc + tf.with_op_name(name, "_SetGlobalTPUArray") do + desc = tf.NodeDescription("_SetGlobalTPUArray") + begin + begin + topology_ = convert(Tensor{String}, topology_) + begin + end + end + end + begin + begin + tf.add_input(desc, topology_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _set_global_tpu_array_eager(topology_; name=nothing) - desc = tf.EagerOp("_SetGlobalTPUArray") - topology_ = convert(tf.EagerTensor, topology_) - tf.add_input(desc, topology_) - res = tf.execute(desc) - node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _set_global_tpu_array_eager(topology_; name=nothing) + desc = tf.EagerOp("_SetGlobalTPUArray") + topology_ = convert(tf.EagerTensor, topology_) + begin + begin + tf.add_input(desc, topology_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(_set_global_tpu_array, [topology_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _set_global_tpu_array(topology_; name=nothing) - if tf.in_eager_mode() - _set_global_tpu_array_eager(topology_; name=name) - else - _set_global_tpu_array_graph(topology_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _set_global_tpu_array(topology_; name=nothing) + if tf.in_eager_mode() + _set_global_tpu_array_eager(topology_; name=name) + else + _set_global_tpu_array_graph(topology_; name=name) + end end - end + end end @@ -43876,35 +79910,63 @@ end """ begin - function debug_gradient_ref_identity_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "DebugGradientRefIdentity") do - desc = tf.NodeDescription("DebugGradientRefIdentity") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function debug_gradient_ref_identity_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "DebugGradientRefIdentity") do + desc = tf.NodeDescription("DebugGradientRefIdentity") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function debug_gradient_ref_identity_eager(input_; name=nothing) - desc = tf.EagerOp("DebugGradientRefIdentity") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function debug_gradient_ref_identity_eager(input_; name=nothing) + desc = tf.EagerOp("DebugGradientRefIdentity") + input_ = convert(tf.EagerTensor, input_) + begin + begin + 
tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(debug_gradient_ref_identity, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing) - if tf.in_eager_mode() - debug_gradient_ref_identity_eager(input_; name=name) - else - debug_gradient_ref_identity_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function debug_gradient_ref_identity(input_; name=nothing) + if tf.in_eager_mode() + debug_gradient_ref_identity_eager(input_; name=name) + else + debug_gradient_ref_identity_graph(input_; name=name) + end end - end + end end @@ -43914,71 +79976,163 @@ end """ begin - function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyAdadelta") do - desc = tf.NodeDescription("ApplyAdadelta") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - accum_update_ = convert(Tensor{Any}, accum_update_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyAdadelta") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - accum_update_ = convert(tf.EagerTensor, accum_update_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, accum_update_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(accum_update_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) - else - apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; 
name=name, use_locking=use_locking) + begin + function apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyAdadelta") do + desc = tf.NodeDescription("ApplyAdadelta") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + accum_update_ = convert(Tensor{Any}, accum_update_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) = tf.tf_promote(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyAdadelta") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + accum_update_ = convert(tf.EagerTensor, accum_update_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, accum_update_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(accum_update_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_adadelta, [var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_adadelta(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_adadelta_eager(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + else + apply_adadelta_graph(var_, accum_, accum_update_, lr_, rho_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -43988,93 +80142,173 @@ end """ begin - function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, 
window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do - desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] - reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] - window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, window_size_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if window_size_func !== nothing - desc["window_size_func"] = Base.identity(window_size_func) + begin + function experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByWindowDataset") do + desc = tf.NodeDescription("ExperimentalGroupByWindowDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + begin + end + end + begin + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + begin + end + end + begin + window_size_func_other_arguments_ = [convert(Tensor{Any}, x) for x = window_size_func_other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, key_func_other_arguments_) + end + begin + tf.add_input(desc, reduce_func_other_arguments_) + end + begin + tf.add_input(desc, window_size_func_other_arguments_) + end + end + begin + begin + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + end + begin + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + end + begin + if window_size_func !== nothing + desc["window_size_func"] = Base.identity(window_size_func) + end + end + begin + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + end + begin + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + end + begin + if Twindow_size_func_other_arguments !== nothing + desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + 
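# ExperimentalGroupByWindowDataset is the most attribute-heavy op in this
# hunk: key_func, reduce_func, and window_size_func are function-valued
# attributes (passed through via Base.identity), their captured arguments
# arrive as the *_other_arguments_ input lists, and
# output_types/output_shapes describe the element signature of the
# resulting dataset.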
end + end + begin + function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalGroupByWindowDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) + window_size_func_other_arguments_ = convert(tf.EagerTensor, window_size_func_other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, key_func_other_arguments_) + end + begin + tf.add_input(desc, reduce_func_other_arguments_) + end + begin + tf.add_input(desc, window_size_func_other_arguments_) + end + end + begin + begin + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + end + begin + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + end + begin + if window_size_func !== nothing + desc["window_size_func"] = Base.identity(window_size_func) + end + end + begin + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + end + begin + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + end + begin + if Twindow_size_func_other_arguments !== nothing + desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, 
output_shapes=output_shapes) + else + experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + end end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Twindow_size_func_other_arguments !== nothing - desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalGroupByWindowDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) - reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) - window_size_func_other_arguments_ = convert(tf.EagerTensor, window_size_func_other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, window_size_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if window_size_func !== nothing - desc["window_size_func"] = Base.identity(window_size_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Twindow_size_func_other_arguments !== nothing - desc["Twindow_size_func_other_arguments"] = map(Base.identity, Twindow_size_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_group_by_window_dataset, [input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_], name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return 
res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_window_dataset(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=nothing, key_func=nothing, reduce_func=nothing, window_size_func=nothing, Tkey_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Twindow_size_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_group_by_window_dataset_eager(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - else - experimental_group_by_window_dataset_graph(input_dataset_, key_func_other_arguments_, reduce_func_other_arguments_, window_size_func_other_arguments_; name=name, key_func=key_func, reduce_func=reduce_func, window_size_func=window_size_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Twindow_size_func_other_arguments=Twindow_size_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -44084,49 +80318,89 @@ end """ begin - function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) - local desc - tf.with_op_name(name, "AudioSummary") do - desc = tf.NodeDescription("AudioSummary") - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if sample_rate !== nothing - desc["sample_rate"] = Base.identity(sample_rate) + begin + function audio_summary_graph(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + local desc + tf.with_op_name(name, "AudioSummary") do + desc = tf.NodeDescription("AudioSummary") + begin + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Float32}, tensor_) + begin + end + end + end + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if sample_rate !== nothing + desc["sample_rate"] = Base.identity(sample_rate) + end + end + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + desc = tf.EagerOp("AudioSummary") + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if sample_rate !== nothing + desc["sample_rate"] = Base.identity(sample_rate) + end + end + begin + if max_outputs !== nothing + desc["max_outputs"] = Base.Int(max_outputs) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) + if tf.in_eager_mode() + audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) + else + audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) + end end - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end - end - tf.Tensor(tf.Operation(desc)) - end - function audio_summary_eager(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) - desc = tf.EagerOp("AudioSummary") - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - if sample_rate !== nothing - desc["sample_rate"] = Base.identity(sample_rate) - end - if max_outputs !== nothing - desc["max_outputs"] = Base.Int(max_outputs) - end - res = tf.execute(desc) - node = tf.TapeNode(audio_summary, [tag_, tensor_], name=nothing, sample_rate=nothing, max_outputs=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function audio_summary(tag_, tensor_; name=nothing, sample_rate=nothing, max_outputs=nothing) - if tf.in_eager_mode() - audio_summary_eager(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) - else - audio_summary_graph(tag_, tensor_; name=name, sample_rate=sample_rate, max_outputs=max_outputs) - end - end end @@ -44136,40 +80410,78 @@ end """ begin - function squared_difference_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "SquaredDifference") do - desc = tf.NodeDescription("SquaredDifference") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function squared_difference_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "SquaredDifference") do + desc = tf.NodeDescription("SquaredDifference") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function squared_difference_eager(x_, y_; name=nothing) - desc = tf.EagerOp("SquaredDifference") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(squared_difference, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function squared_difference_eager(x_, y_; name=nothing) + desc = tf.EagerOp("SquaredDifference") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(squared_difference, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squared_difference(x_, y_; name=nothing) - if 
tf.in_eager_mode() - squared_difference_eager(x_, y_; name=name) - else - squared_difference_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function squared_difference(x_, y_; name=nothing) + if tf.in_eager_mode() + squared_difference_eager(x_, y_; name=name) + else + squared_difference_graph(x_, y_; name=name) + end end - end + end end @@ -44179,53 +80491,107 @@ end """ begin - function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterNdUpdate") do - desc = tf.NodeDescription("ScatterNdUpdate") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdUpdate") do + desc = tf.NodeDescription("ScatterNdUpdate") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdUpdate") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterNdUpdate") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = 
convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -44235,45 +80601,85 @@ end """ begin - function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "DynamicStitch") do - desc = tf.NodeDescription("DynamicStitch") - indices_ = [convert(Tensor{Int32}, x) for x = indices_] - data_ = [convert(Tensor{Any}, x) for x = data_] - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function dynamic_stitch_graph(indices_, data_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "DynamicStitch") do + desc = tf.NodeDescription("DynamicStitch") + begin + begin + indices_ = [convert(Tensor{Int32}, x) for x = indices_] + begin + end + end + begin + data_ = [convert(Tensor{Any}, x) for x = data_] + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) + desc = tf.EagerOp("DynamicStitch") + indices_ = convert(tf.EagerTensor, indices_) + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, data_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing) + if tf.in_eager_mode() + dynamic_stitch_eager(indices_, data_; name=name, N=N) + else + dynamic_stitch_graph(indices_, data_; name=name, N=N) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function dynamic_stitch_eager(indices_, data_; name=nothing, N=nothing) - desc = tf.EagerOp("DynamicStitch") - indices_ = convert(tf.EagerTensor, indices_) - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, data_) - if N !== nothing - desc["N"] = Base.Int(N) - end - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(dynamic_stitch, [indices_, data_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - 
return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dynamic_stitch(indices_, data_; name=nothing, N=nothing) - if tf.in_eager_mode() - dynamic_stitch_eager(indices_, data_; name=name, N=N) - else - dynamic_stitch_graph(indices_, data_; name=name, N=N) - end - end end @@ -44283,35 +80689,63 @@ end """ begin - function ones_like_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "OnesLike") do - desc = tf.NodeDescription("OnesLike") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function ones_like_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "OnesLike") do + desc = tf.NodeDescription("OnesLike") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ones_like_eager(x_; name=nothing) - desc = tf.EagerOp("OnesLike") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(ones_like, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ones_like_eager(x_; name=nothing) + desc = tf.EagerOp("OnesLike") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(ones_like, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ones_like(x_; name=nothing) - if tf.in_eager_mode() - ones_like_eager(x_; name=name) - else - ones_like_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ones_like(x_; name=nothing) + if tf.in_eager_mode() + ones_like_eager(x_; name=name) + else + ones_like_graph(x_; name=name) + end end - end + end end @@ -44321,59 +80755,127 @@ end """ begin - function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - local desc - tf.with_op_name(name, "FractionalMaxPoolGrad") do - desc = tf.NodeDescription("FractionalMaxPoolGrad") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) - col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) - (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, out_backprop_) - tf.add_input(desc, row_pooling_sequence_) - tf.add_input(desc, col_pooling_sequence_) - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) + begin + function fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + local desc + tf.with_op_name(name, "FractionalMaxPoolGrad") do + desc = tf.NodeDescription("FractionalMaxPoolGrad") + begin + begin + orig_input_ = convert(Tensor{Any}, orig_input_) + begin + end + end + begin + 
orig_output_ = convert(Tensor{Any}, orig_output_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) + begin + end + end + begin + col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) + begin + end + end + begin + (orig_input_, orig_output_, out_backprop_) = tf.tf_promote(orig_input_, orig_output_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, out_backprop_) + end + begin + tf.add_input(desc, row_pooling_sequence_) + end + begin + tf.add_input(desc, col_pooling_sequence_) + end + end + begin + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + desc = tf.EagerOp("FractionalMaxPoolGrad") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, out_backprop_) + end + begin + tf.add_input(desc, row_pooling_sequence_) + end + begin + tf.add_input(desc, col_pooling_sequence_) + end + end + begin + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + if tf.in_eager_mode() + fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + else + fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - desc = tf.EagerOp("FractionalMaxPoolGrad") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) - col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, out_backprop_) - 
tf.add_input(desc, row_pooling_sequence_) - tf.add_input(desc, col_pooling_sequence_) - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(fractional_max_pool_grad, [orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_max_pool_grad(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - if tf.in_eager_mode() - fractional_max_pool_grad_eager(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) - else - fractional_max_pool_grad_graph(orig_input_, orig_output_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) - end - end end @@ -44383,55 +80885,99 @@ end """ begin - function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - local desc - tf.with_op_name(name, "RemoteCall") do - desc = tf.NodeDescription("RemoteCall") - target_ = convert(Tensor{String}, target_) - args_ = [convert(Tensor{Any}, x) for x = args_] - tf.add_input(desc, target_) - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) + begin + function remote_call_graph(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + local desc + tf.with_op_name(name, "RemoteCall") do + desc = tf.NodeDescription("RemoteCall") + begin + begin + target_ = convert(Tensor{String}, target_) + begin + end + end + begin + args_ = [convert(Tensor{Any}, x) for x = args_] + begin + end + end + end + begin + begin + tf.add_input(desc, target_) + end + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + desc = tf.EagerOp("RemoteCall") + target_ = convert(tf.EagerTensor, target_) + args_ = convert(tf.EagerTensor, args_) + begin + begin + tf.add_input(desc, target_) + end + begin + tf.add_input(desc, args_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) + if tf.in_eager_mode() + remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) + else + remote_call_graph(target_, 
args_; name=name, Tin=Tin, Tout=Tout, f=f) + end end - if f !== nothing - desc["f"] = Base.identity(f) - end - end - tf.Tensor(tf.Operation(desc)) - end - function remote_call_eager(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - desc = tf.EagerOp("RemoteCall") - target_ = convert(tf.EagerTensor, target_) - args_ = convert(tf.EagerTensor, args_) - tf.add_input(desc, target_) - tf.add_input(desc, args_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if f !== nothing - desc["f"] = Base.identity(f) - end - res = tf.execute(desc) - node = tf.TapeNode(remote_call, [target_, args_], name=nothing, Tin=nothing, Tout=nothing, f=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function remote_call(target_, args_; name=nothing, Tin=nothing, Tout=nothing, f=nothing) - if tf.in_eager_mode() - remote_call_eager(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) - else - remote_call_graph(target_, args_; name=name, Tin=Tin, Tout=Tout, f=f) - end - end end @@ -44441,48 +80987,92 @@ end """ begin - function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) - local desc - tf.with_op_name(name, "Gather") do - desc = tf.NodeDescription("Gather") - params_ = convert(Tensor{Any}, params_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function gather_graph(params_, indices_; name=nothing, validate_indices=nothing) + local desc + tf.with_op_name(name, "Gather") do + desc = tf.NodeDescription("Gather") + begin + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (params_,) = tf.tf_promote(params_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function gather_eager(params_, indices_; name=nothing, validate_indices=nothing) + desc = tf.EagerOp("Gather") + params_ = convert(tf.EagerTensor, params_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + end + begin + desc["Tparams"] = tf.data_type(params_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing) + if tf.in_eager_mode() + gather_eager(params_, indices_; 
name=name, validate_indices=validate_indices) + else + gather_graph(params_, indices_; name=name, validate_indices=validate_indices) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function gather_eager(params_, indices_; name=nothing, validate_indices=nothing) - desc = tf.EagerOp("Gather") - params_ = convert(tf.EagerTensor, params_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - desc["Tparams"] = tf.data_type(params_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(gather, [params_, indices_], name=nothing, validate_indices=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather(params_, indices_; name=nothing, validate_indices=nothing) - if tf.in_eager_mode() - gather_eager(params_, indices_; name=name, validate_indices=validate_indices) - else - gather_graph(params_, indices_; name=name, validate_indices=validate_indices) - end - end end @@ -44492,74 +81082,156 @@ end """ begin - function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - local desc - tf.with_op_name(name, "QuantizedMatMul") do - desc = tf.NodeDescription("QuantizedMatMul") - a_ = convert(Tensor{Any}, a_) - b_ = convert(Tensor{Any}, b_) - min_a_ = convert(Tensor{Float32}, min_a_) - max_a_ = convert(Tensor{Float32}, max_a_) - min_b_ = convert(Tensor{Float32}, min_b_) - max_b_ = convert(Tensor{Float32}, max_b_) - (a_,) = tf.tf_promote(a_) - (b_,) = tf.tf_promote(b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - tf.add_input(desc, min_a_) - tf.add_input(desc, max_a_) - tf.add_input(desc, min_b_) - tf.add_input(desc, max_b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) + begin + function quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + local desc + tf.with_op_name(name, "QuantizedMatMul") do + desc = tf.NodeDescription("QuantizedMatMul") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + b_ = convert(Tensor{Any}, b_) + begin + end + end + begin + min_a_ = convert(Tensor{Float32}, min_a_) + begin + end + end + begin + max_a_ = convert(Tensor{Float32}, max_a_) + begin + end + end + begin + min_b_ = convert(Tensor{Float32}, min_b_) + begin + end + end + begin + max_b_ = convert(Tensor{Float32}, max_b_) + begin + end + end + begin + (a_,) = tf.tf_promote(a_) + end + begin + (b_,) = tf.tf_promote(b_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + begin + tf.add_input(desc, min_a_) + end + begin + tf.add_input(desc, max_a_) + end + begin + tf.add_input(desc, min_b_) + end + begin + tf.add_input(desc, max_b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; 
name=nothing, transpose_a=nothing, transpose_b=nothing) + desc = tf.EagerOp("QuantizedMatMul") + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + min_a_ = convert(tf.EagerTensor, min_a_) + max_a_ = convert(tf.EagerTensor, max_a_) + min_b_ = convert(tf.EagerTensor, min_b_) + max_b_ = convert(tf.EagerTensor, max_b_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + begin + tf.add_input(desc, min_a_) + end + begin + tf.add_input(desc, max_a_) + end + begin + tf.add_input(desc, min_b_) + end + begin + tf.add_input(desc, max_b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + end + begin + desc["T1"] = tf.data_type(a_) + end + begin + desc["T2"] = tf.data_type(b_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) + if tf.in_eager_mode() + quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + else + quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - desc = tf.EagerOp("QuantizedMatMul") - a_ = convert(tf.EagerTensor, a_) - b_ = convert(tf.EagerTensor, b_) - min_a_ = convert(tf.EagerTensor, min_a_) - max_a_ = convert(tf.EagerTensor, max_a_) - min_b_ = convert(tf.EagerTensor, min_b_) - max_b_ = convert(tf.EagerTensor, max_b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - tf.add_input(desc, min_a_) - tf.add_input(desc, max_a_) - tf.add_input(desc, min_b_) - tf.add_input(desc, max_b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - desc["T1"] = tf.data_type(a_) - desc["T2"] = tf.data_type(b_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_mat_mul, [a_, b_, min_a_, max_a_, min_b_, max_b_], name=nothing, transpose_a=nothing, transpose_b=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_mat_mul(a_, b_, min_a_, max_a_, min_b_, max_b_; name=nothing, transpose_a=nothing, transpose_b=nothing) - if tf.in_eager_mode() - quantized_mat_mul_eager(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) - else - quantized_mat_mul_graph(a_, b_, min_a_, max_a_, min_b_, max_b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b) - end - end end @@ -44569,62 +81241,104 @@ end """ begin - function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - local desc 
- tf.with_op_name(name, "UnicodeDecodeWithOffsets") do - desc = tf.NodeDescription("UnicodeDecodeWithOffsets") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) + begin + function unicode_decode_with_offsets_graph(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + local desc + tf.with_op_name(name, "UnicodeDecodeWithOffsets") do + desc = tf.NodeDescription("UnicodeDecodeWithOffsets") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function unicode_decode_with_offsets_eager(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + desc = tf.EagerOp("UnicodeDecodeWithOffsets") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if input_encoding !== nothing + desc["input_encoding"] = Base.String(input_encoding) + end + end + begin + if errors !== nothing + desc["errors"] = Base.String(errors) + end + end + begin + if replacement_char !== nothing + desc["replacement_char"] = Base.Int(replacement_char) + end + end + begin + if replace_control_characters !== nothing + desc["replace_control_characters"] = Base.Bool(replace_control_characters) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) + if tf.in_eager_mode() + unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + else + unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) + end end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function unicode_decode_with_offsets_eager(input_; name=nothing, 
input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - desc = tf.EagerOp("UnicodeDecodeWithOffsets") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if input_encoding !== nothing - desc["input_encoding"] = Base.String(input_encoding) - end - if errors !== nothing - desc["errors"] = Base.String(errors) - end - if replacement_char !== nothing - desc["replacement_char"] = Base.Int(replacement_char) - end - if replace_control_characters !== nothing - desc["replace_control_characters"] = Base.Bool(replace_control_characters) - end - res = tf.execute(desc) - node = tf.TapeNode(unicode_decode_with_offsets, [input_], name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unicode_decode_with_offsets(input_; name=nothing, input_encoding=nothing, errors=nothing, replacement_char=nothing, replace_control_characters=nothing) - if tf.in_eager_mode() - unicode_decode_with_offsets_eager(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - else - unicode_decode_with_offsets_graph(input_; name=name, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) - end - end end @@ -44634,49 +81348,97 @@ end """ begin - function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AccumulatorApplyGradient") do - desc = tf.NodeDescription("AccumulatorApplyGradient") - handle_ = convert(Tensor{String}, handle_) - local_step_ = convert(Tensor{Int64}, local_step_) - gradient_ = convert(Tensor{Any}, gradient_) - (gradient_,) = tf.tf_promote(gradient_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorApplyGradient") do + desc = tf.NodeDescription("AccumulatorApplyGradient") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + local_step_ = convert(Tensor{Int64}, local_step_) + begin + end + end + begin + gradient_ = convert(Tensor{Any}, gradient_) + begin + end + end + begin + (gradient_,) = tf.tf_promote(gradient_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, local_step_) + end + begin + tf.add_input(desc, gradient_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AccumulatorApplyGradient") + handle_ = convert(tf.EagerTensor, handle_) + local_step_ = convert(tf.EagerTensor, local_step_) + gradient_ = convert(tf.EagerTensor, gradient_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, local_step_) + end + begin + tf.add_input(desc, gradient_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + 
end + begin + desc["dtype"] = tf.data_type(gradient_) + end + res = tf.execute(desc) + node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) + else + accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AccumulatorApplyGradient") - handle_ = convert(tf.EagerTensor, handle_) - local_step_ = convert(tf.EagerTensor, local_step_) - gradient_ = convert(tf.EagerTensor, gradient_) - tf.add_input(desc, handle_) - tf.add_input(desc, local_step_) - tf.add_input(desc, gradient_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(gradient_) - res = tf.execute(desc) - node = tf.TapeNode(accumulator_apply_gradient, [handle_, local_step_, gradient_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_apply_gradient(handle_, local_step_, gradient_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - accumulator_apply_gradient_eager(handle_, local_step_, gradient_; name=name, dtype=dtype) - else - accumulator_apply_gradient_graph(handle_, local_step_, gradient_; name=name, dtype=dtype) - end - end end @@ -44686,69 +81448,133 @@ end This Op eases the porting of code that uses tf.nn.embedding_lookup_sparse(). 
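A minimal usage sketch (illustrative only: the tensor values, table id, and
device ordinal below are assumptions, not taken from this patch):

    # hypothetical call into the generated wrapper defined below;
    # plain Julia arrays are converted by the wrapper itself
    sample_indices = [Int32[0, 0, 1]]                # one table, three lookups
    embedding_indices = [Int32[3, 1, 7]]             # rows to gather per lookup
    aggregation_weights = [Float32[1.0, 1.0, 1.0]]   # combiner weights
    enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
        embedding_indices, aggregation_weights, "train";
        N=1, device_ordinal=0, table_ids=[0])

As with every wrapper in this file, the call dispatches to the `_eager`
variant when `tf.in_eager_mode()` is true and to the `_graph` variant
otherwise.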
""" begin - function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) - local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") - sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] - embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] - aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + begin + function enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseTensorBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseTensorBatch") + begin + begin + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + begin + end + end + begin + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + begin + end + end + begin + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + begin + end + end + begin + mode_override_ = convert(Tensor{String}, mode_override_) + begin + end + end + end + begin + begin + tf.add_input(desc, sample_indices_) + end + begin + tf.add_input(desc, embedding_indices_) + end + begin + tf.add_input(desc, aggregation_weights_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + begin + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + end + begin + if table_ids !== nothing + desc["table_ids"] = map(Base.identity, table_ids) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") + sample_indices_ = convert(tf.EagerTensor, sample_indices_) + embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) + aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) + mode_override_ = convert(tf.EagerTensor, mode_override_) + begin + begin + tf.add_input(desc, sample_indices_) + end + begin + tf.add_input(desc, embedding_indices_) + end + begin + tf.add_input(desc, aggregation_weights_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + begin + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + end + begin + if table_ids !== nothing + desc["table_ids"] = map(Base.identity, 
table_ids) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) + if tf.in_eager_mode() + enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) + else + enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) + end end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) - end - if table_ids !== nothing - desc["table_ids"] = map(Base.identity, table_ids) - end - end - tf.Tensor(tf.Operation(desc)) - end - function enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) - desc = tf.EagerOp("EnqueueTPUEmbeddingSparseTensorBatch") - sample_indices_ = convert(tf.EagerTensor, sample_indices_) - embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) - aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) - mode_override_ = convert(tf.EagerTensor, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) - end - if table_ids !== nothing - desc["table_ids"] = map(Base.identity, table_ids) - end - res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_sparse_tensor_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_tensor_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, table_ids=nothing) - if tf.in_eager_mode() - enqueue_tpu_embedding_sparse_tensor_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) - else - enqueue_tpu_embedding_sparse_tensor_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners, table_ids=table_ids) - end - end end @@ -44758,51 +81584,111 @@ end """ begin - function write_summary_graph(writer_, step_, 
tensor_, tag_, summary_metadata_; name=nothing) - local desc - tf.with_op_name(name, "WriteSummary") do - desc = tf.NodeDescription("WriteSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tensor_ = convert(Tensor{Any}, tensor_) - tag_ = convert(Tensor{String}, tag_) - summary_metadata_ = convert(Tensor{String}, summary_metadata_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, summary_metadata_) - end - tf.Tensor(tf.Operation(desc)) - end - function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) - desc = tf.EagerOp("WriteSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tensor_ = convert(tf.EagerTensor, tensor_) - tag_ = convert(tf.EagerTensor, tag_) - summary_metadata_ = convert(tf.EagerTensor, summary_metadata_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tensor_) - tf.add_input(desc, tag_) - tf.add_input(desc, summary_metadata_) - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) - if tf.in_eager_mode() - write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name) - else - write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name) + begin + function write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + local desc + tf.with_op_name(name, "WriteSummary") do + desc = tf.NodeDescription("WriteSummary") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + summary_metadata_ = convert(Tensor{String}, summary_metadata_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, summary_metadata_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + desc = tf.EagerOp("WriteSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tensor_ = convert(tf.EagerTensor, tensor_) + tag_ = convert(tf.EagerTensor, tag_) + summary_metadata_ = convert(tf.EagerTensor, summary_metadata_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, summary_metadata_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(write_summary, [writer_, step_, tensor_, tag_, summary_metadata_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return 
res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_summary(writer_, step_, tensor_, tag_, summary_metadata_; name=nothing) + if tf.in_eager_mode() + write_summary_eager(writer_, step_, tensor_, tag_, summary_metadata_; name=name) + else + write_summary_graph(writer_, step_, tensor_, tag_, summary_metadata_; name=name) + end end - end + end end @@ -44812,86 +81698,176 @@ end """ begin - function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "QuantizedConv2D") do - desc = tf.NodeDescription("QuantizedConv2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_filter_ = convert(Tensor{Float32}, min_filter_) - max_filter_ = convert(Tensor{Float32}, max_filter_) - (filter_,) = tf.tf_promote(filter_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) + begin + function quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "QuantizedConv2D") do + desc = tf.NodeDescription("QuantizedConv2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + min_input_ = convert(Tensor{Float32}, min_input_) + begin + end + end + begin + max_input_ = convert(Tensor{Float32}, max_input_) + begin + end + end + begin + min_filter_ = convert(Tensor{Float32}, min_filter_) + begin + end + end + begin + max_filter_ = convert(Tensor{Float32}, max_filter_) + begin + end + end + begin + (filter_,) = tf.tf_promote(filter_) + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + begin + tf.add_input(desc, min_filter_) + end + begin + tf.add_input(desc, max_filter_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = 
tf.EagerOp("QuantizedConv2D") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_filter_ = convert(tf.EagerTensor, min_filter_) + max_filter_ = convert(tf.EagerTensor, max_filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + begin + tf.add_input(desc, min_filter_) + end + begin + tf.add_input(desc, max_filter_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["Tinput"] = tf.data_type(input_) + end + begin + desc["Tfilter"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.in_eager_mode() + quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + else + quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("QuantizedConv2D") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_filter_ = convert(tf.EagerTensor, min_filter_) - max_filter_ = convert(tf.EagerTensor, max_filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_filter_) - tf.add_input(desc, max_filter_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["Tinput"] = tf.data_type(input_) - desc["Tfilter"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_conv2d, [input_, filter_, min_input_, max_input_, min_filter_, max_filter_], name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 
1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_conv2d(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=nothing, out_type=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - quantized_conv2d_eager(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - else - quantized_conv2d_graph(input_, filter_, min_input_, max_input_, min_filter_, max_filter_; name=name, out_type=out_type, strides=strides, padding=padding, dilations=dilations) - end - end end @@ -44901,65 +81877,137 @@ end """ begin - function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceApplyMomentum") do - desc = tf.NodeDescription("ResourceApplyMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyMomentum") do + desc = tf.NodeDescription("ResourceApplyMomentum") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyMomentum") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + momentum_ = convert(tf.EagerTensor, momentum_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] 
= tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) end - function resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ResourceApplyMomentum") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - momentum_ = convert(tf.EagerTensor, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(momentum_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_apply_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - end - end end @@ -44969,35 +82017,63 @@ end """ begin - function log1p_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Log1p") do - desc = tf.NodeDescription("Log1p") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function log1p_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Log1p") do + desc = tf.NodeDescription("Log1p") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function log1p_eager(x_; name=nothing) - desc = tf.EagerOp("Log1p") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(log1p, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function log1p_eager(x_; name=nothing) 
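# A minimal sketch of the recipe every *_eager function in this generated file
# follows, using Log1p as the example: lift inputs to EagerTensor, build an
# EagerOp, set the "T" dtype attribute from the input, and execute. The helper
# name is hypothetical; it assumes the tf.EagerOp / add_input / data_type /
# execute API introduced earlier in this patch series.
function eager_log1p_sketch(x)
    x = convert(tf.EagerTensor, x)   # wrap plain Julia values/arrays
    desc = tf.EagerOp("Log1p")       # op name from the TF op registry
    tf.add_input(desc, x)            # positional input 0
    desc["T"] = tf.data_type(x)      # dtype attribute inferred from the input
    return tf.execute(desc)[1]       # Log1p yields a single output
end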
+ desc = tf.EagerOp("Log1p") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(log1p, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log1p(x_; name=nothing) - if tf.in_eager_mode() - log1p_eager(x_; name=name) - else - log1p_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log1p(x_; name=nothing) + if tf.in_eager_mode() + log1p_eager(x_; name=name) + else + log1p_graph(x_; name=name) + end end - end + end end @@ -45007,59 +82083,95 @@ end """ begin - function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapClear") do - desc = tf.NodeDescription("OrderedMapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function ordered_map_clear_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapClear") do + desc = tf.NodeDescription("OrderedMapClear") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapClear") + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + 
ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function ordered_map_clear_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapClear") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_clear, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_clear(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_clear_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_clear_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -45069,52 +82181,104 @@ end """ begin - function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterUpdate") do - desc = tf.NodeDescription("ResourceScatterUpdate") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_update_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterUpdate") do + desc = tf.NodeDescription("ResourceScatterUpdate") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterUpdate") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + 
updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function resource_scatter_update_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterUpdate") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_update, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_update(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_update_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_update_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -45124,66 +82288,116 @@ end """ begin - function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "BarrierTakeMany") do - desc = tf.NodeDescription("BarrierTakeMany") - handle_ = convert(Tensor{String}, handle_) - num_elements_ = convert(Tensor{Int32}, num_elements_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_elements_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if allow_small_batch !== nothing - desc["allow_small_batch"] = Base.Bool(allow_small_batch) - end - if wait_for_incomplete !== nothing - desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) + begin + function barrier_take_many_graph(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "BarrierTakeMany") do + desc = tf.NodeDescription("BarrierTakeMany") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + num_elements_ = convert(Tensor{Int32}, num_elements_) + begin + end + end + end + begin + begin 
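# Note the index shift in resource_scatter_update_graph above:
# `indices_ - convert(tf.Tensor{eltype(indices_)}, 1)` turns the caller's
# 1-based Julia indices into the 0-based indices the TensorFlow kernel
# expects. A standalone sketch of the same convention (hypothetical helper;
# plain Julia, no TF dependency):
to_tf_indices(julia_indices::AbstractVector{<:Integer}) = julia_indices .- 1
# e.g. to_tf_indices([1, 3]) == [0, 2] -- rows 1 and 3 in Julia numbering
# address slots 0 and 2 of the underlying TF resource variable.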
+ tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_elements_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if allow_small_batch !== nothing + desc["allow_small_batch"] = Base.Bool(allow_small_batch) + end + end + begin + if wait_for_incomplete !== nothing + desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + desc = tf.EagerOp("BarrierTakeMany") + handle_ = convert(tf.EagerTensor, handle_) + num_elements_ = convert(tf.EagerTensor, num_elements_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_elements_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if allow_small_batch !== nothing + desc["allow_small_batch"] = Base.Bool(allow_small_batch) + end + end + begin + if wait_for_incomplete !== nothing + desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + else + barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function barrier_take_many_eager(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - desc = tf.EagerOp("BarrierTakeMany") - handle_ = convert(tf.EagerTensor, handle_) - num_elements_ = convert(tf.EagerTensor, num_elements_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_elements_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if allow_small_batch !== nothing - desc["allow_small_batch"] = Base.Bool(allow_small_batch) - end - if wait_for_incomplete !== nothing - desc["wait_for_incomplete"] = Base.Bool(wait_for_incomplete) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(barrier_take_many, [handle_, num_elements_], 
name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_take_many(handle_, num_elements_; name=nothing, component_types=nothing, allow_small_batch=nothing, wait_for_incomplete=nothing, timeout_ms=nothing) - if tf.in_eager_mode() - barrier_take_many_eager(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) - else - barrier_take_many_graph(handle_, num_elements_; name=name, component_types=component_types, allow_small_batch=allow_small_batch, wait_for_incomplete=wait_for_incomplete, timeout_ms=timeout_ms) - end - end end @@ -45193,65 +82407,137 @@ end """ begin - function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceApplyKerasMomentum") do - desc = tf.NodeDescription("ResourceApplyKerasMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceApplyKerasMomentum") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceApplyKerasMomentum") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + momentum_ = convert(tf.EagerTensor, momentum_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = 
Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) end - function resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ResourceApplyKerasMomentum") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - momentum_ = convert(tf.EagerTensor, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["T"] = tf.data_type(momentum_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_keras_momentum, [var_, accum_, lr_, grad_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_keras_momentum(var_, accum_, lr_, grad_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_apply_keras_momentum_eager(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_apply_keras_momentum_graph(var_, accum_, lr_, grad_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - end - end end @@ -45261,71 +82547,115 @@ end Generates serialized partition messages suitable for batch reads. 
""" begin - function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - local desc - tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do - desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") - if project_id !== nothing - desc["project_id"] = Base.String(project_id) - end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) + begin + function generate_big_query_reader_partitions_graph(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "GenerateBigQueryReaderPartitions") do + desc = tf.NodeDescription("GenerateBigQueryReaderPartitions") + begin + end + begin + end + begin + begin + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + end + begin + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + end + begin + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + end + begin + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + end + begin + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + end + begin + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + end + begin + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function generate_big_query_reader_partitions_eager(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + desc = tf.EagerOp("GenerateBigQueryReaderPartitions") + begin + end + begin + begin + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + end + begin + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + end + begin + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + end + begin + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + end + begin + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + end + begin + if num_partitions !== nothing + desc["num_partitions"] = Base.Int(num_partitions) + end + end + begin + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) + if tf.in_eager_mode() + generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, 
columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + else + generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) + end end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - end - tf.Tensor(tf.Operation(desc)) - end - function generate_big_query_reader_partitions_eager(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - desc = tf.EagerOp("GenerateBigQueryReaderPartitions") - if project_id !== nothing - desc["project_id"] = Base.String(project_id) - end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) - end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if num_partitions !== nothing - desc["num_partitions"] = Base.Int(num_partitions) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - res = tf.execute(desc) - node = tf.TapeNode(generate_big_query_reader_partitions, [], name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generate_big_query_reader_partitions(; name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, num_partitions=nothing, test_end_point=nothing) - if tf.in_eager_mode() - generate_big_query_reader_partitions_eager(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) - else - generate_big_query_reader_partitions_graph(; name=name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, num_partitions=num_partitions, test_end_point=test_end_point) - end - end end @@ -45335,51 +82665,87 @@ end A placeholder op for multiple values that will be sent to TensorFlow from a """ begin - function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "_XlaRecvAtHost") do - desc = tf.NodeDescription("_XlaRecvAtHost") - dynamic_key_ = convert(Tensor{String}, dynamic_key_) - tf.add_input(desc, dynamic_key_) - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) + begin + function _xla_recv_at_host_graph(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "_XlaRecvAtHost") do + desc = tf.NodeDescription("_XlaRecvAtHost") + begin + begin + dynamic_key_ = 
convert(Tensor{String}, dynamic_key_) + begin + end + end + end + begin + begin + tf.add_input(desc, dynamic_key_) + end + end + begin + begin + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + end + begin + if key !== nothing + desc["key"] = Base.String(key) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + desc = tf.EagerOp("_XlaRecvAtHost") + dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) + begin + begin + tf.add_input(desc, dynamic_key_) + end + end + begin + begin + if Toutputs !== nothing + desc["Toutputs"] = map(Base.identity, Toutputs) + end + end + begin + if key !== nothing + desc["key"] = Base.String(key) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + else + _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) + end end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _xla_recv_at_host_eager(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - desc = tf.EagerOp("_XlaRecvAtHost") - dynamic_key_ = convert(tf.EagerTensor, dynamic_key_) - tf.add_input(desc, dynamic_key_) - if Toutputs !== nothing - desc["Toutputs"] = map(Base.identity, Toutputs) - end - if key !== nothing - desc["key"] = Base.String(key) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(_xla_recv_at_host, [dynamic_key_], name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _xla_recv_at_host(dynamic_key_; name=nothing, Toutputs=nothing, key=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - _xla_recv_at_host_eager(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) - else - _xla_recv_at_host_graph(dynamic_key_; name=name, Toutputs=Toutputs, key=key, device_ordinal=device_ordinal) - end - end end @@ -45389,66 +82755,124 @@ end """ begin - function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "QuantizedAvgPool") do - desc = tf.NodeDescription("QuantizedAvgPool") - input_ = convert(Tensor{Any}, input_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - (input_,) = tf.tf_promote(input_) - 
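# QuantizedAvgPool below (like QuantizedConv2D earlier) has three outputs --
# the pooled tensor plus the min/max of the quantized range -- which is why
# the generated graph code gathers one tf.Tensor per output slot instead of
# returning a single tensor. A generic sketch of that loop (hypothetical
# helper; assumes the tf.Operation / tf.Tensor API used throughout this file):
function collect_outputs(op, n)
    out = tf.Tensor[]                    # one wrapper Tensor per output slot
    for out_idx in 1:n
        push!(out, tf.Tensor(op, out_idx))
    end
    return out
end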
tf.add_input(desc, input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function quantized_avg_pool_graph(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "QuantizedAvgPool") do + desc = tf.NodeDescription("QuantizedAvgPool") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + min_input_ = convert(Tensor{Float32}, min_input_) + begin + end + end + begin + max_input_ = convert(Tensor{Float32}, max_input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("QuantizedAvgPool") + input_ = convert(tf.EagerTensor, input_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + else + quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_avg_pool_eager(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("QuantizedAvgPool") - input_ = convert(tf.EagerTensor, input_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - tf.add_input(desc, input_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - if ksize !== nothing - desc["ksize"] = 
map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_avg_pool, [input_, min_input_, max_input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_avg_pool(input_, min_input_, max_input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - quantized_avg_pool_eager(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - else - quantized_avg_pool_graph(input_, min_input_, max_input_; name=name, ksize=ksize, strides=strides, padding=padding) - end - end end @@ -45458,87 +82882,211 @@ end """ begin - function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do - desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - vhat_ = convert(Tensor{Any}, vhat_) - beta1_power_ = convert(Tensor{Any}, beta1_power_) - beta2_power_ = convert(Tensor{Any}, beta2_power_) - lr_ = convert(Tensor{Any}, lr_) - beta1_ = convert(Tensor{Any}, beta1_) - beta2_ = convert(Tensor{Any}, beta2_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, vhat_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - vhat_ = convert(tf.EagerTensor, vhat_) - beta1_power_ = convert(tf.EagerTensor, beta1_power_) - beta2_power_ = convert(tf.EagerTensor, beta2_power_) - lr_ = convert(tf.EagerTensor, lr_) - beta1_ = convert(tf.EagerTensor, beta1_) - beta2_ = convert(tf.EagerTensor, beta2_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, vhat_) - tf.add_input(desc, beta1_power_) - tf.add_input(desc, beta2_power_) - tf.add_input(desc, lr_) - tf.add_input(desc, beta1_) - tf.add_input(desc, beta2_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(beta1_power_) - desc["T"] = tf.data_type(beta2_power_) - desc["T"] = tf.data_type(lr_) - desc["T"] = 
tf.data_type(beta1_) - desc["T"] = tf.data_type(beta2_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdamWithAmsgrad") do + desc = tf.NodeDescription("ResourceApplyAdamWithAmsgrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + vhat_ = convert(Tensor{Any}, vhat_) + begin + end + end + begin + beta1_power_ = convert(Tensor{Any}, beta1_power_) + begin + end + end + begin + beta2_power_ = convert(Tensor{Any}, beta2_power_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + beta1_ = convert(Tensor{Any}, beta1_) + begin + end + end + begin + beta2_ = convert(Tensor{Any}, beta2_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) = tf.tf_promote(beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, vhat_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAdamWithAmsgrad") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + vhat_ = convert(tf.EagerTensor, vhat_) + beta1_power_ = convert(tf.EagerTensor, beta1_power_) + beta2_power_ = convert(tf.EagerTensor, beta2_power_) + lr_ = convert(tf.EagerTensor, lr_) + beta1_ = convert(tf.EagerTensor, beta1_) + beta2_ = convert(tf.EagerTensor, beta2_) + epsilon_ = convert(tf.EagerTensor, 
epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, vhat_) + end + begin + tf.add_input(desc, beta1_power_) + end + begin + tf.add_input(desc, beta2_power_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, beta1_) + end + begin + tf.add_input(desc, beta2_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(beta1_power_) + end + begin + desc["T"] = tf.data_type(beta2_power_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(beta1_) + end + begin + desc["T"] = tf.data_type(beta2_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adam_with_amsgrad, [var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adam_with_amsgrad(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_adam_with_amsgrad_eager(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_adam_with_amsgrad_graph(var_, m_, v_, vhat_, beta1_power_, beta2_power_, lr_, beta1_, beta2_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -45548,65 +83096,105 @@ end Receives the named tensor from send_device on recv_device. 
""" begin - function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - local desc - tf.with_op_name(name, "_HostRecv") do - desc = tf.NodeDescription("_HostRecv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) + begin + function _host_recv_graph(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + local desc + tf.with_op_name(name, "_HostRecv") do + desc = tf.NodeDescription("_HostRecv") + begin + end + begin + end + begin + begin + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _host_recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + desc = tf.EagerOp("_HostRecv") + begin + end + begin + begin + if tensor_type !== nothing + desc["tensor_type"] = Base.identity(tensor_type) + end + end + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if send_device !== nothing + desc["send_device"] = Base.String(send_device) + end + end + begin + if send_device_incarnation !== nothing + desc["send_device_incarnation"] = Base.Int(send_device_incarnation) + end + end + begin + if recv_device !== nothing + desc["recv_device"] = Base.String(recv_device) + end + end + begin + if client_terminated !== nothing + desc["client_terminated"] = Base.Bool(client_terminated) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) + if tf.in_eager_mode() + _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + else + _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) + end end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - 
desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _host_recv_eager(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - desc = tf.EagerOp("_HostRecv") - if tensor_type !== nothing - desc["tensor_type"] = Base.identity(tensor_type) - end - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if send_device !== nothing - desc["send_device"] = Base.String(send_device) - end - if send_device_incarnation !== nothing - desc["send_device_incarnation"] = Base.Int(send_device_incarnation) - end - if recv_device !== nothing - desc["recv_device"] = Base.String(recv_device) - end - if client_terminated !== nothing - desc["client_terminated"] = Base.Bool(client_terminated) - end - res = tf.execute(desc) - node = tf.TapeNode(_host_recv, [], name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _host_recv(; name=nothing, tensor_type=nothing, tensor_name=nothing, send_device=nothing, send_device_incarnation=nothing, recv_device=nothing, client_terminated=nothing) - if tf.in_eager_mode() - _host_recv_eager(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - else - _host_recv_graph(; name=name, tensor_type=tensor_type, tensor_name=tensor_name, send_device=send_device, send_device_incarnation=send_device_incarnation, recv_device=recv_device, client_terminated=client_terminated) - end - end end @@ -45616,49 +83204,105 @@ end """ begin - function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCenterBias") do - desc = tf.NodeDescription("BoostedTreesCenterBias") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - mean_gradients_ = convert(Tensor{Float32}, mean_gradients_) - mean_hessians_ = convert(Tensor{Float32}, mean_hessians_) - l1_ = convert(Tensor{Float32}, l1_) - l2_ = convert(Tensor{Float32}, l2_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, mean_gradients_) - tf.add_input(desc, mean_hessians_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - desc = tf.EagerOp("BoostedTreesCenterBias") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - mean_gradients_ = convert(tf.EagerTensor, mean_gradients_) - mean_hessians_ = convert(tf.EagerTensor, mean_hessians_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, mean_gradients_) - tf.add_input(desc, mean_hessians_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - res = tf.execute(desc) - node = 
tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) - if tf.in_eager_mode() - boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) - else - boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + begin + function boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCenterBias") do + desc = tf.NodeDescription("BoostedTreesCenterBias") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + mean_gradients_ = convert(Tensor{Float32}, mean_gradients_) + begin + end + end + begin + mean_hessians_ = convert(Tensor{Float32}, mean_hessians_) + begin + end + end + begin + l1_ = convert(Tensor{Float32}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Float32}, l2_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, mean_gradients_) + end + begin + tf.add_input(desc, mean_hessians_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + desc = tf.EagerOp("BoostedTreesCenterBias") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + mean_gradients_ = convert(tf.EagerTensor, mean_gradients_) + mean_hessians_ = convert(tf.EagerTensor, mean_hessians_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, mean_gradients_) + end + begin + tf.add_input(desc, mean_hessians_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_center_bias, [tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_center_bias(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=nothing) + if tf.in_eager_mode() + boosted_trees_center_bias_eager(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + else + boosted_trees_center_bias_graph(tree_ensemble_handle_, mean_gradients_, mean_hessians_, l1_, l2_; name=name) + end end - end + end end @@ -45668,33 +83312,57 @@ end """ begin - function lookup_table_size_v2_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableSizeV2") do - desc = tf.NodeDescription("LookupTableSizeV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - tf.add_input(desc, table_handle_) + begin + function lookup_table_size_v2_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableSizeV2") do + desc = 
tf.NodeDescription("LookupTableSizeV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function lookup_table_size_v2_eager(table_handle_; name=nothing) - desc = tf.EagerOp("LookupTableSizeV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - tf.add_input(desc, table_handle_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function lookup_table_size_v2_eager(table_handle_; name=nothing) + desc = tf.EagerOp("LookupTableSizeV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_size_v2, [table_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_size_v2_eager(table_handle_; name=name) - else - lookup_table_size_v2_graph(table_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_size_v2(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_size_v2_eager(table_handle_; name=name) + else + lookup_table_size_v2_graph(table_handle_; name=name) + end end - end + end end @@ -45704,37 +83372,69 @@ end """ begin - function irfft_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT") do - desc = tf.NodeDescription("IRFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function irfft_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT") do + desc = tf.NodeDescription("IRFFT") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function irfft_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("IRFFT") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function irfft_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("IRFFT") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(irfft, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
irfft(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft_eager(input_, fft_length_; name=name) - else - irfft_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft_eager(input_, fft_length_; name=name) + else + irfft_graph(input_, fft_length_; name=name) + end end - end + end end @@ -45744,44 +83444,90 @@ end """ begin - function inplace_add_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceAdd") do - desc = tf.NodeDescription("InplaceAdd") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) - end - function inplace_add_eager(x_, i_, v_; name=nothing) - desc = tf.EagerOp("InplaceAdd") - x_ = convert(tf.EagerTensor, x_) - i_ = convert(tf.EagerTensor, i_) - v_ = convert(tf.EagerTensor, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(v_) - res = tf.execute(desc) - node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_add(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_add_eager(x_, i_, v_; name=name) - else - inplace_add_graph(x_, i_, v_; name=name) + begin + function inplace_add_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceAdd") do + desc = tf.NodeDescription("InplaceAdd") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + i_ = convert(Tensor{Int32}, i_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + (x_, v_) = tf.tf_promote(x_, v_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function inplace_add_eager(x_, i_, v_; name=nothing) + desc = tf.EagerOp("InplaceAdd") + x_ = convert(tf.EagerTensor, x_) + i_ = convert(tf.EagerTensor, i_) + v_ = convert(tf.EagerTensor, v_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(v_) + end + res = tf.execute(desc) + node = tf.TapeNode(inplace_add, [x_, i_, v_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_add(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_add_eager(x_, i_, v_; name=name) + else + inplace_add_graph(x_, i_, v_; name=name) + end end - end + end end @@ -45791,46 +83537,88 @@ end """ begin - function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "BiasAdd") do - desc = tf.NodeDescription("BiasAdd") - value_ = convert(Tensor{Any}, value_) - bias_ = convert(Tensor{Any}, bias_) - (value_, bias_) = tf.tf_promote(value_, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) - if data_format !== nothing - 
desc["data_format"] = Base.String(data_format) + begin + function bias_add_graph(value_, bias_; name=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "BiasAdd") do + desc = tf.NodeDescription("BiasAdd") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + bias_ = convert(Tensor{Any}, bias_) + begin + end + end + begin + (value_, bias_) = tf.tf_promote(value_, bias_) + end + end + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, bias_) + end + end + begin + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) + desc = tf.EagerOp("BiasAdd") + value_ = convert(tf.EagerTensor, value_) + bias_ = convert(tf.EagerTensor, bias_) + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, bias_) + end + end + begin + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + begin + desc["T"] = tf.data_type(bias_) + end + res = tf.execute(desc) + node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing) + if tf.in_eager_mode() + bias_add_eager(value_, bias_; name=name, data_format=data_format) + else + bias_add_graph(value_, bias_; name=name, data_format=data_format) + end end - end - tf.Tensor(tf.Operation(desc)) end - function bias_add_eager(value_, bias_; name=nothing, data_format=nothing) - desc = tf.EagerOp("BiasAdd") - value_ = convert(tf.EagerTensor, value_) - bias_ = convert(tf.EagerTensor, bias_) - tf.add_input(desc, value_) - tf.add_input(desc, bias_) - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(value_) - desc["T"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(bias_add, [value_, bias_], name=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bias_add(value_, bias_; name=nothing, data_format=nothing) - if tf.in_eager_mode() - bias_add_eager(value_, bias_; name=name, data_format=data_format) - else - bias_add_graph(value_, bias_; name=name, data_format=data_format) - end - end end @@ -45840,30 +83628,45 @@ end An op that disconnects the TPUs on a host from a running distributed """ begin - function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) - local desc - tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do - desc - tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") + begin + function _disconnect_host_from_distributed_tpu_system_graph(; name=nothing) + local desc + tf.with_op_name(name, "_DisconnectHostFromDistributedTPUSystem") do + desc = tf.NodeDescription("_DisconnectHostFromDistributedTPUSystem") + begin + end + begin + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) - desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") - res = 
tf.execute(desc) - node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _disconnect_host_from_distributed_tpu_system_eager(; name=nothing) + desc = tf.EagerOp("_DisconnectHostFromDistributedTPUSystem") + begin + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(_disconnect_host_from_distributed_tpu_system, [], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing) - if tf.in_eager_mode() - _disconnect_host_from_distributed_tpu_system_eager(; name=name) - else - _disconnect_host_from_distributed_tpu_system_graph(; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _disconnect_host_from_distributed_tpu_system(; name=nothing) + if tf.in_eager_mode() + _disconnect_host_from_distributed_tpu_system_eager(; name=name) + else + _disconnect_host_from_distributed_tpu_system_graph(; name=name) + end end - end + end end @@ -45873,69 +83676,133 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - momenta_ = convert(Tensor{Float32}, momenta_) - velocities_ = convert(Tensor{Float32}, velocities_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingADAMParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingADAMParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + momenta_ = convert(Tensor{Float32}, momenta_) + begin + end + end + begin + velocities_ = convert(Tensor{Float32}, velocities_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, velocities_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + 
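# NOTE: optional attributes arrive as keyword arguments defaulting to
# `nothing` and are written into the op description only when supplied,
# so TensorFlow's registered defaults apply otherwise. A hypothetical
# call (argument values are illustrative only), kept in a block comment:
#=
load_tpu_embedding_adam_parameters_grad_accum_debug(
    parameters, momenta, velocities, accumulators;
    num_shards=1, shard_id=0)
# `table_id` and `table_name` stay `nothing`, so neither attribute is
# set on the NodeDescription and the runtime default is used.
=#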
tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + momenta_ = convert(tf.EagerTensor, momenta_) + velocities_ = convert(tf.EagerTensor, velocities_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, momenta_) + end + begin + tf.add_input(desc, velocities_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingADAMParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - momenta_ = convert(tf.EagerTensor, momenta_) - velocities_ = convert(tf.EagerTensor, velocities_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, momenta_) - tf.add_input(desc, velocities_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adam_parameters_grad_accum_debug, [parameters_, momenta_, velocities_, 
gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adam_parameters_grad_accum_debug(parameters_, momenta_, velocities_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adam_parameters_grad_accum_debug_eager(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adam_parameters_grad_accum_debug_graph(parameters_, momenta_, velocities_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -45945,50 +83812,100 @@ end """ begin - function ragged_range_graph(starts_, limits_, deltas_; name=nothing) - local desc - tf.with_op_name(name, "RaggedRange") do - desc = tf.NodeDescription("RaggedRange") - starts_ = convert(Tensor{Int32}, starts_) - limits_ = convert(Tensor{Int32}, limits_) - deltas_ = convert(Tensor{Int32}, deltas_) - (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_) - tf.add_input(desc, starts_) - tf.add_input(desc, limits_) - tf.add_input(desc, deltas_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function ragged_range_eager(starts_, limits_, deltas_; name=nothing) - desc = tf.EagerOp("RaggedRange") - starts_ = convert(tf.EagerTensor, starts_) - limits_ = convert(tf.EagerTensor, limits_) - deltas_ = convert(tf.EagerTensor, deltas_) - tf.add_input(desc, starts_) - tf.add_input(desc, limits_) - tf.add_input(desc, deltas_) - desc["T"] = tf.data_type(starts_) - desc["T"] = tf.data_type(limits_) - desc["T"] = tf.data_type(deltas_) - res = tf.execute(desc) - node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing) - if tf.in_eager_mode() - ragged_range_eager(starts_, limits_, deltas_; name=name) - else - ragged_range_graph(starts_, limits_, deltas_; name=name) + begin + function ragged_range_graph(starts_, limits_, deltas_; name=nothing) + local desc + tf.with_op_name(name, "RaggedRange") do + desc = tf.NodeDescription("RaggedRange") + begin + begin + starts_ = convert(Tensor{Int32}, starts_) + begin + end + end + begin + limits_ = convert(Tensor{Int32}, limits_) + begin + end + end + begin + deltas_ = convert(Tensor{Int32}, deltas_) + begin + end + end + begin + (starts_, limits_, deltas_) = tf.tf_promote(starts_, limits_, deltas_) + end + end + begin + begin + tf.add_input(desc, starts_) + end + begin + tf.add_input(desc, limits_) + end + begin + tf.add_input(desc, deltas_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ragged_range_eager(starts_, limits_, deltas_; name=nothing) + desc = tf.EagerOp("RaggedRange") + starts_ = convert(tf.EagerTensor, starts_) + limits_ = convert(tf.EagerTensor, limits_) + deltas_ = convert(tf.EagerTensor, 
deltas_) + begin + begin + tf.add_input(desc, starts_) + end + begin + tf.add_input(desc, limits_) + end + begin + tf.add_input(desc, deltas_) + end + end + begin + end + begin + desc["T"] = tf.data_type(starts_) + end + begin + desc["T"] = tf.data_type(limits_) + end + begin + desc["T"] = tf.data_type(deltas_) + end + res = tf.execute(desc) + node = tf.TapeNode(ragged_range, [starts_, limits_, deltas_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_range(starts_, limits_, deltas_; name=nothing) + if tf.in_eager_mode() + ragged_range_eager(starts_, limits_, deltas_; name=name) + else + ragged_range_graph(starts_, limits_, deltas_; name=name) + end end - end + end end @@ -45998,61 +83915,125 @@ end """ begin - function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "WindowDataset") do - desc = tf.NodeDescription("WindowDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - size_ = convert(Tensor{Int64}, size_) - shift_ = convert(Tensor{Int64}, shift_) - stride_ = convert(Tensor{Int64}, stride_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, size_) - tf.add_input(desc, shift_) - tf.add_input(desc, stride_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "WindowDataset") do + desc = tf.NodeDescription("WindowDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + size_ = convert(Tensor{Int64}, size_) + begin + end + end + begin + shift_ = convert(Tensor{Int64}, shift_) + begin + end + end + begin + stride_ = convert(Tensor{Int64}, stride_) + begin + end + end + begin + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, shift_) + end + begin + tf.add_input(desc, stride_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("WindowDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + size_ = convert(tf.EagerTensor, size_) + shift_ = convert(tf.EagerTensor, shift_) + stride_ = convert(tf.EagerTensor, stride_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, shift_) + end + begin + tf.add_input(desc, stride_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] 
= map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("WindowDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - size_ = convert(tf.EagerTensor, size_) - shift_ = convert(tf.EagerTensor, shift_) - stride_ = convert(tf.EagerTensor, stride_) - drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, size_) - tf.add_input(desc, shift_) - tf.add_input(desc, stride_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(window_dataset, [input_dataset_, size_, shift_, stride_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function window_dataset(input_dataset_, size_, shift_, stride_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - window_dataset_eager(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - else - window_dataset_graph(input_dataset_, size_, shift_, stride_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -46062,35 +84043,63 @@ end """ begin - function diag_graph(diagonal_; name=nothing) - local desc - tf.with_op_name(name, "Diag") do - desc = tf.NodeDescription("Diag") - diagonal_ = convert(Tensor{Any}, diagonal_) - (diagonal_,) = tf.tf_promote(diagonal_) - tf.add_input(desc, diagonal_) + begin + function diag_graph(diagonal_; name=nothing) + local desc + tf.with_op_name(name, "Diag") do + desc = tf.NodeDescription("Diag") + begin + begin + diagonal_ = convert(Tensor{Any}, diagonal_) + begin + end + end + begin + (diagonal_,) = tf.tf_promote(diagonal_) + end + end + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function diag_eager(diagonal_; name=nothing) - desc = tf.EagerOp("Diag") - 
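# NOTE: every `*_eager` body (such as `diag_eager` here) ends with the
# same epilogue: run the op, then record it for reverse-mode autodiff.
# `tf.TapeNode` captures the forward entry point and its inputs, and
# `tf.add_node` attaches that record to the primary output so a later
# backward pass can replay the op. Schematically (`op_fn` and `inputs`
# stand for the entry point and its tensor arguments):
#=
res = tf.execute(desc)          # run; res is a Vector of outputs
node = tf.TapeNode(op_fn, [inputs...], name=nothing, res)
if length(res) >= 1
    tf.add_node(res[1], node)   # register on the gradient tape
    return res[1]               # single-output ops unwrap the vector
end
=#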
diagonal_ = convert(tf.EagerTensor, diagonal_) - tf.add_input(desc, diagonal_) - desc["T"] = tf.data_type(diagonal_) - res = tf.execute(desc) - node = tf.TapeNode(diag, [diagonal_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function diag_eager(diagonal_; name=nothing) + desc = tf.EagerOp("Diag") + diagonal_ = convert(tf.EagerTensor, diagonal_) + begin + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + begin + desc["T"] = tf.data_type(diagonal_) + end + res = tf.execute(desc) + node = tf.TapeNode(diag, [diagonal_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag(diagonal_; name=nothing) - if tf.in_eager_mode() - diag_eager(diagonal_; name=name) - else - diag_graph(diagonal_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function diag(diagonal_; name=nothing) + if tf.in_eager_mode() + diag_eager(diagonal_; name=name) + else + diag_graph(diagonal_; name=name) + end end - end + end end @@ -46100,41 +84109,65 @@ end A placeholder op for a value that will be fed into the computation. """ begin - function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) - local desc - tf.with_op_name(name, "InfeedDequeue") do - desc = tf.NodeDescription("InfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function infeed_dequeue_graph(; name=nothing, dtype=nothing, shape=nothing) + local desc + tf.with_op_name(name, "InfeedDequeue") do + desc = tf.NodeDescription("InfeedDequeue") + begin + end + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function infeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing) - desc = tf.EagerOp("InfeedDequeue") - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - res = tf.execute(desc) - node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function infeed_dequeue_eager(; name=nothing, dtype=nothing, shape=nothing) + desc = tf.EagerOp("InfeedDequeue") + begin + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(infeed_dequeue, [], name=nothing, dtype=nothing, shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) - if tf.in_eager_mode() - infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) - else - infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function infeed_dequeue(; name=nothing, dtype=nothing, shape=nothing) + if tf.in_eager_mode() + infeed_dequeue_eager(; name=name, dtype=dtype, shape=shape) + else + 
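# NOTE: for dtype-generic ops such as `Diag` above, the graph builder
# unifies input element types with `tf.tf_promote`, while the eager path
# reads the concrete type back off the tensor handle and stores it in
# the "T" attribute. A block-commented sketch of the eager case:
#=
x = convert(tf.EagerTensor, [1.0, 2.0])  # Float64 data
desc = tf.EagerOp("Diag")
tf.add_input(desc, x)
desc["T"] = tf.data_type(x)              # records Float64 as attr "T"
res = tf.execute(desc)                   # res[1] is the 2x2 diagonal
=#
# When several inputs share "T" (e.g. `InplaceAdd` earlier), the
# attribute is simply assigned once per input, the last assignment
# winning.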
infeed_dequeue_graph(; name=name, dtype=dtype, shape=shape) + end end - end + end end @@ -46144,49 +84177,89 @@ end """ begin - function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do - desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - tag_ = convert(Tensor{String}, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalLatencyStatsDataset") do + desc = tf.NodeDescription("ExperimentalLatencyStatsDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, tag_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalLatencyStatsDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + tag_ = convert(tf.EagerTensor, tag_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, tag_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalLatencyStatsDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - tag_ = convert(tf.EagerTensor, tag_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, tag_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, 
output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_latency_stats_dataset, [input_dataset_, tag_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_latency_stats_dataset(input_dataset_, tag_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_latency_stats_dataset_eager(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_latency_stats_dataset_graph(input_dataset_, tag_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -46196,55 +84269,107 @@ end """ begin - function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "AddSparseToTensorsMap") do - desc = tf.NodeDescription("AddSparseToTensorsMap") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddSparseToTensorsMap") do + desc = tf.NodeDescription("AddSparseToTensorsMap") + begin + begin + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + begin + end + end + begin + sparse_values_ = convert(Tensor{Any}, sparse_values_) + begin + end + end + begin + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + begin + end + end + begin + (sparse_values_,) = tf.tf_promote(sparse_values_) + end + end + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("AddSparseToTensorsMap") + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(sparse_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, 
container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("AddSparseToTensorsMap") - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_values_ = convert(tf.EagerTensor, sparse_values_) - sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(sparse_values_) - res = tf.execute(desc) - node = tf.TapeNode(add_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - add_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - else - add_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - end - end end @@ -46254,63 +84379,121 @@ end """ begin - function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - local desc - tf.with_op_name(name, "RaggedGather") do - desc = tf.NodeDescription("RaggedGather") - params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_] - params_dense_values_ = convert(Tensor{Any}, params_dense_values_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (indices_,) = tf.tf_promote(indices_) - (params_dense_values_,) = tf.tf_promote(params_dense_values_) - tf.add_input(desc, params_nested_splits_) - tf.add_input(desc, params_dense_values_) - tf.add_input(desc, indices_) - if PARAMS_RAGGED_RANK !== nothing - desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) - end - if OUTPUT_RAGGED_RANK !== nothing - desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) + begin + function ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + local desc + tf.with_op_name(name, "RaggedGather") do + desc = tf.NodeDescription("RaggedGather") + begin + begin + params_nested_splits_ = [convert(Tensor{Int64}, x) for x = params_nested_splits_] + begin + end 
+ end + begin + params_dense_values_ = convert(Tensor{Any}, params_dense_values_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (indices_,) = tf.tf_promote(indices_) + end + begin + (params_dense_values_,) = tf.tf_promote(params_dense_values_) + end + end + begin + begin + tf.add_input(desc, params_nested_splits_) + end + begin + tf.add_input(desc, params_dense_values_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if PARAMS_RAGGED_RANK !== nothing + desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) + end + end + begin + if OUTPUT_RAGGED_RANK !== nothing + desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + desc = tf.EagerOp("RaggedGather") + params_nested_splits_ = convert(tf.EagerTensor, params_nested_splits_) + params_dense_values_ = convert(tf.EagerTensor, params_dense_values_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, params_nested_splits_) + end + begin + tf.add_input(desc, params_dense_values_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if PARAMS_RAGGED_RANK !== nothing + desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) + end + end + begin + if OUTPUT_RAGGED_RANK !== nothing + desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) + end + end + end + begin + desc["Tvalues"] = tf.data_type(params_dense_values_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) + if tf.in_eager_mode() + ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + else + ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - desc = tf.EagerOp("RaggedGather") - params_nested_splits_ = convert(tf.EagerTensor, params_nested_splits_) - params_dense_values_ = convert(tf.EagerTensor, params_dense_values_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, params_nested_splits_) - tf.add_input(desc, params_dense_values_) - tf.add_input(desc, indices_) - if PARAMS_RAGGED_RANK !== nothing - desc["PARAMS_RAGGED_RANK"] = Base.Int(PARAMS_RAGGED_RANK) - end - if OUTPUT_RAGGED_RANK !== nothing - 
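# NOTE: `RaggedGather` is one of the index-taking ops whose graph
# builder shifts the indices by one,
#     indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1)
# bridging Julia's 1-based indexing to TensorFlow's 0-based kernels; a
# caller passing indices [1, 3] selects the first and third rows of
# `params_dense_values_`. As generated in this diff, the eager variant
# converts the indices without applying the same shift.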
desc["OUTPUT_RAGGED_RANK"] = Base.Int(OUTPUT_RAGGED_RANK) - end - desc["Tvalues"] = tf.data_type(params_dense_values_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(ragged_gather, [params_nested_splits_, params_dense_values_, indices_], name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ragged_gather(params_nested_splits_, params_dense_values_, indices_; name=nothing, PARAMS_RAGGED_RANK=nothing, OUTPUT_RAGGED_RANK=nothing) - if tf.in_eager_mode() - ragged_gather_eager(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) - else - ragged_gather_graph(params_nested_splits_, params_dense_values_, indices_; name=name, PARAMS_RAGGED_RANK=PARAMS_RAGGED_RANK, OUTPUT_RAGGED_RANK=OUTPUT_RAGGED_RANK) - end - end end @@ -46320,35 +84503,63 @@ end """ begin - function rgb_to_hsv_graph(images_; name=nothing) - local desc - tf.with_op_name(name, "RGBToHSV") do - desc = tf.NodeDescription("RGBToHSV") - images_ = convert(Tensor{Float32}, images_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) + begin + function rgb_to_hsv_graph(images_; name=nothing) + local desc + tf.with_op_name(name, "RGBToHSV") do + desc = tf.NodeDescription("RGBToHSV") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function rgb_to_hsv_eager(images_; name=nothing) - desc = tf.EagerOp("RGBToHSV") - images_ = convert(tf.EagerTensor, images_) - tf.add_input(desc, images_) - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function rgb_to_hsv_eager(images_; name=nothing) + desc = tf.EagerOp("RGBToHSV") + images_ = convert(tf.EagerTensor, images_) + begin + begin + tf.add_input(desc, images_) + end + end + begin + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(rgb_to_hsv, [images_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rgb_to_hsv(images_; name=nothing) - if tf.in_eager_mode() - rgb_to_hsv_eager(images_; name=name) - else - rgb_to_hsv_graph(images_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function rgb_to_hsv(images_; name=nothing) + if tf.in_eager_mode() + rgb_to_hsv_eager(images_; name=name) + else + rgb_to_hsv_graph(images_; name=name) + end end - end + end end @@ -46358,33 +84569,57 @@ end """ begin - function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) - local desc - tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do - desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") - multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) - tf.add_input(desc, multi_device_iterator_) + begin + function multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=nothing) + local 
desc + tf.with_op_name(name, "MultiDeviceIteratorToStringHandle") do + desc = tf.NodeDescription("MultiDeviceIteratorToStringHandle") + begin + begin + multi_device_iterator_ = convert(Tensor{Any}, multi_device_iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, multi_device_iterator_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) - desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") - multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) - tf.add_input(desc, multi_device_iterator_) - res = tf.execute(desc) - node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=nothing) + desc = tf.EagerOp("MultiDeviceIteratorToStringHandle") + multi_device_iterator_ = convert(tf.EagerTensor, multi_device_iterator_) + begin + begin + tf.add_input(desc, multi_device_iterator_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(multi_device_iterator_to_string_handle, [multi_device_iterator_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) - if tf.in_eager_mode() - multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) - else - multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multi_device_iterator_to_string_handle(multi_device_iterator_; name=nothing) + if tf.in_eager_mode() + multi_device_iterator_to_string_handle_eager(multi_device_iterator_; name=name) + else + multi_device_iterator_to_string_handle_graph(multi_device_iterator_; name=name) + end end - end + end end @@ -46394,57 +84629,113 @@ end """ begin - function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - local desc - tf.with_op_name(name, "For") do - desc = tf.NodeDescription("For") - start_ = convert(Tensor{Int32}, start_) - limit_ = convert(Tensor{Int32}, limit_) - delta_ = convert(Tensor{Int32}, delta_) - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if body !== nothing - desc["body"] = Base.identity(body) + begin + function for__graph(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + local desc + tf.with_op_name(name, "For") do + desc = tf.NodeDescription("For") + begin + begin + start_ = convert(Tensor{Int32}, start_) + begin + end + end + begin + limit_ = convert(Tensor{Int32}, limit_) + begin + end + end + begin + delta_ = convert(Tensor{Int32}, delta_) + begin + end + end + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, limit_) + end + begin + tf.add_input(desc, delta_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end 
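# NOTE: control-flow ops carry function-valued attributes. For the `For`
# op, `T` lists the loop-carried tensor types and `body` (handled just
# below) names a function already registered in the graph's function
# library; both are forwarded verbatim via `Base.identity`, leaving
# validation to the TensorFlow runtime. Hypothetical call, in a block
# comment (`body_fn` and `inputs` are illustrative):
#=
# Runs body_fn for i = 0, 1, ..., 9, threading `inputs` through
# each iteration.
for_(0, 10, 1, inputs; T=[Float32], body=body_fn)
=#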
+ begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + desc = tf.EagerOp("For") + start_ = convert(tf.EagerTensor, start_) + limit_ = convert(tf.EagerTensor, limit_) + delta_ = convert(tf.EagerTensor, delta_) + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, limit_) + end + begin + tf.add_input(desc, delta_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if T !== nothing + desc["T"] = map(Base.identity, T) + end + end + begin + if body !== nothing + desc["body"] = Base.identity(body) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) + if tf.in_eager_mode() + for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) + else + for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function for__eager(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - desc = tf.EagerOp("For") - start_ = convert(tf.EagerTensor, start_) - limit_ = convert(tf.EagerTensor, limit_) - delta_ = convert(tf.EagerTensor, delta_) - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - tf.add_input(desc, input_) - if T !== nothing - desc["T"] = map(Base.identity, T) - end - if body !== nothing - desc["body"] = Base.identity(body) - end - res = tf.execute(desc) - node = tf.TapeNode(for_, [start_, limit_, delta_, input_], name=nothing, T=nothing, body=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function for_(start_, limit_, delta_, input_; name=nothing, T=nothing, body=nothing) - if tf.in_eager_mode() - for__eager(start_, limit_, delta_, input_; name=name, T=T, body=body) - else - for__graph(start_, limit_, delta_, input_; name=name, T=T, body=body) - end - end end @@ -46454,58 +84745,116 @@ end """ begin - function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "SparseReduceMaxSparse") do - desc = tf.NodeDescription("SparseReduceMaxSparse") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "SparseReduceMaxSparse") do + desc = tf.NodeDescription("SparseReduceMaxSparse") + 
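# NOTE: `SparseReduceMaxSparse` is a multi-output op (sparse indices,
# values, and shape), so instead of returning a single
# `tf.Tensor(tf.Operation(desc))`, the builder ends by collecting one
# tensor per output index, as in the loop at the end of this function:
#=
out = tf.Tensor[]
op = tf.Operation(desc)
for out_idx = 1:3              # one tf.Tensor per op output
    push!(out, tf.Tensor(op, out_idx))
end
out
=#
# The eager variant correspondingly returns the whole `res` vector
# rather than `res[1]`.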
begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_values_ = convert(Tensor{Any}, input_values_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + reduction_axes_ = convert(Tensor{Int32}, reduction_axes_) + begin + end + end + begin + (input_values_,) = tf.tf_promote(input_values_) + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("SparseReduceMaxSparse") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + begin + tf.add_input(desc, reduction_axes_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + else + sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("SparseReduceMaxSparse") - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_values_ = convert(tf.EagerTensor, input_values_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - reduction_axes_ = convert(tf.EagerTensor, reduction_axes_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - tf.add_input(desc, reduction_axes_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reduce_max_sparse, [input_indices_, input_values_, input_shape_, reduction_axes_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reduce_max_sparse(input_indices_, input_values_, input_shape_, reduction_axes_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sparse_reduce_max_sparse_eager(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - else - sparse_reduce_max_sparse_graph(input_indices_, input_values_, input_shape_, reduction_axes_; name=name, keep_dims=keep_dims) - end - end end @@ -46515,48 +84864,86 @@ end """ begin - function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "ConcatOffset") do - desc = tf.NodeDescription("ConcatOffset") - concat_dim_ = convert(Tensor{Int32}, concat_dim_) - shape_ = [convert(Tensor{Int32}, x) for x = shape_] - tf.add_input(desc, concat_dim_) - tf.add_input(desc, shape_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function concat_offset_graph(concat_dim_, shape_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "ConcatOffset") do + desc = tf.NodeDescription("ConcatOffset") + begin + begin + concat_dim_ = convert(Tensor{Int32}, concat_dim_) + begin + end + end + begin + shape_ = [convert(Tensor{Int32}, x) for x = shape_] + begin + end + end + end + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) + desc = tf.EagerOp("ConcatOffset") + concat_dim_ = convert(tf.EagerTensor, concat_dim_) + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, concat_dim_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) + if tf.in_eager_mode() + concat_offset_eager(concat_dim_, shape_; name=name, N=N) + else + concat_offset_graph(concat_dim_, shape_; name=name, N=N) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out end - function concat_offset_eager(concat_dim_, shape_; name=nothing, N=nothing) - desc = tf.EagerOp("ConcatOffset") - concat_dim_ = convert(tf.EagerTensor, concat_dim_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, concat_dim_) - tf.add_input(desc, shape_) - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(concat_offset, [concat_dim_, shape_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function concat_offset(concat_dim_, shape_; name=nothing, N=nothing) - if tf.in_eager_mode() - concat_offset_eager(concat_dim_, shape_; name=name, N=N) - else - concat_offset_graph(concat_dim_, shape_; name=name, N=N) - end - end end @@ -46566,63 +84953,107 @@ end """ begin - function 
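+            # Editor's note: multi-output ops such as `ConcatOffset` above diverge from
+            # the single-output template in both modes. The graph builder collects its
+            # outputs explicitly,
+            #
+            #     out = tf.Tensor[]
+            #     for out_idx = 1:N
+            #         push!(out, tf.Tensor(op, out_idx))
+            #     end
+            #
+            # while the eager path returns the whole `res` vector from
+            # `tf.execute(desc)` instead of unwrapping `res[1]`.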
stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Stage") do - desc = tf.NodeDescription("Stage") - values_ = [convert(Tensor{Any}, x) for x = values_] - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function stage_graph(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Stage") do + desc = tf.NodeDescription("Stage") + begin + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + end + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Stage") + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, values_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stage_eager(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("Stage") - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, values_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit 
!== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(stage, [values_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage(values_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_eager(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_graph(values_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -46632,44 +85063,82 @@ end """ begin - function switch_graph(data_, pred_; name=nothing) - local desc - tf.with_op_name(name, "Switch") do - desc = tf.NodeDescription("Switch") - data_ = convert(Tensor{Any}, data_) - pred_ = convert(Tensor{Bool}, pred_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function switch_eager(data_, pred_; name=nothing) - desc = tf.EagerOp("Switch") - data_ = convert(tf.EagerTensor, data_) - pred_ = convert(tf.EagerTensor, pred_) - tf.add_input(desc, data_) - tf.add_input(desc, pred_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(switch, [data_, pred_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function switch(data_, pred_; name=nothing) - if tf.in_eager_mode() - switch_eager(data_, pred_; name=name) - else - switch_graph(data_, pred_; name=name) + begin + function switch_graph(data_, pred_; name=nothing) + local desc + tf.with_op_name(name, "Switch") do + desc = tf.NodeDescription("Switch") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + pred_ = convert(Tensor{Bool}, pred_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, pred_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function switch_eager(data_, pred_; name=nothing) + desc = tf.EagerOp("Switch") + data_ = convert(tf.EagerTensor, data_) + pred_ = convert(tf.EagerTensor, pred_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, pred_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(switch, [data_, pred_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function switch(data_, pred_; name=nothing) + if tf.in_eager_mode() + switch_eager(data_, pred_; name=name) + else + 
switch_graph(data_, pred_; name=name) + end end - end + end end @@ -46679,49 +85148,89 @@ end """ begin - function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - local desc - tf.with_op_name(name, "QueueDequeueManyV2") do - desc = tf.NodeDescription("QueueDequeueManyV2") - handle_ = convert(Tensor{Any}, handle_) - n_ = convert(Tensor{Int32}, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) + begin + function queue_dequeue_many_v2_graph(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + local desc + tf.with_op_name(name, "QueueDequeueManyV2") do + desc = tf.NodeDescription("QueueDequeueManyV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + n_ = convert(Tensor{Int32}, n_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + desc = tf.EagerOp("QueueDequeueManyV2") + handle_ = convert(tf.EagerTensor, handle_) + n_ = convert(tf.EagerTensor, n_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, n_) + end + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if timeout_ms !== nothing + desc["timeout_ms"] = Base.Int(timeout_ms) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) + if tf.in_eager_mode() + queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + else + queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) + end end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - end - tf.Tensor(tf.Operation(desc)) end - function queue_dequeue_many_v2_eager(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - desc = tf.EagerOp("QueueDequeueManyV2") - handle_ = convert(tf.EagerTensor, handle_) - n_ = convert(tf.EagerTensor, n_) - tf.add_input(desc, handle_) - tf.add_input(desc, n_) - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if timeout_ms !== nothing - desc["timeout_ms"] = Base.Int(timeout_ms) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_dequeue_many_v2, [handle_, n_], name=nothing, component_types=nothing, timeout_ms=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_dequeue_many_v2(handle_, n_; name=nothing, component_types=nothing, timeout_ms=nothing) - if 
tf.in_eager_mode() - queue_dequeue_many_v2_eager(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - else - queue_dequeue_many_v2_graph(handle_, n_; name=name, component_types=component_types, timeout_ms=timeout_ms) - end - end end @@ -46731,42 +85240,82 @@ end """ begin - function segment_prod_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentProd") do - desc = tf.NodeDescription("SegmentProd") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function segment_prod_eager(data_, segment_ids_; name=nothing) - desc = tf.EagerOp("SegmentProd") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - res = tf.execute(desc) - node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_prod_eager(data_, segment_ids_; name=name) - else - segment_prod_graph(data_, segment_ids_; name=name) + begin + function segment_prod_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentProd") do + desc = tf.NodeDescription("SegmentProd") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function segment_prod_eager(data_, segment_ids_; name=nothing) + desc = tf.EagerOp("SegmentProd") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + res = tf.execute(desc) + node = tf.TapeNode(segment_prod, [data_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_prod(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_prod_eager(data_, segment_ids_; name=name) + else + segment_prod_graph(data_, segment_ids_; name=name) + end end - end + end end @@ -46776,46 +85325,88 @@ end """ begin - function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) - local desc - tf.with_op_name(name, "ApproximateEqual") do - desc = tf.NodeDescription("ApproximateEqual") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - 
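+        # Editor's note: ops taking `segment_ids`, like `SegmentProd` above, insert an
+        # index shift during graph construction,
+        #
+        #     segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1)
+        #
+        # so callers may pass Julia's 1-based ids while the kernel receives the 0-based
+        # ids TensorFlow expects. The generated eager path above does not apply the
+        # same shift, which is worth flagging for review.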
tf.add_input(desc, x_) - tf.add_input(desc, y_) - if tolerance !== nothing - desc["tolerance"] = Base.identity(tolerance) + begin + function approximate_equal_graph(x_, y_; name=nothing, tolerance=nothing) + local desc + tf.with_op_name(name, "ApproximateEqual") do + desc = tf.NodeDescription("ApproximateEqual") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if tolerance !== nothing + desc["tolerance"] = Base.identity(tolerance) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) + desc = tf.EagerOp("ApproximateEqual") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if tolerance !== nothing + desc["tolerance"] = Base.identity(tolerance) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing) + if tf.in_eager_mode() + approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) + else + approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function approximate_equal_eager(x_, y_; name=nothing, tolerance=nothing) - desc = tf.EagerOp("ApproximateEqual") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if tolerance !== nothing - desc["tolerance"] = Base.identity(tolerance) - end - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(approximate_equal, [x_, y_], name=nothing, tolerance=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function approximate_equal(x_, y_; name=nothing, tolerance=nothing) - if tf.in_eager_mode() - approximate_equal_eager(x_, y_; name=name, tolerance=tolerance) - else - approximate_equal_graph(x_, y_; name=name, tolerance=tolerance) - end - end end @@ -46825,70 +85416,128 @@ end """ begin - function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2D") do - desc = tf.NodeDescription("Conv2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function conv2d_graph(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, 
data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2D") do + desc = tf.NodeDescription("Conv2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2D") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conv2d_eager(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv2D") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = 
Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(conv2d, [input_, filter_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d(input_, filter_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_eager(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_graph(input_, filter_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -46898,39 +85547,75 @@ end An Op to sum inputs across replicated TPU instances. Each instance supplies its """ begin - function cross_replica_sum_graph(input_, group_assignment_; name=nothing) - local desc - tf.with_op_name(name, "CrossReplicaSum") do - desc = tf.NodeDescription("CrossReplicaSum") - input_ = convert(Tensor{Any}, input_) - group_assignment_ = convert(Tensor{Int32}, group_assignment_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - end - tf.Tensor(tf.Operation(desc)) - end - function cross_replica_sum_eager(input_, group_assignment_; name=nothing) - desc = tf.EagerOp("CrossReplicaSum") - input_ = convert(tf.EagerTensor, input_) - group_assignment_ = convert(tf.EagerTensor, group_assignment_) - tf.add_input(desc, input_) - tf.add_input(desc, group_assignment_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing) - if tf.in_eager_mode() - cross_replica_sum_eager(input_, group_assignment_; name=name) - else - cross_replica_sum_graph(input_, group_assignment_; name=name) + begin + function cross_replica_sum_graph(input_, group_assignment_; name=nothing) + local desc + tf.with_op_name(name, "CrossReplicaSum") do + desc = tf.NodeDescription("CrossReplicaSum") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + group_assignment_ = convert(Tensor{Int32}, group_assignment_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, group_assignment_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cross_replica_sum_eager(input_, group_assignment_; name=nothing) + desc = tf.EagerOp("CrossReplicaSum") + input_ = convert(tf.EagerTensor, input_) + group_assignment_ = convert(tf.EagerTensor, group_assignment_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, group_assignment_) + end + end + begin + end + begin + desc["T"] = 
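+            # Editor's note: when an eager op has two inputs sharing the type attribute
+            # "T" (see `approximate_equal_eager` and `conv2d_eager` above), the
+            # generator emits two writes to the same key:
+            #
+            #     desc["T"] = tf.data_type(input_)
+            #     desc["T"] = tf.data_type(filter_)
+            #
+            # The second assignment wins, so the code implicitly assumes the caller has
+            # already promoted both inputs to a common dtype.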
tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(cross_replica_sum, [input_, group_assignment_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cross_replica_sum(input_, group_assignment_; name=nothing) + if tf.in_eager_mode() + cross_replica_sum_eager(input_, group_assignment_; name=name) + else + cross_replica_sum_graph(input_, group_assignment_; name=name) + end end - end + end end @@ -46940,65 +85625,121 @@ end """ begin - function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - local desc - tf.with_op_name(name, "SparseMatMul") do - desc = tf.NodeDescription("SparseMatMul") - a_ = convert(Tensor{Float32}, a_) - b_ = convert(Tensor{Float32}, b_) - (b_,) = tf.tf_promote(b_) - (a_,) = tf.tf_promote(a_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - if a_is_sparse !== nothing - desc["a_is_sparse"] = Base.Bool(a_is_sparse) + begin + function sparse_mat_mul_graph(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + local desc + tf.with_op_name(name, "SparseMatMul") do + desc = tf.NodeDescription("SparseMatMul") + begin + begin + a_ = convert(Tensor{Float32}, a_) + begin + end + end + begin + b_ = convert(Tensor{Float32}, b_) + begin + end + end + begin + (b_,) = tf.tf_promote(b_) + end + begin + (a_,) = tf.tf_promote(a_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + begin + if a_is_sparse !== nothing + desc["a_is_sparse"] = Base.Bool(a_is_sparse) + end + end + begin + if b_is_sparse !== nothing + desc["b_is_sparse"] = Base.Bool(b_is_sparse) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) + desc = tf.EagerOp("SparseMatMul") + a_ = convert(tf.EagerTensor, a_) + b_ = convert(tf.EagerTensor, b_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, b_) + end + end + begin + begin + if transpose_a !== nothing + desc["transpose_a"] = Base.Bool(transpose_a) + end + end + begin + if transpose_b !== nothing + desc["transpose_b"] = Base.Bool(transpose_b) + end + end + begin + if a_is_sparse !== nothing + desc["a_is_sparse"] = Base.Bool(a_is_sparse) + end + end + begin + if b_is_sparse !== nothing + desc["b_is_sparse"] = Base.Bool(b_is_sparse) + end + end + end + begin + desc["Ta"] = tf.data_type(a_) + end + begin + desc["Tb"] = tf.data_type(b_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, 
a_is_sparse=nothing, b_is_sparse=nothing) + if tf.in_eager_mode() + sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + else + sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) + end end - if b_is_sparse !== nothing - desc["b_is_sparse"] = Base.Bool(b_is_sparse) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_mat_mul_eager(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - desc = tf.EagerOp("SparseMatMul") - a_ = convert(tf.EagerTensor, a_) - b_ = convert(tf.EagerTensor, b_) - tf.add_input(desc, a_) - tf.add_input(desc, b_) - if transpose_a !== nothing - desc["transpose_a"] = Base.Bool(transpose_a) - end - if transpose_b !== nothing - desc["transpose_b"] = Base.Bool(transpose_b) - end - if a_is_sparse !== nothing - desc["a_is_sparse"] = Base.Bool(a_is_sparse) - end - if b_is_sparse !== nothing - desc["b_is_sparse"] = Base.Bool(b_is_sparse) - end - desc["Ta"] = tf.data_type(a_) - desc["Tb"] = tf.data_type(b_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_mat_mul, [a_, b_], name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_mat_mul(a_, b_; name=nothing, transpose_a=nothing, transpose_b=nothing, a_is_sparse=nothing, b_is_sparse=nothing) - if tf.in_eager_mode() - sparse_mat_mul_eager(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) - else - sparse_mat_mul_graph(a_, b_; name=name, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse) - end - end end @@ -47008,69 +85749,125 @@ end Acts roughly like a SplitV Op that splits one tensor into multiple tensors """ begin - function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - local desc - tf.with_op_name(name, "_ScopedAllocatorSplit") do - desc = tf.NodeDescription("_ScopedAllocatorSplit") - concat_ = convert(Tensor{Any}, concat_) - split_ = [convert(Tensor{Any}, x) for x = split_] - (concat_, split_) = tf.tf_promote(concat_, split_) - tf.add_input(desc, concat_) - tf.add_input(desc, split_) - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if N !== nothing - desc["N"] = Base.Int(N) + begin + function _scoped_allocator_split_graph(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + local desc + tf.with_op_name(name, "_ScopedAllocatorSplit") do + desc = tf.NodeDescription("_ScopedAllocatorSplit") + begin + begin + concat_ = convert(Tensor{Any}, concat_) + begin + end + end + begin + split_ = [convert(Tensor{Any}, x) for x = split_] + begin + end + end + begin + (concat_, split_) = tf.tf_promote(concat_, split_) + end + end + begin + begin + tf.add_input(desc, concat_) + end + begin + tf.add_input(desc, split_) + end + end + begin + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if shapes !== nothing 
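+                # Editor's note: `SparseMatMul` above is the contrasting case to the
+                # shared-"T" ops; it carries separate type attributes, so both input
+                # dtypes survive into the op definition:
+                #
+                #     desc["Ta"] = tf.data_type(a_)
+                #     desc["Tb"] = tf.data_type(b_)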
+ desc["shapes"] = map(Base.identity, shapes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:N + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + desc = tf.EagerOp("_ScopedAllocatorSplit") + concat_ = convert(tf.EagerTensor, concat_) + split_ = convert(tf.EagerTensor, split_) + begin + begin + tf.add_input(desc, concat_) + end + begin + tf.add_input(desc, split_) + end + end + begin + begin + if sa_name !== nothing + desc["sa_name"] = Base.String(sa_name) + end + end + begin + if id !== nothing + desc["id"] = Base.Int(id) + end + end + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + end + begin + desc["T"] = tf.data_type(concat_) + end + begin + desc["T"] = tf.data_type(split_) + end + res = tf.execute(desc) + node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) + if tf.in_eager_mode() + _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + else + _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) + end end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:N - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _scoped_allocator_split_eager(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - desc = tf.EagerOp("_ScopedAllocatorSplit") - concat_ = convert(tf.EagerTensor, concat_) - split_ = convert(tf.EagerTensor, split_) - tf.add_input(desc, concat_) - tf.add_input(desc, split_) - if sa_name !== nothing - desc["sa_name"] = Base.String(sa_name) - end - if id !== nothing - desc["id"] = Base.Int(id) - end - if N !== nothing - desc["N"] = Base.Int(N) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - desc["T"] = tf.data_type(concat_) - desc["T"] = tf.data_type(split_) - res = tf.execute(desc) - node = tf.TapeNode(_scoped_allocator_split, [concat_, split_], name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _scoped_allocator_split(concat_, split_; name=nothing, sa_name=nothing, id=nothing, N=nothing, shapes=nothing) - if tf.in_eager_mode() - _scoped_allocator_split_eager(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) - else - _scoped_allocator_split_graph(concat_, split_; name=name, sa_name=sa_name, id=id, N=N, shapes=shapes) - end - end end @@ -47080,40 +85877,78 @@ end """ begin - function igammac_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "Igammac") do - desc = tf.NodeDescription("Igammac") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - 
tf.add_input(desc, x_) + begin + function igammac_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "Igammac") do + desc = tf.NodeDescription("Igammac") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (a_, x_) = tf.tf_promote(a_, x_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function igammac_eager(a_, x_; name=nothing) - desc = tf.EagerOp("Igammac") - a_ = convert(tf.EagerTensor, a_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(igammac, [a_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function igammac_eager(a_, x_; name=nothing) + desc = tf.EagerOp("Igammac") + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(igammac, [a_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igammac(a_, x_; name=nothing) - if tf.in_eager_mode() - igammac_eager(a_, x_; name=name) - else - igammac_graph(a_, x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igammac(a_, x_; name=nothing) + if tf.in_eager_mode() + igammac_eager(a_, x_; name=name) + else + igammac_graph(a_, x_; name=name) + end end - end + end end @@ -47123,52 +85958,98 @@ end """ begin - function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - local desc - tf.with_op_name(name, "BatchMatMul") do - desc = tf.NodeDescription("BatchMatMul") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if adj_x !== nothing - desc["adj_x"] = Base.Bool(adj_x) - end - if adj_y !== nothing - desc["adj_y"] = Base.Bool(adj_y) + begin + function batch_mat_mul_graph(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + local desc + tf.with_op_name(name, "BatchMatMul") do + desc = tf.NodeDescription("BatchMatMul") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if adj_x !== nothing + desc["adj_x"] = Base.Bool(adj_x) + end + end + begin + if adj_y !== nothing + desc["adj_y"] = Base.Bool(adj_y) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + desc = tf.EagerOp("BatchMatMul") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + begin + if adj_x !== nothing + desc["adj_x"] = Base.Bool(adj_x) + end + end + begin + if adj_y !== nothing + desc["adj_y"] = Base.Bool(adj_y) + 
end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) + if tf.in_eager_mode() + batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + else + batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_mat_mul_eager(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - desc = tf.EagerOp("BatchMatMul") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - if adj_x !== nothing - desc["adj_x"] = Base.Bool(adj_x) - end - if adj_y !== nothing - desc["adj_y"] = Base.Bool(adj_y) - end - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(batch_mat_mul, [x_, y_], name=nothing, adj_x=nothing, adj_y=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_mat_mul(x_, y_; name=nothing, adj_x=nothing, adj_y=nothing) - if tf.in_eager_mode() - batch_mat_mul_eager(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) - else - batch_mat_mul_graph(x_, y_; name=name, adj_x=adj_x, adj_y=adj_y) - end - end end @@ -47178,49 +86059,89 @@ end """ begin - function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArrayPack") do - desc = tf.NodeDescription("TensorArrayPack") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) + begin + function tensor_array_pack_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArrayPack") do + desc = tf.NodeDescription("TensorArrayPack") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArrayPack") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + res = tf.execute(desc) + node = 
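+            # Editor's note: each `*_eager` method ends by recording the call on the
+            # autodiff tape. `tf.TapeNode(op, inputs, ...; res)` packages the primal
+            # inputs and outputs, and `tf.add_node(res[1], node)` keys the node by its
+            # first result so a later backward pass can replay the op. The
+            # gradient-registration side of this machinery lives elsewhere in the
+            # patch series.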
tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + else + tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_pack_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArrayPack") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_pack, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_pack(handle_, flow_in_; name=nothing, dtype=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_pack_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - else - tensor_array_pack_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape=element_shape) - end - end end @@ -47230,39 +86151,67 @@ end """ begin - function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) - local desc - tf.with_op_name(name, "QueueCloseV2") do - desc = tf.NodeDescription("QueueCloseV2") - handle_ = convert(Tensor{Any}, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + begin + function queue_close_v2_graph(handle_; name=nothing, cancel_pending_enqueues=nothing) + local desc + tf.with_op_name(name, "QueueCloseV2") do + desc = tf.NodeDescription("QueueCloseV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) - desc = tf.EagerOp("QueueCloseV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - if cancel_pending_enqueues !== nothing - desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) - end - res = tf.execute(desc) - node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function queue_close_v2_eager(handle_; name=nothing, cancel_pending_enqueues=nothing) + desc = tf.EagerOp("QueueCloseV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + begin + if cancel_pending_enqueues !== nothing + 
desc["cancel_pending_enqueues"] = Base.Bool(cancel_pending_enqueues) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(queue_close_v2, [handle_], name=nothing, cancel_pending_enqueues=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) - if tf.in_eager_mode() - queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) - else - queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function queue_close_v2(handle_; name=nothing, cancel_pending_enqueues=nothing) + if tf.in_eager_mode() + queue_close_v2_eager(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + else + queue_close_v2_graph(handle_; name=name, cancel_pending_enqueues=cancel_pending_enqueues) + end end - end + end end @@ -47272,63 +86221,123 @@ end An op that enqueues TPUEmbedding input indices from a SparseTensor. """ begin - function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] - embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] - aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + begin + function enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingSparseBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingSparseBatch") + begin + begin + sample_indices_ = [convert(Tensor{Int32}, x) for x = sample_indices_] + begin + end + end + begin + embedding_indices_ = [convert(Tensor{Int32}, x) for x = embedding_indices_] + begin + end + end + begin + aggregation_weights_ = [convert(Tensor{Float32}, x) for x = aggregation_weights_] + begin + end + end + begin + mode_override_ = convert(Tensor{String}, mode_override_) + begin + end + end + end + begin + begin + tf.add_input(desc, sample_indices_) + end + begin + tf.add_input(desc, embedding_indices_) + end + begin + tf.add_input(desc, aggregation_weights_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + begin + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, 
mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") + sample_indices_ = convert(tf.EagerTensor, sample_indices_) + embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) + aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) + mode_override_ = convert(tf.EagerTensor, mode_override_) + begin + begin + tf.add_input(desc, sample_indices_) + end + begin + tf.add_input(desc, embedding_indices_) + end + begin + tf.add_input(desc, aggregation_weights_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + begin + if combiners !== nothing + desc["combiners"] = map(Base.identity, combiners) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) + if tf.in_eager_mode() + enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + else + enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) + end end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) - end - end - tf.Tensor(tf.Operation(desc)) end - function enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - desc = tf.EagerOp("EnqueueTPUEmbeddingSparseBatch") - sample_indices_ = convert(tf.EagerTensor, sample_indices_) - embedding_indices_ = convert(tf.EagerTensor, embedding_indices_) - aggregation_weights_ = convert(tf.EagerTensor, aggregation_weights_) - mode_override_ = convert(tf.EagerTensor, mode_override_) - tf.add_input(desc, sample_indices_) - tf.add_input(desc, embedding_indices_) - tf.add_input(desc, aggregation_weights_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - if combiners !== nothing - desc["combiners"] = map(Base.identity, combiners) - end - res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_sparse_batch, [sample_indices_, embedding_indices_, aggregation_weights_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_sparse_batch(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing, combiners=nothing) - if tf.in_eager_mode() - 
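+        # Editor's note: for list-valued inputs the graph builder converts each
+        # element individually (`[convert(Tensor{Int32}, x) for x = sample_indices_]`),
+        # but the generated eager path above funnels the whole collection through a
+        # single `convert(tf.EagerTensor, sample_indices_)`. Whether that round-trips
+        # a Julia vector of handles correctly may warrant a closer look.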
enqueue_tpu_embedding_sparse_batch_eager(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) - else - enqueue_tpu_embedding_sparse_batch_graph(sample_indices_, embedding_indices_, aggregation_weights_, mode_override_; name=name, N=N, device_ordinal=device_ordinal, combiners=combiners) - end - end end @@ -47338,37 +86347,69 @@ end """ begin - function reader_restore_state_graph(reader_handle_, state_; name=nothing) - local desc - tf.with_op_name(name, "ReaderRestoreState") do - desc = tf.NodeDescription("ReaderRestoreState") - reader_handle_ = convert(Tensor{String}, reader_handle_) - state_ = convert(Tensor{String}, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) + begin + function reader_restore_state_graph(reader_handle_, state_; name=nothing) + local desc + tf.with_op_name(name, "ReaderRestoreState") do + desc = tf.NodeDescription("ReaderRestoreState") + begin + begin + reader_handle_ = convert(Tensor{String}, reader_handle_) + begin + end + end + begin + state_ = convert(Tensor{String}, state_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, state_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reader_restore_state_eager(reader_handle_, state_; name=nothing) - desc = tf.EagerOp("ReaderRestoreState") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - state_ = convert(tf.EagerTensor, state_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, state_) - res = tf.execute(desc) - node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function reader_restore_state_eager(reader_handle_, state_; name=nothing) + desc = tf.EagerOp("ReaderRestoreState") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + state_ = convert(tf.EagerTensor, state_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, state_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_restore_state, [reader_handle_, state_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing) - if tf.in_eager_mode() - reader_restore_state_eager(reader_handle_, state_; name=name) - else - reader_restore_state_graph(reader_handle_, state_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_restore_state(reader_handle_, state_; name=nothing) + if tf.in_eager_mode() + reader_restore_state_eager(reader_handle_, state_; name=name) + else + reader_restore_state_graph(reader_handle_, state_; name=name) + end end - end + end end @@ -47378,87 +86419,163 @@ end *NOTE*: Do not invoke this operator directly in Python. 
Grappler is """ begin - function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) - local desc - tf.with_op_name(name, "_FusedConv2D") do - desc = tf.NodeDescription("_FusedConv2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - args_ = [convert(Tensor{Any}, x) for x = args_] - (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, args_) - if num_args !== nothing - desc["num_args"] = Base.Int(num_args) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function _fused_conv2d_graph(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) + local desc + tf.with_op_name(name, "_FusedConv2D") do + desc = tf.NodeDescription("_FusedConv2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + args_ = [convert(Tensor{Any}, x) for x = args_] + begin + end + end + begin + (input_, filter_, args_) = tf.tf_promote(input_, filter_, args_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, args_) + end + end + begin + begin + if num_args !== nothing + desc["num_args"] = Base.Int(num_args) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + begin + if fused_ops !== nothing + desc["fused_ops"] = map(Base.identity, fused_ops) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) + desc = tf.EagerOp("_FusedConv2D") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + args_ = convert(tf.EagerTensor, args_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, args_) + end + end + begin + begin + if num_args !== nothing + desc["num_args"] = Base.Int(num_args) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + begin + if fused_ops !== nothing + desc["fused_ops"] = map(Base.identity, fused_ops) + end + end + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + end 
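# The block just below fills in the op's "T" type attribute at call time:
# eager execution has no graph-construction type inference, so tf.data_type
# reads the runtime dtype of each converted EagerTensor. All three writes
# target desc["T"], each overwriting the last, so input_, filter_, and
# args_ are assumed to share one dtype. A minimal sketch of the pattern
# (hypothetical tensor construction, assuming EagerTensor accepts a plain
# Julia array):
#
#     x = tf.EagerTensor(rand(Float32, 1, 5, 5, 1))  # assumed constructor
#     op = tf.EagerOp("Identity")
#     op["T"] = tf.data_type(x)  # records the runtime dtype as attr "T"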
+ begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(args_) + end + res = tf.execute(desc) + node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) + if tf.in_eager_mode() + _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) + else + _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) + end end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - if fused_ops !== nothing - desc["fused_ops"] = map(Base.identity, fused_ops) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _fused_conv2d_eager(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) - desc = tf.EagerOp("_FusedConv2D") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - args_ = convert(tf.EagerTensor, args_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, args_) - if num_args !== nothing - desc["num_args"] = Base.Int(num_args) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - if fused_ops !== nothing - desc["fused_ops"] = map(Base.identity, fused_ops) - end - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(args_) - res = tf.execute(desc) - node = tf.TapeNode(_fused_conv2d, [input_, filter_, args_], name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _fused_conv2d(input_, filter_, args_; name=nothing, num_args=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, fused_ops=nothing, epsilon=nothing) - if tf.in_eager_mode() - _fused_conv2d_eager(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, epsilon=epsilon) - else - _fused_conv2d_graph(input_, filter_, args_; name=name, num_args=num_args, strides=strides, padding=padding, data_format=data_format, dilations=dilations, fused_ops=fused_ops, 
epsilon=epsilon) - end - end end @@ -47468,45 +86585,77 @@ end """ begin - function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "_ReadVariablesOp") do - desc = tf.NodeDescription("_ReadVariablesOp") - resources_ = [convert(Tensor{Any}, x) for x = resources_] - tf.add_input(desc, resources_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function _read_variables_op_graph(resources_; name=nothing, N=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "_ReadVariablesOp") do + desc = tf.NodeDescription("_ReadVariablesOp") + begin + begin + resources_ = [convert(Tensor{Any}, x) for x = resources_] + begin + end + end + end + begin + begin + tf.add_input(desc, resources_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing) + desc = tf.EagerOp("_ReadVariablesOp") + resources_ = convert(tf.EagerTensor, resources_) + begin + begin + tf.add_input(desc, resources_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) + if tf.in_eager_mode() + _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) + else + _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _read_variables_op_eager(resources_; name=nothing, N=nothing, dtypes=nothing) - desc = tf.EagerOp("_ReadVariablesOp") - resources_ = convert(tf.EagerTensor, resources_) - tf.add_input(desc, resources_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - res = tf.execute(desc) - node = tf.TapeNode(_read_variables_op, [resources_], name=nothing, N=nothing, dtypes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _read_variables_op(resources_; name=nothing, N=nothing, dtypes=nothing) - if tf.in_eager_mode() - _read_variables_op_eager(resources_; name=name, N=N, dtypes=dtypes) - else - _read_variables_op_graph(resources_; name=name, N=N, dtypes=dtypes) - end - end end @@ -47516,65 +86665,105 @@ end """ begin - function mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - local desc - tf.with_op_name(name, "MutableHashTableOfTensors") do - desc = tf.NodeDescription("MutableHashTableOfTensors") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function 
mutable_hash_table_of_tensors_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensors") do + desc = tf.NodeDescription("MutableHashTableOfTensors") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_hash_table_of_tensors_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + desc = tf.EagerOp("MutableHashTableOfTensors") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.in_eager_mode() + mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mutable_hash_table_of_tensors_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, 
value_shape=nothing) - desc = tf.EagerOp("MutableHashTableOfTensors") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_of_tensors, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.in_eager_mode() - mutable_hash_table_of_tensors_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) - else - mutable_hash_table_of_tensors_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) - end - end end @@ -47584,33 +86773,57 @@ end """ begin - function read_file_graph(filename_; name=nothing) - local desc - tf.with_op_name(name, "ReadFile") do - desc = tf.NodeDescription("ReadFile") - filename_ = convert(Tensor{String}, filename_) - tf.add_input(desc, filename_) + begin + function read_file_graph(filename_; name=nothing) + local desc + tf.with_op_name(name, "ReadFile") do + desc = tf.NodeDescription("ReadFile") + begin + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + end + begin + begin + tf.add_input(desc, filename_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function read_file_eager(filename_; name=nothing) - desc = tf.EagerOp("ReadFile") - filename_ = convert(tf.EagerTensor, filename_) - tf.add_input(desc, filename_) - res = tf.execute(desc) - node = tf.TapeNode(read_file, [filename_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function read_file_eager(filename_; name=nothing) + desc = tf.EagerOp("ReadFile") + filename_ = convert(tf.EagerTensor, filename_) + begin + begin + tf.add_input(desc, filename_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(read_file, [filename_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_file(filename_; name=nothing) - if tf.in_eager_mode() - read_file_eager(filename_; name=name) - else - read_file_graph(filename_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function read_file(filename_; name=nothing) + if tf.in_eager_mode() + read_file_eager(filename_; name=name) + else + read_file_graph(filename_; name=name) + end end - end + end end @@ -47620,69 +86833,133 @@ end Load 
embedding parameters for a single table. """ begin - function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - weights_ = convert(Tensor{Float32}, weights_) - benefits_ = convert(Tensor{Float32}, benefits_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, weights_) - tf.add_input(desc, benefits_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingMDLAdagradLightParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingMDLAdagradLightParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + weights_ = convert(Tensor{Float32}, weights_) + begin + end + end + begin + benefits_ = convert(Tensor{Float32}, benefits_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, weights_) + end + begin + tf.add_input(desc, benefits_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + weights_ = convert(tf.EagerTensor, weights_) + benefits_ = convert(tf.EagerTensor, benefits_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, weights_) + end + begin + tf.add_input(desc, benefits_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, 
accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingMDLAdagradLightParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - weights_ = convert(tf.EagerTensor, weights_) - benefits_ = convert(tf.EagerTensor, benefits_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, weights_) - tf.add_input(desc, benefits_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_mdl_adagrad_light_parameters, [parameters_, accumulators_, weights_, benefits_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_mdl_adagrad_light_parameters(parameters_, accumulators_, weights_, benefits_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_mdl_adagrad_light_parameters_eager(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_mdl_adagrad_light_parameters_graph(parameters_, accumulators_, weights_, benefits_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -47692,53 +86969,109 @@ end """ begin - function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - local desc - tf.with_op_name(name, "FractionalAvgPoolGrad") do - desc = tf.NodeDescription("FractionalAvgPoolGrad") - orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) - col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) - (out_backprop_,) 
= tf.tf_promote(out_backprop_) - tf.add_input(desc, orig_input_tensor_shape_) - tf.add_input(desc, out_backprop_) - tf.add_input(desc, row_pooling_sequence_) - tf.add_input(desc, col_pooling_sequence_) - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) + begin + function fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + local desc + tf.with_op_name(name, "FractionalAvgPoolGrad") do + desc = tf.NodeDescription("FractionalAvgPoolGrad") + begin + begin + orig_input_tensor_shape_ = convert(Tensor{Int64}, orig_input_tensor_shape_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + row_pooling_sequence_ = convert(Tensor{Int64}, row_pooling_sequence_) + begin + end + end + begin + col_pooling_sequence_ = convert(Tensor{Int64}, col_pooling_sequence_) + begin + end + end + begin + (out_backprop_,) = tf.tf_promote(out_backprop_) + end + end + begin + begin + tf.add_input(desc, orig_input_tensor_shape_) + end + begin + tf.add_input(desc, out_backprop_) + end + begin + tf.add_input(desc, row_pooling_sequence_) + end + begin + tf.add_input(desc, col_pooling_sequence_) + end + end + begin + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + desc = tf.EagerOp("FractionalAvgPoolGrad") + orig_input_tensor_shape_ = convert(tf.EagerTensor, orig_input_tensor_shape_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) + col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) + begin + begin + tf.add_input(desc, orig_input_tensor_shape_) + end + begin + tf.add_input(desc, out_backprop_) + end + begin + tf.add_input(desc, row_pooling_sequence_) + end + begin + tf.add_input(desc, col_pooling_sequence_) + end + end + begin + begin + if overlapping !== nothing + desc["overlapping"] = Base.Bool(overlapping) + end + end + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) + if tf.in_eager_mode() + fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + else + fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - desc = tf.EagerOp("FractionalAvgPoolGrad") - orig_input_tensor_shape_ = convert(tf.EagerTensor, orig_input_tensor_shape_) 
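# Both the flat and the begin-wrapped eager bodies open the same way: every
# positional input is materialized as a concrete tf.EagerTensor via convert
# before tf.add_input attaches it, and the "T" attribute is then read back
# from the promoted input (here out_backprop_) with tf.data_type rather than
# inferred ahead of time. A rough sketch of the normalization step (assuming
# convert accepts a plain Julia array):
#
#     h = convert(tf.EagerTensor, Int64[3, 4])  # assumed array conversion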
- out_backprop_ = convert(tf.EagerTensor, out_backprop_) - row_pooling_sequence_ = convert(tf.EagerTensor, row_pooling_sequence_) - col_pooling_sequence_ = convert(tf.EagerTensor, col_pooling_sequence_) - tf.add_input(desc, orig_input_tensor_shape_) - tf.add_input(desc, out_backprop_) - tf.add_input(desc, row_pooling_sequence_) - tf.add_input(desc, col_pooling_sequence_) - if overlapping !== nothing - desc["overlapping"] = Base.Bool(overlapping) - end - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(fractional_avg_pool_grad, [orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_], name=nothing, overlapping=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fractional_avg_pool_grad(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=nothing, overlapping=nothing) - if tf.in_eager_mode() - fractional_avg_pool_grad_eager(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) - else - fractional_avg_pool_grad_graph(orig_input_tensor_shape_, out_backprop_, row_pooling_sequence_, col_pooling_sequence_; name=name, overlapping=overlapping) - end - end end @@ -47748,65 +87081,121 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + 
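# The line above is the graph-mode epilogue shared by these wrappers: the
# populated NodeDescription is committed as an Operation and wrapped as a
# tf.Tensor, which becomes the function's return value. The eager twin below
# ends differently: tf.execute(desc) runs the kernel immediately and a
# tf.TapeNode is recorded so gradients can later flow through the call. A
# hedged sketch of the dispatch this enables (p, a, g are hypothetical
# argument names):
#
#     r = tf.in_eager_mode() ?
#         load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(p, a, g) :
#         load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(p, a, g)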
end + end + begin + function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingAdagradParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -47816,43 +87205,87 @@ end """ begin - function bincount_graph(arr_, size_, weights_; name=nothing) - local desc - tf.with_op_name(name, "Bincount") do - desc = tf.NodeDescription("Bincount") - arr_ = convert(Tensor{Int32}, arr_) - size_ = convert(Tensor{Int32}, size_) - weights_ = convert(Tensor{Any}, weights_) - (weights_,) = tf.tf_promote(weights_) - tf.add_input(desc, arr_) - tf.add_input(desc, size_) - tf.add_input(desc, weights_) - end - tf.Tensor(tf.Operation(desc)) - end - function bincount_eager(arr_, size_, weights_; name=nothing) - desc = tf.EagerOp("Bincount") - arr_ = convert(tf.EagerTensor, arr_) - size_ = convert(tf.EagerTensor, size_) - weights_ = convert(tf.EagerTensor, weights_) - tf.add_input(desc, arr_) - tf.add_input(desc, size_) - tf.add_input(desc, weights_) - desc["T"] = tf.data_type(weights_) - res = tf.execute(desc) - node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bincount(arr_, size_, weights_; name=nothing) - if tf.in_eager_mode() - bincount_eager(arr_, size_, weights_; name=name) - else - bincount_graph(arr_, size_, weights_; name=name) + begin + function bincount_graph(arr_, size_, weights_; name=nothing) + local desc + tf.with_op_name(name, "Bincount") do + desc = tf.NodeDescription("Bincount") + begin + begin + arr_ = convert(Tensor{Int32}, arr_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + weights_ = convert(Tensor{Any}, weights_) + begin + end + end + begin + (weights_,) = tf.tf_promote(weights_) + end + end + begin + begin + tf.add_input(desc, arr_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, weights_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function bincount_eager(arr_, size_, weights_; name=nothing) + desc = tf.EagerOp("Bincount") + arr_ = convert(tf.EagerTensor, arr_) + size_ = convert(tf.EagerTensor, size_) + weights_ = convert(tf.EagerTensor, weights_) + begin + begin + tf.add_input(desc, arr_) + end + begin + tf.add_input(desc, size_) + end + begin + tf.add_input(desc, weights_) + end + end + begin + end + begin + desc["T"] = tf.data_type(weights_) + end + res = tf.execute(desc) + node = tf.TapeNode(bincount, [arr_, size_, weights_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bincount(arr_, size_, weights_; name=nothing) + if tf.in_eager_mode() + bincount_eager(arr_, size_, weights_; name=name) + else + bincount_graph(arr_, size_, weights_; 
name=name) + end end - end + end end @@ -47862,35 +87295,63 @@ end """ begin - function inv_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Inv") do - desc = tf.NodeDescription("Inv") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function inv_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Inv") do + desc = tf.NodeDescription("Inv") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function inv_eager(x_; name=nothing) - desc = tf.EagerOp("Inv") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(inv, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function inv_eager(x_; name=nothing) + desc = tf.EagerOp("Inv") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(inv, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv(x_; name=nothing) - if tf.in_eager_mode() - inv_eager(x_; name=name) - else - inv_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv(x_; name=nothing) + if tf.in_eager_mode() + inv_eager(x_; name=name) + else + inv_graph(x_; name=name) + end end - end + end end @@ -47900,66 +87361,148 @@ end """ begin - function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyProximalAdagrad") do - desc = tf.NodeDescription("ApplyProximalAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - grad_ = convert(Tensor{Any}, grad_) - (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyProximalAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, 
lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) - else - apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + begin + function apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyProximalAdagrad") do + desc = tf.NodeDescription("ApplyProximalAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, accum_, lr_, l1_, l2_, grad_) = tf.tf_promote(var_, accum_, lr_, l1_, l2_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyProximalAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_proximal_adagrad, [var_, accum_, lr_, l1_, l2_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_proximal_adagrad(var_, accum_, lr_, l1_, l2_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_proximal_adagrad_eager(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + else + apply_proximal_adagrad_graph(var_, accum_, lr_, l1_, l2_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -47969,48 +87512,100 @@ end """ begin - function gather_v2_graph(params_, indices_, axis_; 
name=nothing) - local desc - tf.with_op_name(name, "GatherV2") do - desc = tf.NodeDescription("GatherV2") - params_ = convert(Tensor{Any}, params_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - axis_ = convert(Tensor{Any}, axis_) - (params_,) = tf.tf_promote(params_) - (indices_,) = tf.tf_promote(indices_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) - end - function gather_v2_eager(params_, indices_, axis_; name=nothing) - desc = tf.EagerOp("GatherV2") - params_ = convert(tf.EagerTensor, params_) - indices_ = convert(tf.EagerTensor, indices_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, params_) - tf.add_input(desc, indices_) - tf.add_input(desc, axis_) - desc["Tparams"] = tf.data_type(params_) - desc["Tindices"] = tf.data_type(indices_) - desc["Taxis"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing) - if tf.in_eager_mode() - gather_v2_eager(params_, indices_, axis_; name=name) - else - gather_v2_graph(params_, indices_, axis_; name=name) + begin + function gather_v2_graph(params_, indices_, axis_; name=nothing) + local desc + tf.with_op_name(name, "GatherV2") do + desc = tf.NodeDescription("GatherV2") + begin + begin + params_ = convert(Tensor{Any}, params_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + axis_ = convert(Tensor{Any}, axis_) + begin + end + end + begin + (params_,) = tf.tf_promote(params_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function gather_v2_eager(params_, indices_, axis_; name=nothing) + desc = tf.EagerOp("GatherV2") + params_ = convert(tf.EagerTensor, params_) + indices_ = convert(tf.EagerTensor, indices_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, params_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + begin + desc["Tparams"] = tf.data_type(params_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["Taxis"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(gather_v2, [params_, indices_, axis_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gather_v2(params_, indices_, axis_; name=nothing) + if tf.in_eager_mode() + gather_v2_eager(params_, indices_, axis_; name=name) + else + gather_v2_graph(params_, indices_, axis_; name=name) + end end - end + end end @@ -48020,37 +87615,69 @@ end """ begin - function write_file_graph(filename_, contents_; name=nothing) - local desc - tf.with_op_name(name, "WriteFile") do - desc = tf.NodeDescription("WriteFile") - filename_ = 
convert(Tensor{String}, filename_) - contents_ = convert(Tensor{String}, contents_) - tf.add_input(desc, filename_) - tf.add_input(desc, contents_) + begin + function write_file_graph(filename_, contents_; name=nothing) + local desc + tf.with_op_name(name, "WriteFile") do + desc = tf.NodeDescription("WriteFile") + begin + begin + filename_ = convert(Tensor{String}, filename_) + begin + end + end + begin + contents_ = convert(Tensor{String}, contents_) + begin + end + end + end + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, contents_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function write_file_eager(filename_, contents_; name=nothing) - desc = tf.EagerOp("WriteFile") - filename_ = convert(tf.EagerTensor, filename_) - contents_ = convert(tf.EagerTensor, contents_) - tf.add_input(desc, filename_) - tf.add_input(desc, contents_) - res = tf.execute(desc) - node = tf.TapeNode(write_file, [filename_, contents_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function write_file_eager(filename_, contents_; name=nothing) + desc = tf.EagerOp("WriteFile") + filename_ = convert(tf.EagerTensor, filename_) + contents_ = convert(tf.EagerTensor, contents_) + begin + begin + tf.add_input(desc, filename_) + end + begin + tf.add_input(desc, contents_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(write_file, [filename_, contents_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_file(filename_, contents_; name=nothing) - if tf.in_eager_mode() - write_file_eager(filename_, contents_; name=name) - else - write_file_graph(filename_, contents_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_file(filename_, contents_; name=nothing) + if tf.in_eager_mode() + write_file_eager(filename_, contents_; name=name) + else + write_file_graph(filename_, contents_; name=name) + end end - end + end end @@ -48060,38 +87687,64 @@ end """ begin - function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do - desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) + begin + function boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "BoostedTreesGetEnsembleStates") do + desc = tf.NodeDescription("BoostedTreesGetEnsembleStates") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) - desc = tf.EagerOp("BoostedTreesGetEnsembleStates") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - res = tf.execute(desc) - node = 
tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=nothing) + desc = tf.EagerOp("BoostedTreesGetEnsembleStates") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_get_ensemble_states, [tree_ensemble_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) - else - boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_get_ensemble_states(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + boosted_trees_get_ensemble_states_eager(tree_ensemble_handle_; name=name) + else + boosted_trees_get_ensemble_states_graph(tree_ensemble_handle_; name=name) + end end - end + end end @@ -48101,52 +87754,96 @@ end """ begin - function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceGather") do - desc = tf.NodeDescription("ResourceGather") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) + begin + function resource_gather_graph(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceGather") do + desc = tf.NodeDescription("ResourceGather") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceGather") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if validate_indices !== nothing + desc["validate_indices"] = Base.Bool(validate_indices) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_gather, [resource_, indices_], 
name=nothing, validate_indices=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + else + resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) + end end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) end - function resource_gather_eager(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceGather") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - if validate_indices !== nothing - desc["validate_indices"] = Base.Bool(validate_indices) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_gather, [resource_, indices_], name=nothing, validate_indices=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_gather(resource_, indices_; name=nothing, validate_indices=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_gather_eager(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) - else - resource_gather_graph(resource_, indices_; name=name, validate_indices=validate_indices, dtype=dtype) - end - end end @@ -48156,60 +87853,130 @@ end """ begin - function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do - desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") - var_ = convert(Tensor{Any}, var_) - alpha_ = convert(Tensor{Any}, alpha_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - delta_ = convert(Tensor{Any}, delta_) - (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyProximalGradientDescent") do + desc = tf.NodeDescription("ResourceApplyProximalGradientDescent") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + delta_ = convert(Tensor{Any}, delta_) + begin + end + end + begin + (alpha_, l1_, l2_, delta_) = tf.tf_promote(alpha_, l1_, l2_, delta_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, 
delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyProximalGradientDescent") + var_ = convert(tf.EagerTensor, var_) + alpha_ = convert(tf.EagerTensor, alpha_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + delta_ = convert(tf.EagerTensor, delta_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(delta_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + else + resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyProximalGradientDescent") - var_ = convert(tf.EagerTensor, var_) - alpha_ = convert(tf.EagerTensor, alpha_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - delta_ = convert(tf.EagerTensor, delta_) - tf.add_input(desc, var_) - tf.add_input(desc, alpha_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, delta_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(delta_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_proximal_gradient_descent, [var_, alpha_, l1_, l2_, delta_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_proximal_gradient_descent(var_, alpha_, l1_, l2_, delta_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_proximal_gradient_descent_eager(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - else - resource_apply_proximal_gradient_descent_graph(var_, alpha_, l1_, l2_, delta_; name=name, use_locking=use_locking) - end - end end @@ -48219,40 +87986,78 @@ end """ begin - function truncate_mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "TruncateMod") do - desc = tf.NodeDescription("TruncateMod") - x_ = convert(Tensor{Any}, x_) - y_ = 
convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function truncate_mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "TruncateMod") do + desc = tf.NodeDescription("TruncateMod") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function truncate_mod_eager(x_, y_; name=nothing) - desc = tf.EagerOp("TruncateMod") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function truncate_mod_eager(x_, y_; name=nothing) + desc = tf.EagerOp("TruncateMod") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(truncate_mod, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_mod(x_, y_; name=nothing) - if tf.in_eager_mode() - truncate_mod_eager(x_, y_; name=name) - else - truncate_mod_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function truncate_mod(x_, y_; name=nothing) + if tf.in_eager_mode() + truncate_mod_eager(x_, y_; name=name) + else + truncate_mod_graph(x_, y_; name=name) + end end - end + end end @@ -48262,40 +88067,70 @@ end """ begin - function log_matrix_determinant_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "LogMatrixDeterminant") do - desc = tf.NodeDescription("LogMatrixDeterminant") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function log_matrix_determinant_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "LogMatrixDeterminant") do + desc = tf.NodeDescription("LogMatrixDeterminant") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function log_matrix_determinant_eager(input_; name=nothing) - desc = tf.EagerOp("LogMatrixDeterminant") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function log_matrix_determinant_eager(input_; name=nothing) + desc = tf.EagerOp("LogMatrixDeterminant") + 
input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(log_matrix_determinant, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_matrix_determinant(input_; name=nothing) - if tf.in_eager_mode() - log_matrix_determinant_eager(input_; name=name) - else - log_matrix_determinant_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function log_matrix_determinant(input_; name=nothing) + if tf.in_eager_mode() + log_matrix_determinant_eager(input_; name=name) + else + log_matrix_determinant_graph(input_; name=name) + end end - end + end end @@ -48305,37 +88140,69 @@ end """ begin - function irfft2d_graph(input_, fft_length_; name=nothing) - local desc - tf.with_op_name(name, "IRFFT2D") do - desc = tf.NodeDescription("IRFFT2D") - input_ = convert(Tensor{Complex{Float32}}, input_) - fft_length_ = convert(Tensor{Int32}, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) + begin + function irfft2d_graph(input_, fft_length_; name=nothing) + local desc + tf.with_op_name(name, "IRFFT2D") do + desc = tf.NodeDescription("IRFFT2D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + fft_length_ = convert(Tensor{Int32}, fft_length_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function irfft2d_eager(input_, fft_length_; name=nothing) - desc = tf.EagerOp("IRFFT2D") - input_ = convert(tf.EagerTensor, input_) - fft_length_ = convert(tf.EagerTensor, fft_length_) - tf.add_input(desc, input_) - tf.add_input(desc, fft_length_) - res = tf.execute(desc) - node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function irfft2d_eager(input_, fft_length_; name=nothing) + desc = tf.EagerOp("IRFFT2D") + input_ = convert(tf.EagerTensor, input_) + fft_length_ = convert(tf.EagerTensor, fft_length_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, fft_length_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(irfft2d, [input_, fft_length_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft2d(input_, fft_length_; name=nothing) - if tf.in_eager_mode() - irfft2d_eager(input_, fft_length_; name=name) - else - irfft2d_graph(input_, fft_length_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function irfft2d(input_, fft_length_; name=nothing) + if tf.in_eager_mode() + irfft2d_eager(input_, fft_length_; name=name) + else + irfft2d_graph(input_, fft_length_; name=name) + end end - end + end end @@ -48345,62 +88212,120 @@ end """ begin - function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, 
"BoostedTreesTrainingPredict") do - desc = tf.NodeDescription("BoostedTreesTrainingPredict") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_) - cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, cached_tree_ids_) - tf.add_input(desc, cached_node_ids_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) + begin + function boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesTrainingPredict") do + desc = tf.NodeDescription("BoostedTreesTrainingPredict") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + cached_tree_ids_ = convert(Tensor{Int32}, cached_tree_ids_) + begin + end + end + begin + cached_node_ids_ = convert(Tensor{Int32}, cached_node_ids_) + begin + end + end + begin + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, cached_tree_ids_) + end + begin + tf.add_input(desc, cached_node_ids_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + desc = tf.EagerOp("BoostedTreesTrainingPredict") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + cached_tree_ids_ = convert(tf.EagerTensor, cached_tree_ids_) + cached_node_ids_ = convert(tf.EagerTensor, cached_node_ids_) + bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, cached_tree_ids_) + end + begin + tf.add_input(desc, cached_node_ids_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - desc = tf.EagerOp("BoostedTreesTrainingPredict") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - cached_tree_ids_ = convert(tf.EagerTensor, cached_tree_ids_) - cached_node_ids_ = convert(tf.EagerTensor, cached_node_ids_) - bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, cached_tree_ids_) - tf.add_input(desc, cached_node_ids_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_training_predict, [tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_training_predict(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_training_predict_eager(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - else - boosted_trees_training_predict_graph(tree_ensemble_handle_, cached_tree_ids_, cached_node_ids_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - end - end end @@ -48410,35 +88335,63 @@ end """ begin - function floor_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Floor") do - desc = tf.NodeDescription("Floor") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function floor_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Floor") do + desc = tf.NodeDescription("Floor") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function floor_eager(x_; name=nothing) - desc = tf.EagerOp("Floor") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = 
tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(floor, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function floor_eager(x_; name=nothing) + desc = tf.EagerOp("Floor") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(floor, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor(x_; name=nothing) - if tf.in_eager_mode() - floor_eager(x_; name=name) - else - floor_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function floor(x_; name=nothing) + if tf.in_eager_mode() + floor_eager(x_; name=name) + else + floor_graph(x_; name=name) + end end - end + end end @@ -48448,57 +88401,121 @@ end """ begin - function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - local desc - tf.with_op_name(name, "WriteImageSummary") do - desc = tf.NodeDescription("WriteImageSummary") - writer_ = convert(Tensor{Any}, writer_) - step_ = convert(Tensor{Int64}, step_) - tag_ = convert(Tensor{String}, tag_) - tensor_ = convert(Tensor{Float32}, tensor_) - bad_color_ = convert(Tensor{UInt8}, bad_color_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, bad_color_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) + begin + function write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + local desc + tf.with_op_name(name, "WriteImageSummary") do + desc = tf.NodeDescription("WriteImageSummary") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + begin + step_ = convert(Tensor{Int64}, step_) + begin + end + end + begin + tag_ = convert(Tensor{String}, tag_) + begin + end + end + begin + tensor_ = convert(Tensor{Float32}, tensor_) + begin + end + end + begin + bad_color_ = convert(Tensor{UInt8}, bad_color_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, bad_color_) + end + end + begin + begin + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + desc = tf.EagerOp("WriteImageSummary") + writer_ = convert(tf.EagerTensor, writer_) + step_ = convert(tf.EagerTensor, step_) + tag_ = convert(tf.EagerTensor, tag_) + tensor_ = convert(tf.EagerTensor, tensor_) + bad_color_ = convert(tf.EagerTensor, bad_color_) + begin + begin + tf.add_input(desc, writer_) + end + begin + tf.add_input(desc, step_) + end + begin + tf.add_input(desc, tag_) + end + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, bad_color_) + end + end + begin + begin + if max_images !== nothing + desc["max_images"] = Base.Int(max_images) + end + end + end + begin + desc["T"] = tf.data_type(tensor_) + end + res = 
tf.execute(desc) + node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) + if tf.in_eager_mode() + write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + else + write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) + end end - end - tf.Tensor(tf.Operation(desc)) end - function write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - desc = tf.EagerOp("WriteImageSummary") - writer_ = convert(tf.EagerTensor, writer_) - step_ = convert(tf.EagerTensor, step_) - tag_ = convert(tf.EagerTensor, tag_) - tensor_ = convert(tf.EagerTensor, tensor_) - bad_color_ = convert(tf.EagerTensor, bad_color_) - tf.add_input(desc, writer_) - tf.add_input(desc, step_) - tf.add_input(desc, tag_) - tf.add_input(desc, tensor_) - tf.add_input(desc, bad_color_) - if max_images !== nothing - desc["max_images"] = Base.Int(max_images) - end - desc["T"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(write_image_summary, [writer_, step_, tag_, tensor_, bad_color_], name=nothing, max_images=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function write_image_summary(writer_, step_, tag_, tensor_, bad_color_; name=nothing, max_images=nothing) - if tf.in_eager_mode() - write_image_summary_eager(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) - else - write_image_summary_graph(writer_, step_, tag_, tensor_, bad_color_; name=name, max_images=max_images) - end - end end @@ -48508,39 +88525,75 @@ end """ begin - function tile_grad_graph(input_, multiples_; name=nothing) - local desc - tf.with_op_name(name, "TileGrad") do - desc = tf.NodeDescription("TileGrad") - input_ = convert(Tensor{Any}, input_) - multiples_ = convert(Tensor{Int32}, multiples_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) + begin + function tile_grad_graph(input_, multiples_; name=nothing) + local desc + tf.with_op_name(name, "TileGrad") do + desc = tf.NodeDescription("TileGrad") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + multiples_ = convert(Tensor{Int32}, multiples_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, multiples_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tile_grad_eager(input_, multiples_; name=nothing) - desc = tf.EagerOp("TileGrad") - input_ = convert(tf.EagerTensor, input_) - multiples_ = convert(tf.EagerTensor, multiples_) - tf.add_input(desc, input_) - tf.add_input(desc, multiples_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tile_grad_eager(input_, multiples_; name=nothing) + desc = tf.EagerOp("TileGrad") + input_ = 
convert(tf.EagerTensor, input_) + multiples_ = convert(tf.EagerTensor, multiples_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, multiples_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(tile_grad, [input_, multiples_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile_grad(input_, multiples_; name=nothing) - if tf.in_eager_mode() - tile_grad_eager(input_, multiples_; name=name) - else - tile_grad_graph(input_, multiples_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tile_grad(input_, multiples_; name=nothing) + if tf.in_eager_mode() + tile_grad_eager(input_, multiples_; name=name) + else + tile_grad_graph(input_, multiples_; name=name) + end end - end + end end @@ -48550,65 +88603,121 @@ end Load embedding parameters for a single table. """ begin - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do - desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") do + desc = tf.NodeDescription("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + gradient_accumulators_ = convert(Tensor{Float32}, gradient_accumulators_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, 
accumulators_) + gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, gradient_accumulators_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - gradient_accumulators_ = convert(tf.EagerTensor, gradient_accumulators_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, gradient_accumulators_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug, [parameters_, accumulators_, gradient_accumulators_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug(parameters_, accumulators_, gradient_accumulators_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, 
shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_eager(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_proximal_adagrad_parameters_grad_accum_debug_graph(parameters_, accumulators_, gradient_accumulators_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -48618,48 +88727,86 @@ end """ begin - function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGradV3") do - desc = tf.NodeDescription("TensorArrayGradV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) + begin + function tensor_array_grad_v3_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGradV3") do + desc = tf.NodeDescription("TensorArrayGradV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGradV3") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_v3_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tensor_array_grad_v3_eager(handle_, flow_in_; name=nothing, source=nothing) - desc = tf.EagerOp("TensorArrayGradV3") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad_v3, [handle_, flow_in_], name=nothing, source=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad_v3(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_v3_eager(handle_, 
flow_in_; name=name, source=source) - else - tensor_array_grad_v3_graph(handle_, flow_in_; name=name, source=source) - end - end end @@ -48669,49 +88816,89 @@ end An op that enqueues a list of input batch tensors to TPUEmbedding. """ begin - function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - local desc - tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do - desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") - batch_ = [convert(Tensor{Int32}, x) for x = batch_] - mode_override_ = convert(Tensor{String}, mode_override_) - tf.add_input(desc, batch_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) + begin + function enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + local desc + tf.with_op_name(name, "EnqueueTPUEmbeddingIntegerBatch") do + desc = tf.NodeDescription("EnqueueTPUEmbeddingIntegerBatch") + begin + begin + batch_ = [convert(Tensor{Int32}, x) for x = batch_] + begin + end + end + begin + mode_override_ = convert(Tensor{String}, mode_override_) + begin + end + end + end + begin + begin + tf.add_input(desc, batch_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") + batch_ = convert(tf.EagerTensor, batch_) + mode_override_ = convert(tf.EagerTensor, mode_override_) + begin + begin + tf.add_input(desc, batch_) + end + begin + tf.add_input(desc, mode_override_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if device_ordinal !== nothing + desc["device_ordinal"] = Base.Int(device_ordinal) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, mode_override_], name=nothing, N=nothing, device_ordinal=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) + if tf.in_eager_mode() + enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + else + enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - desc = tf.EagerOp("EnqueueTPUEmbeddingIntegerBatch") - batch_ = convert(tf.EagerTensor, batch_) - mode_override_ = convert(tf.EagerTensor, mode_override_) - tf.add_input(desc, batch_) - tf.add_input(desc, mode_override_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if device_ordinal !== nothing - desc["device_ordinal"] = Base.Int(device_ordinal) - end - res = tf.execute(desc) - node = tf.TapeNode(enqueue_tpu_embedding_integer_batch, [batch_, 
mode_override_], name=nothing, N=nothing, device_ordinal=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function enqueue_tpu_embedding_integer_batch(batch_, mode_override_; name=nothing, N=nothing, device_ordinal=nothing) - if tf.in_eager_mode() - enqueue_tpu_embedding_integer_batch_eager(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) - else - enqueue_tpu_embedding_integer_batch_graph(batch_, mode_override_; name=name, N=N, device_ordinal=device_ordinal) - end - end end @@ -48721,78 +88908,160 @@ end """ begin - function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - local desc - tf.with_op_name(name, "FusedBatchNorm") do - desc = tf.NodeDescription("FusedBatchNorm") - x_ = convert(Tensor{Any}, x_) - scale_ = convert(Tensor{Any}, scale_) - offset_ = convert(Tensor{Any}, offset_) - mean_ = convert(Tensor{Any}, mean_) - variance_ = convert(Tensor{Any}, variance_) - (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + function fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + local desc + tf.with_op_name(name, "FusedBatchNorm") do + desc = tf.NodeDescription("FusedBatchNorm") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + scale_ = convert(Tensor{Any}, scale_) + begin + end + end + begin + offset_ = convert(Tensor{Any}, offset_) + begin + end + end + begin + mean_ = convert(Tensor{Any}, mean_) + begin + end + end + begin + variance_ = convert(Tensor{Any}, variance_) + begin + end + end + begin + (x_, scale_, offset_, mean_, variance_) = tf.tf_promote(x_, scale_, offset_, mean_, variance_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, offset_) + end + begin + tf.add_input(desc, mean_) + end + begin + tf.add_input(desc, variance_) + end + end + begin + begin + if epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:5 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + desc = tf.EagerOp("FusedBatchNorm") + x_ = convert(tf.EagerTensor, x_) + scale_ = convert(tf.EagerTensor, scale_) + offset_ = convert(tf.EagerTensor, offset_) + mean_ = convert(tf.EagerTensor, mean_) + variance_ = convert(tf.EagerTensor, variance_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, scale_) + end + begin + tf.add_input(desc, offset_) + end + begin + tf.add_input(desc, mean_) + end + begin + tf.add_input(desc, variance_) + end + end + begin + begin + if 
epsilon !== nothing + desc["epsilon"] = Base.identity(epsilon) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if is_training !== nothing + desc["is_training"] = Base.Bool(is_training) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(scale_) + end + begin + desc["T"] = tf.data_type(offset_) + end + begin + desc["T"] = tf.data_type(mean_) + end + begin + desc["T"] = tf.data_type(variance_) + end + res = tf.execute(desc) + node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) + if tf.in_eager_mode() + fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + else + fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) + end end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:5 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - desc = tf.EagerOp("FusedBatchNorm") - x_ = convert(tf.EagerTensor, x_) - scale_ = convert(tf.EagerTensor, scale_) - offset_ = convert(tf.EagerTensor, offset_) - mean_ = convert(tf.EagerTensor, mean_) - variance_ = convert(tf.EagerTensor, variance_) - tf.add_input(desc, x_) - tf.add_input(desc, scale_) - tf.add_input(desc, offset_) - tf.add_input(desc, mean_) - tf.add_input(desc, variance_) - if epsilon !== nothing - desc["epsilon"] = Base.identity(epsilon) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if is_training !== nothing - desc["is_training"] = Base.Bool(is_training) - end - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(scale_) - desc["T"] = tf.data_type(offset_) - desc["T"] = tf.data_type(mean_) - desc["T"] = tf.data_type(variance_) - res = tf.execute(desc) - node = tf.TapeNode(fused_batch_norm, [x_, scale_, offset_, mean_, variance_], name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fused_batch_norm(x_, scale_, offset_, mean_, variance_; name=nothing, epsilon=nothing, data_format=nothing, is_training=nothing) - if tf.in_eager_mode() - fused_batch_norm_eager(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - else - fused_batch_norm_graph(x_, scale_, offset_, mean_, variance_; name=name, epsilon=epsilon, data_format=data_format, is_training=is_training) - end - end end @@ -48802,37 +89071,69 @@ end """ begin - function logical_and_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "LogicalAnd") do - desc = tf.NodeDescription("LogicalAnd") - x_ = convert(Tensor{Bool}, x_) - y_ = convert(Tensor{Bool}, 
y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function logical_and_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "LogicalAnd") do + desc = tf.NodeDescription("LogicalAnd") + begin + begin + x_ = convert(Tensor{Bool}, x_) + begin + end + end + begin + y_ = convert(Tensor{Bool}, y_) + begin + end + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function logical_and_eager(x_, y_; name=nothing) - desc = tf.EagerOp("LogicalAnd") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - res = tf.execute(desc) - node = tf.TapeNode(logical_and, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function logical_and_eager(x_, y_; name=nothing) + desc = tf.EagerOp("LogicalAnd") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(logical_and, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_and(x_, y_; name=nothing) - if tf.in_eager_mode() - logical_and_eager(x_, y_; name=name) - else - logical_and_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_and(x_, y_; name=nothing) + if tf.in_eager_mode() + logical_and_eager(x_, y_; name=name) + else + logical_and_graph(x_, y_; name=name) + end end - end + end end @@ -48842,47 +89143,97 @@ end """ begin - function tensor_scatter_update_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterUpdate") do - desc = tf.NodeDescription("TensorScatterUpdate") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) - desc = tf.EagerOp("TensorScatterUpdate") - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - desc["T"] = tf.data_type(tensor_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) + begin + function tensor_scatter_update_graph(tensor_, indices_, 
updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterUpdate") do + desc = tf.NodeDescription("TensorScatterUpdate") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_scatter_update_eager(tensor_, indices_, updates_; name=nothing) + desc = tf.EagerOp("TensorScatterUpdate") + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_update, [tensor_, indices_, updates_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_update(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_update_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_update_graph(tensor_, indices_, updates_; name=name) + end end - end + end end @@ -48892,47 +89243,75 @@ end """ begin - function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "TextLineReaderV2") do - desc = tf.NodeDescription("TextLineReaderV2") - if skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function text_line_reader_v2_graph(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "TextLineReaderV2") do + desc = tf.NodeDescription("TextLineReaderV2") + begin + end + begin + end + begin + begin + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function text_line_reader_v2_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("TextLineReaderV2") + begin + end + begin + begin + if skip_header_lines !== nothing + desc["skip_header_lines"] = Base.Int(skip_header_lines) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = 
Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + else + text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function text_line_reader_v2_eager(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("TextLineReaderV2") - if skip_header_lines !== nothing - desc["skip_header_lines"] = Base.Int(skip_header_lines) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(text_line_reader_v2, [], name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function text_line_reader_v2(; name=nothing, skip_header_lines=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - text_line_reader_v2_eager(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - else - text_line_reader_v2_graph(; name=name, skip_header_lines=skip_header_lines, container=container, shared_name=shared_name) - end - end end @@ -48942,45 +89321,77 @@ end """ begin - function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "TensorSliceDataset") do - desc = tf.NodeDescription("TensorSliceDataset") - components_ = [convert(Tensor{Any}, x) for x = components_] - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) + begin + function tensor_slice_dataset_graph(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "TensorSliceDataset") do + desc = tf.NodeDescription("TensorSliceDataset") + begin + begin + components_ = [convert(Tensor{Any}, x) for x = components_] + begin + end + end + end + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("TensorSliceDataset") + components_ = convert(tf.EagerTensor, components_) + begin + begin + tf.add_input(desc, components_) + end + end + begin + begin + if Toutput_types !== nothing + desc["Toutput_types"] = map(Base.identity, Toutput_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = 
map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + else + tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_slice_dataset_eager(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("TensorSliceDataset") - components_ = convert(tf.EagerTensor, components_) - tf.add_input(desc, components_) - if Toutput_types !== nothing - desc["Toutput_types"] = map(Base.identity, Toutput_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_slice_dataset, [components_], name=nothing, Toutput_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_slice_dataset(components_; name=nothing, Toutput_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - tensor_slice_dataset_eager(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - else - tensor_slice_dataset_graph(components_; name=name, Toutput_types=Toutput_types, output_shapes=output_shapes) - end - end end @@ -48990,47 +89401,99 @@ end """ begin - function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayScatterV3") do - desc = tf.NodeDescription("TensorArrayScatterV3") - handle_ = convert(Tensor{Any}, handle_) - indices_ = convert(Tensor{Int32}, indices_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayScatterV3") - handle_ = convert(tf.EagerTensor, handle_) - indices_ = convert(tf.EagerTensor, indices_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, indices_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; 
name=name) - else - tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) + begin + function tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayScatterV3") do + desc = tf.NodeDescription("TensorArrayScatterV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayScatterV3") + handle_ = convert(tf.EagerTensor, handle_) + indices_ = convert(tf.EagerTensor, indices_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_scatter_v3, [handle_, indices_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_scatter_v3(handle_, indices_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_scatter_v3_eager(handle_, indices_, value_, flow_in_; name=name) + else + tensor_array_scatter_v3_graph(handle_, indices_, value_, flow_in_; name=name) + end end - end + end end @@ -49040,45 +89503,85 @@ end """ begin - function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeNearestNeighborGrad") do - desc = tf.NodeDescription("ResizeNearestNeighborGrad") - grads_ = convert(Tensor{Any}, grads_) - size_ = convert(Tensor{Int32}, size_) - (grads_,) = tf.tf_promote(grads_) - tf.add_input(desc, grads_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_nearest_neighbor_grad_graph(grads_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeNearestNeighborGrad") do + desc = tf.NodeDescription("ResizeNearestNeighborGrad") + begin + begin + grads_ = convert(Tensor{Any}, grads_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + (grads_,) = tf.tf_promote(grads_) + end + end + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeNearestNeighborGrad") + grads_ = convert(tf.EagerTensor, grads_) + size_ = convert(tf.EagerTensor, 
size_) + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(grads_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) + else + resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_nearest_neighbor_grad_eager(grads_, size_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeNearestNeighborGrad") - grads_ = convert(tf.EagerTensor, grads_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, grads_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(grads_) - res = tf.execute(desc) - node = tf.TapeNode(resize_nearest_neighbor_grad, [grads_, size_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_nearest_neighbor_grad(grads_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_nearest_neighbor_grad_eager(grads_, size_; name=name, align_corners=align_corners) - else - resize_nearest_neighbor_grad_graph(grads_, size_; name=name, align_corners=align_corners) - end - end end @@ -49088,71 +89591,163 @@ end """ begin - function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ApplyPowerSign") do - desc = tf.NodeDescription("ApplyPowerSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - logbase_ = convert(Tensor{Any}, logbase_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ApplyPowerSign") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - lr_ = convert(tf.EagerTensor, lr_) - logbase_ = convert(tf.EagerTensor, logbase_) - sign_decay_ = convert(tf.EagerTensor, sign_decay_) - beta_ = convert(tf.EagerTensor, beta_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, logbase_) - tf.add_input(desc, sign_decay_) - 
tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(logbase_) - desc["T"] = tf.data_type(sign_decay_) - desc["T"] = tf.data_type(beta_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + begin + function apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ApplyPowerSign") do + desc = tf.NodeDescription("ApplyPowerSign") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + logbase_ = convert(Tensor{Any}, logbase_) + begin + end + end + begin + sign_decay_ = convert(Tensor{Any}, sign_decay_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) = tf.tf_promote(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, logbase_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ApplyPowerSign") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + logbase_ = convert(tf.EagerTensor, logbase_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, logbase_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(logbase_) + end + begin + desc["T"] = tf.data_type(sign_decay_) + end + begin + desc["T"] = tf.data_type(beta_) + 
end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(apply_power_sign, [var_, m_, lr_, logbase_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function apply_power_sign(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + apply_power_sign_eager(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + apply_power_sign_graph(var_, m_, lr_, logbase_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -49162,47 +89757,91 @@ end """ begin - function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "MirrorPad") do - desc = tf.NodeDescription("MirrorPad") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) + begin + function mirror_pad_graph(input_, paddings_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "MirrorPad") do + desc = tf.NodeDescription("MirrorPad") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) + desc = tf.EagerOp("MirrorPad") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + res = tf.execute(desc) + node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing) + if tf.in_eager_mode() + mirror_pad_eager(input_, paddings_; name=name, mode=mode) + else + mirror_pad_graph(input_, paddings_; name=name, mode=mode) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function mirror_pad_eager(input_, paddings_; name=nothing, mode=nothing) - desc = tf.EagerOp("MirrorPad") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - desc["T"] = tf.data_type(input_) - desc["Tpaddings"] = tf.data_type(paddings_) - res = tf.execute(desc) - node = tf.TapeNode(mirror_pad, [input_, paddings_], name=nothing, mode=nothing, res) - if 
length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mirror_pad(input_, paddings_; name=nothing, mode=nothing) - if tf.in_eager_mode() - mirror_pad_eager(input_, paddings_; name=name, mode=mode) - else - mirror_pad_graph(input_, paddings_; name=name, mode=mode) - end - end end @@ -49212,33 +89851,57 @@ end """ begin - function logical_not_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "LogicalNot") do - desc = tf.NodeDescription("LogicalNot") - x_ = convert(Tensor{Bool}, x_) - tf.add_input(desc, x_) + begin + function logical_not_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "LogicalNot") do + desc = tf.NodeDescription("LogicalNot") + begin + begin + x_ = convert(Tensor{Bool}, x_) + begin + end + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function logical_not_eager(x_; name=nothing) - desc = tf.EagerOp("LogicalNot") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - res = tf.execute(desc) - node = tf.TapeNode(logical_not, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function logical_not_eager(x_; name=nothing) + desc = tf.EagerOp("LogicalNot") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(logical_not, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_not(x_; name=nothing) - if tf.in_eager_mode() - logical_not_eager(x_; name=name) - else - logical_not_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function logical_not(x_; name=nothing) + if tf.in_eager_mode() + logical_not_eager(x_; name=name) + else + logical_not_graph(x_; name=name) + end end - end + end end @@ -49248,33 +89911,57 @@ end """ begin - function batch_ifft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT") do - desc = tf.NodeDescription("BatchIFFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_ifft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT") do + desc = tf.NodeDescription("BatchIFFT") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_ifft_eager(input_; name=nothing) - desc = tf.EagerOp("BatchIFFT") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_ifft, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_ifft_eager(input_; name=nothing) + desc = tf.EagerOp("BatchIFFT") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function 
batch_ifft(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft_eager(input_; name=name) - else - batch_ifft_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft_eager(input_; name=name) + else + batch_ifft_graph(input_; name=name) + end end - end + end end @@ -49284,54 +89971,96 @@ end """ begin - function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcatV2") do - desc = tf.NodeDescription("TensorArrayConcatV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_concat_v2_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcatV2") do + desc = tf.NodeDescription("TensorArrayConcatV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcatV2") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tensor_array_concat_v2_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - desc = tf.EagerOp("TensorArrayConcatV2") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = 
convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_concat_v2, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat_v2(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_v2_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_v2_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - end - end end @@ -49341,48 +90070,92 @@ end """ begin - function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Sum") do - desc = tf.NodeDescription("Sum") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function sum_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Sum") do + desc = tf.NodeDescription("Sum") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Sum") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if tf.in_eager_mode() + sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + 
end end - end - tf.Tensor(tf.Operation(desc)) - end - function sum_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("Sum") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(sum, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sum(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - sum_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - sum_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -49392,49 +90165,89 @@ end """ begin - function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - local desc - tf.with_op_name(name, "BoostedTreesPredict") do - desc = tf.NodeDescription("BoostedTreesPredict") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) + begin + function boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + local desc + tf.with_op_name(name, "BoostedTreesPredict") do + desc = tf.NodeDescription("BoostedTreesPredict") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + begin + bucketized_features_ = [convert(Tensor{Int32}, x) for x = bucketized_features_] + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + desc = tf.EagerOp("BoostedTreesPredict") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + begin + tf.add_input(desc, bucketized_features_) + end + end + begin + begin + if num_bucketized_features !== nothing + desc["num_bucketized_features"] = Base.Int(num_bucketized_features) + end + end + begin + if logits_dimension !== nothing + desc["logits_dimension"] = Base.Int(logits_dimension) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_predict, 
[tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) + if tf.in_eager_mode() + boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + else + boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - desc = tf.EagerOp("BoostedTreesPredict") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - bucketized_features_ = convert(tf.EagerTensor, bucketized_features_) - tf.add_input(desc, tree_ensemble_handle_) - tf.add_input(desc, bucketized_features_) - if num_bucketized_features !== nothing - desc["num_bucketized_features"] = Base.Int(num_bucketized_features) - end - if logits_dimension !== nothing - desc["logits_dimension"] = Base.Int(logits_dimension) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_predict, [tree_ensemble_handle_, bucketized_features_], name=nothing, num_bucketized_features=nothing, logits_dimension=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_predict(tree_ensemble_handle_, bucketized_features_; name=nothing, num_bucketized_features=nothing, logits_dimension=nothing) - if tf.in_eager_mode() - boosted_trees_predict_eager(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - else - boosted_trees_predict_graph(tree_ensemble_handle_, bucketized_features_; name=name, num_bucketized_features=num_bucketized_features, logits_dimension=logits_dimension) - end - end end @@ -49444,67 +90257,141 @@ end """ begin - function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdagrad") do - desc = tf.NodeDescription("ResourceSparseApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, grad_) = tf.tf_promote(lr_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) + begin + function resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, 
"ResourceSparseApplyAdagrad") do + desc = tf.NodeDescription("ResourceSparseApplyAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (lr_, grad_) = tf.tf_promote(lr_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("ResourceSparseApplyAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - desc = tf.EagerOp("ResourceSparseApplyAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if 
update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_adagrad, [var_, accum_, lr_, grad_, indices_], name=nothing, use_locking=nothing, update_slots=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad(var_, accum_, lr_, grad_, indices_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adagrad_eager(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - else - resource_sparse_apply_adagrad_graph(var_, accum_, lr_, grad_, indices_; name=name, use_locking=use_locking, update_slots=update_slots) - end - end end @@ -49514,46 +90401,88 @@ end """ begin - function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) - local desc - tf.with_op_name(name, "LeakyReluGrad") do - desc = tf.NodeDescription("LeakyReluGrad") - gradients_ = convert(Tensor{Float32}, gradients_) - features_ = convert(Tensor{Float32}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) + begin + function leaky_relu_grad_graph(gradients_, features_; name=nothing, alpha=nothing) + local desc + tf.with_op_name(name, "LeakyReluGrad") do + desc = tf.NodeDescription("LeakyReluGrad") + begin + begin + gradients_ = convert(Tensor{Float32}, gradients_) + begin + end + end + begin + features_ = convert(Tensor{Float32}, features_) + begin + end + end + begin + (gradients_, features_) = tf.tf_promote(gradients_, features_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) + desc = tf.EagerOp("LeakyReluGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + begin + if alpha !== nothing + desc["alpha"] = Base.identity(alpha) + end + end + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) + if tf.in_eager_mode() + leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) + else + leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) + end end - end - tf.Tensor(tf.Operation(desc)) end - function leaky_relu_grad_eager(gradients_, features_; name=nothing, alpha=nothing) - desc = tf.EagerOp("LeakyReluGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - features_ = convert(tf.EagerTensor, features_) - 
tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - if alpha !== nothing - desc["alpha"] = Base.identity(alpha) - end - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(leaky_relu_grad, [gradients_, features_], name=nothing, alpha=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function leaky_relu_grad(gradients_, features_; name=nothing, alpha=nothing) - if tf.in_eager_mode() - leaky_relu_grad_eager(gradients_, features_; name=name, alpha=alpha) - else - leaky_relu_grad_graph(gradients_, features_; name=name, alpha=alpha) - end - end end @@ -49563,41 +90492,73 @@ end A graph node which represents a return value of a function. """ begin - function _device_retval_graph(input_; name=nothing, index=nothing) - local desc - tf.with_op_name(name, "_DeviceRetval") do - desc = tf.NodeDescription("_DeviceRetval") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) + begin + function _device_retval_graph(input_; name=nothing, index=nothing) + local desc + tf.with_op_name(name, "_DeviceRetval") do + desc = tf.NodeDescription("_DeviceRetval") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _device_retval_eager(input_; name=nothing, index=nothing) + desc = tf.EagerOp("_DeviceRetval") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if index !== nothing + desc["index"] = Base.Int(index) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_retval(input_; name=nothing, index=nothing) + if tf.in_eager_mode() + _device_retval_eager(input_; name=name, index=index) + else + _device_retval_graph(input_; name=name, index=index) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function _device_retval_eager(input_; name=nothing, index=nothing) - desc = tf.EagerOp("_DeviceRetval") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if index !== nothing - desc["index"] = Base.Int(index) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_device_retval, [input_], name=nothing, index=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _device_retval(input_; name=nothing, index=nothing) - if tf.in_eager_mode() - _device_retval_eager(input_; name=name, index=index) - else - _device_retval_graph(input_; name=name, index=index) - end - end end @@ -49607,41 +90568,81 @@ end """ begin - function pad_graph(input_, paddings_; name=nothing) - local desc - tf.with_op_name(name, "Pad") do - desc = tf.NodeDescription("Pad") - input_ = convert(Tensor{Any}, 
input_) - paddings_ = convert(Tensor{Int32}, paddings_) - (input_,) = tf.tf_promote(input_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) + begin + function pad_graph(input_, paddings_; name=nothing) + local desc + tf.with_op_name(name, "Pad") do + desc = tf.NodeDescription("Pad") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function pad_eager(input_, paddings_; name=nothing) - desc = tf.EagerOp("Pad") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - desc["T"] = tf.data_type(input_) - desc["Tpaddings"] = tf.data_type(paddings_) - res = tf.execute(desc) - node = tf.TapeNode(pad, [input_, paddings_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function pad_eager(input_, paddings_; name=nothing) + desc = tf.EagerOp("Pad") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + res = tf.execute(desc) + node = tf.TapeNode(pad, [input_, paddings_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad(input_, paddings_; name=nothing) - if tf.in_eager_mode() - pad_eager(input_, paddings_; name=name) - else - pad_graph(input_, paddings_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad(input_, paddings_; name=nothing) + if tf.in_eager_mode() + pad_eager(input_, paddings_; name=name) + else + pad_graph(input_, paddings_; name=name) + end end - end + end end @@ -49651,55 +90652,107 @@ end """ begin - function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "AddManySparseToTensorsMap") do - desc = tf.NodeDescription("AddManySparseToTensorsMap") - sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) - sparse_values_ = convert(Tensor{Any}, sparse_values_) - sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) - (sparse_values_,) = tf.tf_promote(sparse_values_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "AddManySparseToTensorsMap") do + desc = tf.NodeDescription("AddManySparseToTensorsMap") + begin + begin + sparse_indices_ = convert(Tensor{Int64}, sparse_indices_) + begin + end + end + 
begin + sparse_values_ = convert(Tensor{Any}, sparse_values_) + begin + end + end + begin + sparse_shape_ = convert(Tensor{Int64}, sparse_shape_) + begin + end + end + begin + (sparse_values_,) = tf.tf_promote(sparse_values_) + end + end + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("AddManySparseToTensorsMap") + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_values_ = convert(tf.EagerTensor, sparse_values_) + sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) + begin + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_values_) + end + begin + tf.add_input(desc, sparse_shape_) + end + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(sparse_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + else + add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("AddManySparseToTensorsMap") - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_values_ = convert(tf.EagerTensor, sparse_values_) - sparse_shape_ = convert(tf.EagerTensor, sparse_shape_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_values_) - tf.add_input(desc, sparse_shape_) - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(sparse_values_) - res = tf.execute(desc) - node = tf.TapeNode(add_many_sparse_to_tensors_map, [sparse_indices_, sparse_values_, sparse_shape_], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_many_sparse_to_tensors_map(sparse_indices_, sparse_values_, sparse_shape_; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - 
add_many_sparse_to_tensors_map_eager(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - else - add_many_sparse_to_tensors_map_graph(sparse_indices_, sparse_values_, sparse_shape_; name=name, container=container, shared_name=shared_name) - end - end end @@ -49709,48 +90762,94 @@ end """ begin - function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseReorder") do - desc = tf.NodeDescription("SparseReorder") - input_indices_ = convert(Tensor{Int64}, input_indices_) - input_values_ = convert(Tensor{Any}, input_values_) - input_shape_ = convert(Tensor{Int64}, input_shape_) - (input_values_,) = tf.tf_promote(input_values_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) - desc = tf.EagerOp("SparseReorder") - input_indices_ = convert(tf.EagerTensor, input_indices_) - input_values_ = convert(tf.EagerTensor, input_values_) - input_shape_ = convert(tf.EagerTensor, input_shape_) - tf.add_input(desc, input_indices_) - tf.add_input(desc, input_values_) - tf.add_input(desc, input_shape_) - desc["T"] = tf.data_type(input_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) - if tf.in_eager_mode() - sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) - else - sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) + begin + function sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseReorder") do + desc = tf.NodeDescription("SparseReorder") + begin + begin + input_indices_ = convert(Tensor{Int64}, input_indices_) + begin + end + end + begin + input_values_ = convert(Tensor{Any}, input_values_) + begin + end + end + begin + input_shape_ = convert(Tensor{Int64}, input_shape_) + begin + end + end + begin + (input_values_,) = tf.tf_promote(input_values_) + end + end + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=nothing) + desc = tf.EagerOp("SparseReorder") + input_indices_ = convert(tf.EagerTensor, input_indices_) + input_values_ = convert(tf.EagerTensor, input_values_) + input_shape_ = convert(tf.EagerTensor, input_shape_) + begin + begin + tf.add_input(desc, input_indices_) + end + begin + tf.add_input(desc, input_values_) + end + begin + tf.add_input(desc, input_shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_reorder, [input_indices_, input_values_, input_shape_], name=nothing, res) + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_reorder(input_indices_, input_values_, input_shape_; name=nothing) + if tf.in_eager_mode() + sparse_reorder_eager(input_indices_, input_values_, input_shape_; name=name) + else + sparse_reorder_graph(input_indices_, input_values_, input_shape_; name=name) + end end - end + end end @@ -49760,40 +90859,78 @@ end """ begin - function bitwise_xor_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseXor") do - desc = tf.NodeDescription("BitwiseXor") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function bitwise_xor_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseXor") do + desc = tf.NodeDescription("BitwiseXor") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bitwise_xor_eager(x_, y_; name=nothing) - desc = tf.EagerOp("BitwiseXor") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bitwise_xor_eager(x_, y_; name=nothing) + desc = tf.EagerOp("BitwiseXor") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(bitwise_xor, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_xor(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_xor_eager(x_, y_; name=name) - else - bitwise_xor_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_xor(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_xor_eager(x_, y_; name=name) + else + bitwise_xor_graph(x_, y_; name=name) + end end - end + end end @@ -49803,40 +90940,78 @@ end """ begin - function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) - local desc - tf.with_op_name(name, "BatchMatrixSetDiag") do - desc = tf.NodeDescription("BatchMatrixSetDiag") - input_ = convert(Tensor{Any}, input_) - diagonal_ = convert(Tensor{Any}, diagonal_) - (input_, diagonal_) = tf.tf_promote(input_, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) + begin + function batch_matrix_set_diag_graph(input_, diagonal_; name=nothing) + local desc + tf.with_op_name(name, "BatchMatrixSetDiag") do + desc = tf.NodeDescription("BatchMatrixSetDiag") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + diagonal_ = convert(Tensor{Any}, diagonal_) + begin + end + end + begin + (input_, diagonal_) = 
tf.tf_promote(input_, diagonal_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) - desc = tf.EagerOp("BatchMatrixSetDiag") - input_ = convert(tf.EagerTensor, input_) - diagonal_ = convert(tf.EagerTensor, diagonal_) - tf.add_input(desc, input_) - tf.add_input(desc, diagonal_) - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(diagonal_) - res = tf.execute(desc) - node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_matrix_set_diag_eager(input_, diagonal_; name=nothing) + desc = tf.EagerOp("BatchMatrixSetDiag") + input_ = convert(tf.EagerTensor, input_) + diagonal_ = convert(tf.EagerTensor, diagonal_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, diagonal_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(diagonal_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_matrix_set_diag, [input_, diagonal_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing) - if tf.in_eager_mode() - batch_matrix_set_diag_eager(input_, diagonal_; name=name) - else - batch_matrix_set_diag_graph(input_, diagonal_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_matrix_set_diag(input_, diagonal_; name=nothing) + if tf.in_eager_mode() + batch_matrix_set_diag_eager(input_, diagonal_; name=name) + else + batch_matrix_set_diag_graph(input_, diagonal_; name=name) + end end - end + end end @@ -49846,45 +91021,93 @@ end """ begin - function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableInsertV2") do - desc = tf.NodeDescription("LookupTableInsertV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) - desc = tf.EagerOp("LookupTableInsertV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_insert_v2_graph(table_handle_, keys_, values_; 
name=name) + begin + function lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableInsertV2") do + desc = tf.NodeDescription("LookupTableInsertV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=nothing) + desc = tf.EagerOp("LookupTableInsertV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_insert_v2, [table_handle_, keys_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_insert_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_insert_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_insert_v2_graph(table_handle_, keys_, values_; name=name) + end end - end + end end @@ -49894,53 +91117,101 @@ end """ begin - function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do - desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - row_shape_ = convert(Tensor{Int64}, row_shape_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, row_shape_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalDenseToSparseBatchDataset") do + desc = tf.NodeDescription("ExperimentalDenseToSparseBatchDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + row_shape_ = convert(Tensor{Int64}, row_shape_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, row_shape_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + 
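# Editor's note (a sketch): `output_types` and `output_shapes` are optional
# attributes and are only written into the NodeDescription when supplied, so
# a hypothetical call could look like:
#
#     experimental_dense_to_sparse_batch_dataset(ds, 32, [28, 28];
#         output_types=[Float32], output_shapes=[[28, 28]])
#
# where `ds`, the batch size, and the row shape are illustrative values.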
end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + row_shape_ = convert(tf.EagerTensor, row_shape_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, row_shape_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalDenseToSparseBatchDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - row_shape_ = convert(tf.EagerTensor, row_shape_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, row_shape_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_dense_to_sparse_batch_dataset, [input_dataset_, batch_size_, row_shape_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_dense_to_sparse_batch_dataset(input_dataset_, batch_size_, row_shape_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_dense_to_sparse_batch_dataset_eager(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_dense_to_sparse_batch_dataset_graph(input_dataset_, batch_size_, row_shape_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -49950,80 +91221,188 @@ end """ begin - function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, 
indices_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyRMSProp") do - desc = tf.NodeDescription("ResourceSparseApplyRMSProp") - var_ = convert(Tensor{Any}, var_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyRMSProp") - var_ = convert(tf.EagerTensor, var_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, var_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - res = tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) - else - resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyRMSProp") do + desc = tf.NodeDescription("ResourceSparseApplyRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) 
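# Editor's note (a sketch): each positional input is first coerced with
# `convert(Tensor{Any}, ...)`, so plain Julia arrays and scalars are accepted;
# `Tensor{Any}` defers the element type until `tf.tf_promote` below settles a
# common dtype for the hyperparameters (lr, rho, momentum, epsilon) and grad.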
+ begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyRMSProp") + var_ = convert(tf.EagerTensor, var_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_rms_prop, [var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_rms_prop(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_rms_prop_eager(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + else + resource_sparse_apply_rms_prop_graph(var_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_, indices_; name=name, use_locking=use_locking) + end end - end + end end @@ -50033,51 +91412,95 @@ end """ begin - function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, 
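# Editor's note (a sketch): `tf.with_op_name` runs the builder block inside a
# name scope, so the node registers as "RandomCrop" unless the caller passes
# `name`; e.g. a hypothetical `random_crop(img, [24, 24]; name="crop1")` would
# register the node under "crop1".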
"RandomCrop") do - desc = tf.NodeDescription("RandomCrop") - image_ = convert(Tensor{Any}, image_) - size_ = convert(Tensor{Int64}, size_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - tf.add_input(desc, size_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function random_crop_graph(image_, size_; name=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "RandomCrop") do + desc = tf.NodeDescription("RandomCrop") + begin + begin + image_ = convert(Tensor{Any}, image_) + begin + end + end + begin + size_ = convert(Tensor{Int64}, size_) + begin + end + end + begin + (image_,) = tf.tf_promote(image_) + end + end + begin + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) + desc = tf.EagerOp("RandomCrop") + image_ = convert(tf.EagerTensor, image_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + begin + desc["T"] = tf.data_type(image_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) + else + random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - tf.Tensor(tf.Operation(desc)) end - function random_crop_eager(image_, size_; name=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("RandomCrop") - image_ = convert(tf.EagerTensor, image_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, image_) - tf.add_input(desc, size_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - desc["T"] = tf.data_type(image_) - res = tf.execute(desc) - node = tf.TapeNode(random_crop, [image_, size_], name=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_crop(image_, size_; name=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - random_crop_eager(image_, size_; name=name, seed=seed, seed2=seed2) - else - random_crop_graph(image_, size_; name=name, seed=seed, seed2=seed2) - end - end end @@ -50087,45 +91510,93 @@ end """ begin - function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableImportV2") do - desc = tf.NodeDescription("LookupTableImportV2") - table_handle_ = convert(Tensor{Any}, table_handle_) - keys_ = convert(Tensor{Any}, keys_) - values_ = convert(Tensor{Any}, values_) - (keys_,) = tf.tf_promote(keys_) - 
(values_,) = tf.tf_promote(values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - end - tf.Tensor(tf.Operation(desc)) - end - function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) - desc = tf.EagerOp("LookupTableImportV2") - table_handle_ = convert(tf.EagerTensor, table_handle_) - keys_ = convert(tf.EagerTensor, keys_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, table_handle_) - tf.add_input(desc, keys_) - tf.add_input(desc, values_) - desc["Tin"] = tf.data_type(keys_) - desc["Tout"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) - if tf.in_eager_mode() - lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) - else - lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) + begin + function lookup_table_import_v2_graph(table_handle_, keys_, values_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableImportV2") do + desc = tf.NodeDescription("LookupTableImportV2") + begin + begin + table_handle_ = convert(Tensor{Any}, table_handle_) + begin + end + end + begin + keys_ = convert(Tensor{Any}, keys_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (keys_,) = tf.tf_promote(keys_) + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lookup_table_import_v2_eager(table_handle_, keys_, values_; name=nothing) + desc = tf.EagerOp("LookupTableImportV2") + table_handle_ = convert(tf.EagerTensor, table_handle_) + keys_ = convert(tf.EagerTensor, keys_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, table_handle_) + end + begin + tf.add_input(desc, keys_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["Tin"] = tf.data_type(keys_) + end + begin + desc["Tout"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_import_v2, [table_handle_, keys_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_import_v2(table_handle_, keys_, values_; name=nothing) + if tf.in_eager_mode() + lookup_table_import_v2_eager(table_handle_, keys_, values_; name=name) + else + lookup_table_import_v2_graph(table_handle_, keys_, values_; name=name) + end end - end + end end @@ -50135,52 +91606,104 @@ end """ begin - function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceScatterNdUpdate") do - desc = tf.NodeDescription("ResourceScatterNdUpdate") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = 
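# Editor's note (a sketch): ResourceScatterNdUpdate writes `updates_` into the
# resource tensor `ref_` at the given n-d `indices_`. The `indices_ - 1` line
# above shifts Julia's 1-based indices to TensorFlow's 0-based convention, and
# the optional `use_locking` attribute guards against concurrent writes.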
tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_scatter_nd_update_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceScatterNdUpdate") do + desc = tf.NodeDescription("ResourceScatterNdUpdate") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceScatterNdUpdate") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_nd_update_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceScatterNdUpdate") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_nd_update, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_nd_update(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_scatter_nd_update_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - 
resource_scatter_nd_update_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -50190,39 +91713,67 @@ end """ begin - function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) - local desc - tf.with_op_name(name, "StaticRegexFullMatch") do - desc = tf.NodeDescription("StaticRegexFullMatch") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) + begin + function static_regex_full_match_graph(input_; name=nothing, pattern=nothing) + local desc + tf.with_op_name(name, "StaticRegexFullMatch") do + desc = tf.NodeDescription("StaticRegexFullMatch") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function static_regex_full_match_eager(input_; name=nothing, pattern=nothing) - desc = tf.EagerOp("StaticRegexFullMatch") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if pattern !== nothing - desc["pattern"] = Base.String(pattern) - end - res = tf.execute(desc) - node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function static_regex_full_match_eager(input_; name=nothing, pattern=nothing) + desc = tf.EagerOp("StaticRegexFullMatch") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pattern !== nothing + desc["pattern"] = Base.String(pattern) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(static_regex_full_match, [input_], name=nothing, pattern=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing) - if tf.in_eager_mode() - static_regex_full_match_eager(input_; name=name, pattern=pattern) - else - static_regex_full_match_graph(input_; name=name, pattern=pattern) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function static_regex_full_match(input_; name=nothing, pattern=nothing) + if tf.in_eager_mode() + static_regex_full_match_eager(input_; name=name, pattern=pattern) + else + static_regex_full_match_graph(input_; name=name, pattern=pattern) + end end - end + end end @@ -50232,33 +91783,57 @@ end Configures the credentials used by the GCS client of the local TF runtime. 
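A hypothetical usage sketch (the file name and its JSON payload are
illustrative, not a real credential):

    creds_json = read("service_account.json", String)
    gcs_configure_credentials(creds_json)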
""" begin - function gcs_configure_credentials_graph(json_; name=nothing) - local desc - tf.with_op_name(name, "GcsConfigureCredentials") do - desc = tf.NodeDescription("GcsConfigureCredentials") - json_ = convert(Tensor{String}, json_) - tf.add_input(desc, json_) + begin + function gcs_configure_credentials_graph(json_; name=nothing) + local desc + tf.with_op_name(name, "GcsConfigureCredentials") do + desc = tf.NodeDescription("GcsConfigureCredentials") + begin + begin + json_ = convert(Tensor{String}, json_) + begin + end + end + end + begin + begin + tf.add_input(desc, json_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function gcs_configure_credentials_eager(json_; name=nothing) - desc = tf.EagerOp("GcsConfigureCredentials") - json_ = convert(tf.EagerTensor, json_) - tf.add_input(desc, json_) - res = tf.execute(desc) - node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function gcs_configure_credentials_eager(json_; name=nothing) + desc = tf.EagerOp("GcsConfigureCredentials") + json_ = convert(tf.EagerTensor, json_) + begin + begin + tf.add_input(desc, json_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(gcs_configure_credentials, [json_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_credentials(json_; name=nothing) - if tf.in_eager_mode() - gcs_configure_credentials_eager(json_; name=name) - else - gcs_configure_credentials_graph(json_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function gcs_configure_credentials(json_; name=nothing) + if tf.in_eager_mode() + gcs_configure_credentials_eager(json_; name=name) + else + gcs_configure_credentials_graph(json_; name=name) + end end - end + end end @@ -50268,37 +91843,69 @@ end """ begin - function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySizeV3") do - desc = tf.NodeDescription("TensorArraySizeV3") - handle_ = convert(Tensor{Any}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) + begin + function tensor_array_size_v3_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV3") do + desc = tf.NodeDescription("TensorArraySizeV3") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySizeV3") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_array_size_v3_eager(handle_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySizeV3") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ 
= convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_size_v3, [handle_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_v3_eager(handle_, flow_in_; name=name) - else - tensor_array_size_v3_graph(handle_, flow_in_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v3(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_v3_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v3_graph(handle_, flow_in_; name=name) + end end - end + end end @@ -50308,52 +91915,112 @@ end """ begin - function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do - desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") - data_ = convert(Tensor{Any}, data_) - indices_ = convert(Tensor{Int32}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - segment_ids_ = convert(Tensor{Int32}, segment_ids_) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") - data_ = convert(tf.EagerTensor, data_) - indices_ = convert(tf.EagerTensor, indices_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, indices_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tidx"] = tf.data_type(indices_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) - else - sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + begin + function sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "SparseSegmentSqrtNWithNumSegments") do + desc = tf.NodeDescription("SparseSegmentSqrtNWithNumSegments") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + indices_ = indices_ - 
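# Editor's note (a sketch): this subtraction converts the caller's 1-based
# Julia indices to TensorFlow's 0-based indexing before the op runs; e.g. a
# hypothetical `indices_ = [1, 2, 4]` reaches the kernel as `[0, 1, 3]`.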
convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + segment_ids_ = convert(Tensor{Int32}, segment_ids_) + begin + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("SparseSegmentSqrtNWithNumSegments") + data_ = convert(tf.EagerTensor, data_) + indices_ = convert(tf.EagerTensor, indices_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tidx"] = tf.data_type(indices_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_segment_sqrt_n_with_num_segments, [data_, indices_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_segment_sqrt_n_with_num_segments(data_, indices_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + sparse_segment_sqrt_n_with_num_segments_eager(data_, indices_, segment_ids_, num_segments_; name=name) + else + sparse_segment_sqrt_n_with_num_segments_graph(data_, indices_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -50363,74 +92030,140 @@ end """ begin - function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv2DBackpropFilter") do - desc = tf.NodeDescription("Conv2DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv2DBackpropFilter") do + desc = tf.NodeDescription("Conv2DBackpropFilter") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, 
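# Editor's note (a sketch): Conv2DBackpropFilter computes the gradient of a
# 2-D convolution with respect to its filter. `filter_sizes_` is an Int32
# shape vector (a hypothetical `[3, 3, 16, 32]` would mean a 3x3 kernel with
# 16 input and 32 output channels), and `strides`, `padding`, `data_format`,
# and `dilations` mirror the forward Conv2D attributes.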
out_backprop_) + begin + end + end + begin + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("Conv2DBackpropFilter") + input_ = convert(tf.EagerTensor, input_) + filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if use_cudnn_on_gpu !== nothing + desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + else + conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - end - tf.Tensor(tf.Operation(desc)) - end - function conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("Conv2DBackpropFilter") - input_ = convert(tf.EagerTensor, input_) - filter_sizes_ = convert(tf.EagerTensor, 
filter_sizes_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if use_cudnn_on_gpu !== nothing - desc["use_cudnn_on_gpu"] = Base.Bool(use_cudnn_on_gpu) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv2d_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv2d_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, use_cudnn_on_gpu=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - conv2d_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - else - conv2d_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, use_cudnn_on_gpu=use_cudnn_on_gpu, padding=padding, data_format=data_format, dilations=dilations) - end - end end @@ -50440,109 +92173,205 @@ end """ begin - function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do - desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] - init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] - reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] - finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, init_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, finalize_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) + begin + function experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, 
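# Editor's note (a sketch): this dataset op carries four function-valued
# attributes (key_func, init_func, reduce_func, finalize_func); each matching
# `T*_other_arguments` list declares the dtypes of the captured tensors that
# are passed alongside that function.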
output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalGroupByReducerDataset") do + desc = tf.NodeDescription("ExperimentalGroupByReducerDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + key_func_other_arguments_ = [convert(Tensor{Any}, x) for x = key_func_other_arguments_] + begin + end + end + begin + init_func_other_arguments_ = [convert(Tensor{Any}, x) for x = init_func_other_arguments_] + begin + end + end + begin + reduce_func_other_arguments_ = [convert(Tensor{Any}, x) for x = reduce_func_other_arguments_] + begin + end + end + begin + finalize_func_other_arguments_ = [convert(Tensor{Any}, x) for x = finalize_func_other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, key_func_other_arguments_) + end + begin + tf.add_input(desc, init_func_other_arguments_) + end + begin + tf.add_input(desc, reduce_func_other_arguments_) + end + begin + tf.add_input(desc, finalize_func_other_arguments_) + end + end + begin + begin + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + end + begin + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + end + begin + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + end + begin + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + end + begin + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + end + begin + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + end + begin + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + end + begin + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalGroupByReducerDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) + init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_) + reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) + finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, key_func_other_arguments_) + end + begin + tf.add_input(desc, init_func_other_arguments_) + end + begin + tf.add_input(desc, reduce_func_other_arguments_) + 
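# Editor's note (a sketch): the `*_other_arguments_` inputs are tensor lists
# (the graph version builds them with `[convert(Tensor{Any}, x) for x = ...]`),
# and each list is added to the op as a single list-typed input; the eager
# path here converts the captured collections with `convert(tf.EagerTensor, ...)`.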
end + begin + tf.add_input(desc, finalize_func_other_arguments_) + end + end + begin + begin + if key_func !== nothing + desc["key_func"] = Base.identity(key_func) + end + end + begin + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + end + begin + if reduce_func !== nothing + desc["reduce_func"] = Base.identity(reduce_func) + end + end + begin + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + end + begin + if Tkey_func_other_arguments !== nothing + desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) + end + end + begin + if Tinit_func_other_arguments !== nothing + desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) + end + end + begin + if Treduce_func_other_arguments !== nothing + desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) + end + end + begin + if Tfinalize_func_other_arguments !== nothing + desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + else + experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, 
Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) + end end - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Tinit_func_other_arguments !== nothing - desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Tfinalize_func_other_arguments !== nothing - desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalGroupByReducerDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - key_func_other_arguments_ = convert(tf.EagerTensor, key_func_other_arguments_) - init_func_other_arguments_ = convert(tf.EagerTensor, init_func_other_arguments_) - reduce_func_other_arguments_ = convert(tf.EagerTensor, reduce_func_other_arguments_) - finalize_func_other_arguments_ = convert(tf.EagerTensor, finalize_func_other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, key_func_other_arguments_) - tf.add_input(desc, init_func_other_arguments_) - tf.add_input(desc, reduce_func_other_arguments_) - tf.add_input(desc, finalize_func_other_arguments_) - if key_func !== nothing - desc["key_func"] = Base.identity(key_func) - end - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if reduce_func !== nothing - desc["reduce_func"] = Base.identity(reduce_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tkey_func_other_arguments !== nothing - desc["Tkey_func_other_arguments"] = map(Base.identity, Tkey_func_other_arguments) - end - if Tinit_func_other_arguments !== nothing - desc["Tinit_func_other_arguments"] = map(Base.identity, Tinit_func_other_arguments) - end - if Treduce_func_other_arguments !== nothing - desc["Treduce_func_other_arguments"] = map(Base.identity, Treduce_func_other_arguments) - end - if Tfinalize_func_other_arguments !== nothing - desc["Tfinalize_func_other_arguments"] = map(Base.identity, Tfinalize_func_other_arguments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_group_by_reducer_dataset, [input_dataset_, 
key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_], name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_group_by_reducer_dataset(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=nothing, key_func=nothing, init_func=nothing, reduce_func=nothing, finalize_func=nothing, Tkey_func_other_arguments=nothing, Tinit_func_other_arguments=nothing, Treduce_func_other_arguments=nothing, Tfinalize_func_other_arguments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_group_by_reducer_dataset_eager(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - else - experimental_group_by_reducer_dataset_graph(input_dataset_, key_func_other_arguments_, init_func_other_arguments_, reduce_func_other_arguments_, finalize_func_other_arguments_; name=name, key_func=key_func, init_func=init_func, reduce_func=reduce_func, finalize_func=finalize_func, Tkey_func_other_arguments=Tkey_func_other_arguments, Tinit_func_other_arguments=Tinit_func_other_arguments, Treduce_func_other_arguments=Treduce_func_other_arguments, Tfinalize_func_other_arguments=Tfinalize_func_other_arguments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -50552,69 +92381,133 @@ end """ begin - function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPoolGrad") do - desc = tf.NodeDescription("MaxPoolGrad") - orig_input_ = convert(Tensor{Float32}, orig_input_) - orig_output_ = convert(Tensor{Float32}, orig_output_) - grad_ = convert(Tensor{Float32}, grad_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) + begin + function max_pool_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPoolGrad") do + desc = tf.NodeDescription("MaxPoolGrad") + begin + begin + orig_input_ = convert(Tensor{Float32}, orig_input_) + begin + end + end + begin + orig_output_ = convert(Tensor{Float32}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Float32}, grad_) + begin + end + end + begin + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + end + end + begin + begin + 
tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPoolGrad") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPoolGrad") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(grad_) - res = 
tf.execute(desc) - node = tf.TapeNode(max_pool_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -50624,33 +92517,57 @@ end An op that connects each chip on the host to a centralized UberDriver to allow """ begin - function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "_InitializeHostForDistributedTPU") do - desc = tf.NodeDescription("_InitializeHostForDistributedTPU") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) + begin + function _initialize_host_for_distributed_tpu_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "_InitializeHostForDistributedTPU") do + desc = tf.NodeDescription("_InitializeHostForDistributedTPU") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) - desc = tf.EagerOp("_InitializeHostForDistributedTPU") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _initialize_host_for_distributed_tpu_eager(input_; name=nothing) + desc = tf.EagerOp("_InitializeHostForDistributedTPU") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(_initialize_host_for_distributed_tpu, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing) - if tf.in_eager_mode() - _initialize_host_for_distributed_tpu_eager(input_; name=name) - else - _initialize_host_for_distributed_tpu_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _initialize_host_for_distributed_tpu(input_; name=nothing) + if tf.in_eager_mode() + _initialize_host_for_distributed_tpu_eager(input_; name=name) + else + _initialize_host_for_distributed_tpu_graph(input_; name=name) + end end - end + end end @@ -50660,63 +92577,107 @@ end """ begin - function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "StagePeek") do - desc = tf.NodeDescription("StagePeek") - index_ = convert(Tensor{Int32}, index_) - tf.add_input(desc, index_) - if capacity !== nothing - desc["capacity"] = 
Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function stage_peek_graph(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "StagePeek") do + desc = tf.NodeDescription("StagePeek") + begin + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + end + begin + begin + tf.add_input(desc, index_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("StagePeek") + index_ = convert(tf.EagerTensor, index_) + begin + begin + tf.add_input(desc, index_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function stage_peek_eager(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("StagePeek") - index_ = convert(tf.EagerTensor, index_) - tf.add_input(desc, index_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = 
tf.execute(desc) - node = tf.TapeNode(stage_peek, [index_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stage_peek(index_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - stage_peek_eager(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - stage_peek_graph(index_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -50726,46 +92687,96 @@ end """ begin - function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) - local desc - tf.with_op_name(name, "PadV2") do - desc = tf.NodeDescription("PadV2") - input_ = convert(Tensor{Any}, input_) - paddings_ = convert(Tensor{Int32}, paddings_) - constant_values_ = convert(Tensor{Any}, constant_values_) - (input_, constant_values_) = tf.tf_promote(input_, constant_values_) - (paddings_,) = tf.tf_promote(paddings_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, constant_values_) - end - tf.Tensor(tf.Operation(desc)) - end - function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) - desc = tf.EagerOp("PadV2") - input_ = convert(tf.EagerTensor, input_) - paddings_ = convert(tf.EagerTensor, paddings_) - constant_values_ = convert(tf.EagerTensor, constant_values_) - tf.add_input(desc, input_) - tf.add_input(desc, paddings_) - tf.add_input(desc, constant_values_) - desc["T"] = tf.data_type(input_) - desc["Tpaddings"] = tf.data_type(paddings_) - desc["T"] = tf.data_type(constant_values_) - res = tf.execute(desc) - node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing) - if tf.in_eager_mode() - pad_v2_eager(input_, paddings_, constant_values_; name=name) - else - pad_v2_graph(input_, paddings_, constant_values_; name=name) + begin + function pad_v2_graph(input_, paddings_, constant_values_; name=nothing) + local desc + tf.with_op_name(name, "PadV2") do + desc = tf.NodeDescription("PadV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + paddings_ = convert(Tensor{Int32}, paddings_) + begin + end + end + begin + constant_values_ = convert(Tensor{Any}, constant_values_) + begin + end + end + begin + (input_, constant_values_) = tf.tf_promote(input_, constant_values_) + end + begin + (paddings_,) = tf.tf_promote(paddings_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + begin + tf.add_input(desc, constant_values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function pad_v2_eager(input_, paddings_, constant_values_; name=nothing) + desc = tf.EagerOp("PadV2") + input_ = convert(tf.EagerTensor, input_) + paddings_ = convert(tf.EagerTensor, paddings_) + constant_values_ = convert(tf.EagerTensor, constant_values_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, paddings_) + end + begin + 
tf.add_input(desc, constant_values_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tpaddings"] = tf.data_type(paddings_) + end + begin + desc["T"] = tf.data_type(constant_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(pad_v2, [input_, paddings_, constant_values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pad_v2(input_, paddings_, constant_values_; name=nothing) + if tf.in_eager_mode() + pad_v2_eager(input_, paddings_, constant_values_; name=name) + else + pad_v2_graph(input_, paddings_, constant_values_; name=name) + end end - end + end end @@ -50775,45 +92786,77 @@ end """ begin - function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "OptionalGetValue") do - desc = tf.NodeDescription("OptionalGetValue") - optional_ = convert(Tensor{Any}, optional_) - tf.add_input(desc, optional_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function optional_get_value_graph(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "OptionalGetValue") do + desc = tf.NodeDescription("OptionalGetValue") + begin + begin + optional_ = convert(Tensor{Any}, optional_) + begin + end + end + end + begin + begin + tf.add_input(desc, optional_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("OptionalGetValue") + optional_ = convert(tf.EagerTensor, optional_) + begin + begin + tf.add_input(desc, optional_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + else + optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function optional_get_value_eager(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("OptionalGetValue") - optional_ = convert(tf.EagerTensor, optional_) - tf.add_input(desc, optional_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end 
- res = tf.execute(desc) - node = tf.TapeNode(optional_get_value, [optional_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function optional_get_value(optional_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - optional_get_value_eager(optional_; name=name, output_types=output_types, output_shapes=output_shapes) - else - optional_get_value_graph(optional_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -50823,39 +92866,67 @@ end """ begin - function print_v2_graph(input_; name=nothing, output_stream=nothing) - local desc - tf.with_op_name(name, "PrintV2") do - desc = tf.NodeDescription("PrintV2") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if output_stream !== nothing - desc["output_stream"] = Base.String(output_stream) + begin + function print_v2_graph(input_; name=nothing, output_stream=nothing) + local desc + tf.with_op_name(name, "PrintV2") do + desc = tf.NodeDescription("PrintV2") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if output_stream !== nothing + desc["output_stream"] = Base.String(output_stream) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function print_v2_eager(input_; name=nothing, output_stream=nothing) - desc = tf.EagerOp("PrintV2") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if output_stream !== nothing - desc["output_stream"] = Base.String(output_stream) - end - res = tf.execute(desc) - node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function print_v2_eager(input_; name=nothing, output_stream=nothing) + desc = tf.EagerOp("PrintV2") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if output_stream !== nothing + desc["output_stream"] = Base.String(output_stream) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(print_v2, [input_], name=nothing, output_stream=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing) - if tf.in_eager_mode() - print_v2_eager(input_; name=name, output_stream=output_stream) - else - print_v2_graph(input_; name=name, output_stream=output_stream) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function print_v2(input_; name=nothing, output_stream=nothing) + if tf.in_eager_mode() + print_v2_eager(input_; name=name, output_stream=output_stream) + else + print_v2_graph(input_; name=name, output_stream=output_stream) + end end - end + end end @@ -50865,41 +92936,65 @@ end Creates an empty Tensor with shape `shape` and type `dtype`. 
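An editor's sketch of a call, not part of the generated docstring: per the signature below, `shape` and `dtype` are op attributes, so the wrapper takes them as keyword arguments (the values here are illustrative).

    # Allocate an uninitialized 2x3 Float32 buffer for ParallelConcat to fill.
    # Via tf.@op this dispatches to _parallel_concat_start_eager when
    # tf.in_eager_mode() is true, and otherwise builds a _ParallelConcatStart node.
    buf = _parallel_concat_start(shape=[2, 3], dtype=Float32)

This is an internal op (leading underscore) used by the ParallelConcat implementation; user code rarely calls it directly.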
""" begin - function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "_ParallelConcatStart") do - desc = tf.NodeDescription("_ParallelConcatStart") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function _parallel_concat_start_graph(; name=nothing, shape=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatStart") do + desc = tf.NodeDescription("_ParallelConcatStart") + begin + end + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) - desc = tf.EagerOp("_ParallelConcatStart") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _parallel_concat_start_eager(; name=nothing, shape=nothing, dtype=nothing) + desc = tf.EagerOp("_ParallelConcatStart") + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_parallel_concat_start, [], name=nothing, shape=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) - if tf.in_eager_mode() - _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) - else - _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_start(; name=nothing, shape=nothing, dtype=nothing) + if tf.in_eager_mode() + _parallel_concat_start_eager(; name=name, shape=shape, dtype=dtype) + else + _parallel_concat_start_graph(; name=name, shape=shape, dtype=dtype) + end end - end + end end @@ -50909,65 +93004,121 @@ end Load embedding parameters for a single table. 
""" begin - function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - accumulators_ = convert(Tensor{Float32}, accumulators_) - linears_ = convert(Tensor{Float32}, linears_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) + begin + function load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingFTRLParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingFTRLParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + accumulators_ = convert(Tensor{Float32}, accumulators_) + begin + end + end + begin + linears_ = convert(Tensor{Float32}, linears_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, linears_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + accumulators_ = convert(tf.EagerTensor, accumulators_) + linears_ = convert(tf.EagerTensor, linears_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, accumulators_) + end + begin + tf.add_input(desc, linears_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + 
load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) end - function load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingFTRLParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - accumulators_ = convert(tf.EagerTensor, accumulators_) - linears_ = convert(tf.EagerTensor, linears_) - tf.add_input(desc, parameters_) - tf.add_input(desc, accumulators_) - tf.add_input(desc, linears_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_ftrl_parameters, [parameters_, accumulators_, linears_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_ftrl_parameters(parameters_, accumulators_, linears_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_ftrl_parameters_eager(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_ftrl_parameters_graph(parameters_, accumulators_, linears_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -50977,56 +93128,118 @@ end """ begin - function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) - local desc - tf.with_op_name(name, "SparseSlice") do - desc = tf.NodeDescription("SparseSlice") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - shape_ = convert(Tensor{Int64}, shape_) - start_ = convert(Tensor{Int64}, start_) - size_ = convert(Tensor{Int64}, size_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - tf.add_input(desc, start_) - tf.add_input(desc, size_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) - desc = tf.EagerOp("SparseSlice") - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - shape_ = convert(tf.EagerTensor, shape_) - start_ = convert(tf.EagerTensor, start_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - tf.add_input(desc, start_) - tf.add_input(desc, size_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_slice, [indices_, values_, 
shape_, start_, size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) - if tf.in_eager_mode() - sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) - else - sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) + begin + function sparse_slice_graph(indices_, values_, shape_, start_, size_; name=nothing) + local desc + tf.with_op_name(name, "SparseSlice") do + desc = tf.NodeDescription("SparseSlice") + begin + begin + indices_ = convert(Tensor{Int64}, indices_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + shape_ = convert(Tensor{Int64}, shape_) + begin + end + end + begin + start_ = convert(Tensor{Int64}, start_) + begin + end + end + begin + size_ = convert(Tensor{Int64}, size_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, size_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_slice_eager(indices_, values_, shape_, start_, size_; name=nothing) + desc = tf.EagerOp("SparseSlice") + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shape_ = convert(tf.EagerTensor, shape_) + start_ = convert(tf.EagerTensor, start_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, size_) + end + end + begin + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_slice, [indices_, values_, shape_, start_, size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_slice(indices_, values_, shape_, start_, size_; name=nothing) + if tf.in_eager_mode() + sparse_slice_eager(indices_, values_, shape_, start_, size_; name=name) + else + sparse_slice_graph(indices_, values_, shape_, start_, size_; name=name) + end end - end + end end @@ -51036,52 +93249,98 @@ end """ begin - function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesMakeQuantileSummaries") do - desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") - float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - epsilon_ = convert(Tensor{Float32}, epsilon_) - tf.add_input(desc, float_values_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, epsilon_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, 
"BoostedTreesMakeQuantileSummaries") do + desc = tf.NodeDescription("BoostedTreesMakeQuantileSummaries") + begin + begin + float_values_ = [convert(Tensor{Float32}, x) for x = float_values_] + begin + end + end + begin + example_weights_ = convert(Tensor{Float32}, example_weights_) + begin + end + end + begin + epsilon_ = convert(Tensor{Float32}, epsilon_) + begin + end + end + end + begin + begin + tf.add_input(desc, float_values_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, epsilon_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:num_features + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") + float_values_ = convert(tf.EagerTensor, float_values_) + example_weights_ = convert(tf.EagerTensor, example_weights_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + begin + begin + tf.add_input(desc, float_values_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, epsilon_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + else + boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:num_features - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesMakeQuantileSummaries") - float_values_ = convert(tf.EagerTensor, float_values_) - example_weights_ = convert(tf.EagerTensor, example_weights_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - tf.add_input(desc, float_values_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, epsilon_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_make_quantile_summaries, [float_values_, example_weights_, epsilon_], name=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_make_quantile_summaries(float_values_, example_weights_, epsilon_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_make_quantile_summaries_eager(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) - else - 
boosted_trees_make_quantile_summaries_graph(float_values_, example_weights_, epsilon_; name=name, num_features=num_features) - end - end end @@ -51091,46 +93350,88 @@ end """ begin - function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixSolve") do - desc = tf.NodeDescription("MatrixSolve") - matrix_ = convert(Tensor{Any}, matrix_) - rhs_ = convert(Tensor{Any}, rhs_) - (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) + begin + function matrix_solve_graph(matrix_, rhs_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixSolve") do + desc = tf.NodeDescription("MatrixSolve") + begin + begin + matrix_ = convert(Tensor{Any}, matrix_) + begin + end + end + begin + rhs_ = convert(Tensor{Any}, rhs_) + begin + end + end + begin + (matrix_, rhs_) = tf.tf_promote(matrix_, rhs_) + end + end + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixSolve") + matrix_ = convert(tf.EagerTensor, matrix_) + rhs_ = convert(tf.EagerTensor, rhs_) + begin + begin + tf.add_input(desc, matrix_) + end + begin + tf.add_input(desc, rhs_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(matrix_) + end + begin + desc["T"] = tf.data_type(rhs_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) + else + matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function matrix_solve_eager(matrix_, rhs_; name=nothing, adjoint=nothing) - desc = tf.EagerOp("MatrixSolve") - matrix_ = convert(tf.EagerTensor, matrix_) - rhs_ = convert(tf.EagerTensor, rhs_) - tf.add_input(desc, matrix_) - tf.add_input(desc, rhs_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(matrix_) - desc["T"] = tf.data_type(rhs_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_solve, [matrix_, rhs_], name=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_solve(matrix_, rhs_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - matrix_solve_eager(matrix_, rhs_; name=name, adjoint=adjoint) - else - matrix_solve_graph(matrix_, rhs_; name=name, adjoint=adjoint) - end - end end @@ -51140,39 +93441,67 @@ end An op that sets up the centralized structures for a distributed TPU """ begin - function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) - local desc - tf.with_op_name(name, "_ConfigureDistributedTPU") do - desc = tf.NodeDescription("_ConfigureDistributedTPU") - inputs_ = [convert(Tensor{Int32}, 
x) for x = inputs_] - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function _configure_distributed_tpu_graph(inputs_; name=nothing, N=nothing) + local desc + tf.with_op_name(name, "_ConfigureDistributedTPU") do + desc = tf.NodeDescription("_ConfigureDistributedTPU") + begin + begin + inputs_ = [convert(Tensor{Int32}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) - desc = tf.EagerOp("_ConfigureDistributedTPU") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - res = tf.execute(desc) - node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function _configure_distributed_tpu_eager(inputs_; name=nothing, N=nothing) + desc = tf.EagerOp("_ConfigureDistributedTPU") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(_configure_distributed_tpu, [inputs_], name=nothing, N=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) - if tf.in_eager_mode() - _configure_distributed_tpu_eager(inputs_; name=name, N=N) - else - _configure_distributed_tpu_graph(inputs_; name=name, N=N) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _configure_distributed_tpu(inputs_; name=nothing, N=nothing) + if tf.in_eager_mode() + _configure_distributed_tpu_eager(inputs_; name=name, N=N) + else + _configure_distributed_tpu_graph(inputs_; name=name, N=N) + end end - end + end end @@ -51182,37 +93511,69 @@ end """ begin - function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) - local desc - tf.with_op_name(name, "AdjustContrastv2") do - desc = tf.NodeDescription("AdjustContrastv2") - images_ = convert(Tensor{Float32}, images_) - contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) + begin + function adjust_contrastv2_graph(images_, contrast_factor_; name=nothing) + local desc + tf.with_op_name(name, "AdjustContrastv2") do + desc = tf.NodeDescription("AdjustContrastv2") + begin + begin + images_ = convert(Tensor{Float32}, images_) + begin + end + end + begin + contrast_factor_ = convert(Tensor{Float32}, contrast_factor_) + begin + end + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, contrast_factor_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing) - desc = tf.EagerOp("AdjustContrastv2") - images_ = convert(tf.EagerTensor, images_) - contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) - tf.add_input(desc, images_) - tf.add_input(desc, contrast_factor_) - res = tf.execute(desc) - node = tf.TapeNode(adjust_contrastv2, [images_, 
contrast_factor_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function adjust_contrastv2_eager(images_, contrast_factor_; name=nothing) + desc = tf.EagerOp("AdjustContrastv2") + images_ = convert(tf.EagerTensor, images_) + contrast_factor_ = convert(tf.EagerTensor, contrast_factor_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, contrast_factor_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(adjust_contrastv2, [images_, contrast_factor_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing) - if tf.in_eager_mode() - adjust_contrastv2_eager(images_, contrast_factor_; name=name) - else - adjust_contrastv2_graph(images_, contrast_factor_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function adjust_contrastv2(images_, contrast_factor_; name=nothing) + if tf.in_eager_mode() + adjust_contrastv2_eager(images_, contrast_factor_; name=name) + else + adjust_contrastv2_graph(images_, contrast_factor_; name=name) + end end - end + end end @@ -51222,53 +93583,109 @@ end Returns the max of x and y (i.e. x > y ? x : y) element-wise. """ begin - function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklMaximum") do - desc = tf.NodeDescription("_MklMaximum") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) - desc = tf.EagerOp("_MklMaximum") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - mkl_x_ = convert(tf.EagerTensor, mkl_x_) - mkl_y_ = convert(tf.EagerTensor, mkl_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) + begin + function _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklMaximum") do + desc = tf.NodeDescription("_MklMaximum") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + begin + end + end + begin + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + 
end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) + desc = tf.EagerOp("_MklMaximum") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(_mkl_maximum, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_maximum(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_maximum_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_maximum_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end + end end @@ -51278,83 +93695,151 @@ end """ begin - function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - local desc - tf.with_op_name(name, "CudnnRNNParamsSize") do - desc = tf.NodeDescription("CudnnRNNParamsSize") - num_layers_ = convert(Tensor{Int32}, num_layers_) - num_units_ = convert(Tensor{Int32}, num_units_) - input_size_ = convert(Tensor{Int32}, input_size_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - if S !== nothing - desc["S"] = Base.identity(S) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) + begin + function cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + local desc + tf.with_op_name(name, "CudnnRNNParamsSize") do + desc = tf.NodeDescription("CudnnRNNParamsSize") + begin + begin + num_layers_ = convert(Tensor{Int32}, num_layers_) + begin + end + end + begin + num_units_ = convert(Tensor{Int32}, num_units_) + begin + end + end + begin + input_size_ = convert(Tensor{Int32}, input_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, num_layers_) + end + begin + tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + end + begin + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + 
desc = tf.EagerOp("CudnnRNNParamsSize") + num_layers_ = convert(tf.EagerTensor, num_layers_) + num_units_ = convert(tf.EagerTensor, num_units_) + input_size_ = convert(tf.EagerTensor, input_size_) + begin + begin + tf.add_input(desc, num_layers_) + end + begin + tf.add_input(desc, num_units_) + end + begin + tf.add_input(desc, input_size_) + end + end + begin + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + begin + if rnn_mode !== nothing + desc["rnn_mode"] = Base.String(rnn_mode) + end + end + begin + if input_mode !== nothing + desc["input_mode"] = Base.String(input_mode) + end + end + begin + if direction !== nothing + desc["direction"] = Base.String(direction) + end + end + begin + if dropout !== nothing + desc["dropout"] = Base.identity(dropout) + end + end + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) + if tf.in_eager_mode() + cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + else + cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) + end end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - end - tf.Tensor(tf.Operation(desc)) - end - function cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - desc = tf.EagerOp("CudnnRNNParamsSize") - num_layers_ = convert(tf.EagerTensor, num_layers_) - num_units_ = convert(tf.EagerTensor, num_units_) - input_size_ = convert(tf.EagerTensor, input_size_) - tf.add_input(desc, num_layers_) - tf.add_input(desc, num_units_) - tf.add_input(desc, input_size_) - if S !== nothing - desc["S"] = Base.identity(S) - end - if rnn_mode !== nothing - desc["rnn_mode"] = Base.String(rnn_mode) - end - if input_mode !== nothing - desc["input_mode"] = Base.String(input_mode) - end - if direction !== nothing - desc["direction"] = Base.String(direction) - end - if dropout !== nothing - desc["dropout"] = Base.identity(dropout) - end - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - res = tf.execute(desc) - node = tf.TapeNode(cudnn_rnn_params_size, [num_layers_, num_units_, input_size_], name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, 
direction=nothing, dropout=nothing, seed=nothing, seed2=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cudnn_rnn_params_size(num_layers_, num_units_, input_size_; name=nothing, S=nothing, rnn_mode=nothing, input_mode=nothing, direction=nothing, dropout=nothing, seed=nothing, seed2=nothing) - if tf.in_eager_mode() - cudnn_rnn_params_size_eager(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - else - cudnn_rnn_params_size_graph(num_layers_, num_units_, input_size_; name=name, S=S, rnn_mode=rnn_mode, input_mode=input_mode, direction=direction, dropout=dropout, seed=seed, seed2=seed2) - end - end end @@ -51364,43 +93849,79 @@ end """ begin - function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - local desc - tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do - desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - summaries_ = [convert(Tensor{Float32}, x) for x = summaries_] - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, summaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) + begin + function boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + local desc + tf.with_op_name(name, "BoostedTreesQuantileStreamResourceAddSummaries") do + desc = tf.NodeDescription("BoostedTreesQuantileStreamResourceAddSummaries") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + begin + summaries_ = [convert(Tensor{Float32}, x) for x = summaries_] + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, summaries_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + summaries_ = convert(tf.EagerTensor, summaries_) + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, summaries_) + end + end + begin + begin + if num_features !== nothing + desc["num_features"] = Base.Int(num_features) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) + if tf.in_eager_mode() + 
boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + else + boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - desc = tf.EagerOp("BoostedTreesQuantileStreamResourceAddSummaries") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - summaries_ = convert(tf.EagerTensor, summaries_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, summaries_) - if num_features !== nothing - desc["num_features"] = Base.Int(num_features) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_quantile_stream_resource_add_summaries, [quantile_stream_resource_handle_, summaries_], name=nothing, num_features=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_quantile_stream_resource_add_summaries(quantile_stream_resource_handle_, summaries_; name=nothing, num_features=nothing) - if tf.in_eager_mode() - boosted_trees_quantile_stream_resource_add_summaries_eager(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) - else - boosted_trees_quantile_stream_resource_add_summaries_graph(quantile_stream_resource_handle_, summaries_; name=name, num_features=num_features) - end - end end @@ -51410,33 +93931,57 @@ end """ begin - function batch_ifft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchIFFT3D") do - desc = tf.NodeDescription("BatchIFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_ifft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchIFFT3D") do + desc = tf.NodeDescription("BatchIFFT3D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_ifft3d_eager(input_; name=nothing) - desc = tf.EagerOp("BatchIFFT3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_ifft3d_eager(input_; name=nothing) + desc = tf.EagerOp("BatchIFFT3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_ifft3d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft3d(input_; name=nothing) - if tf.in_eager_mode() - batch_ifft3d_eager(input_; name=name) - else - batch_ifft3d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_ifft3d(input_; name=nothing) + if tf.in_eager_mode() + batch_ifft3d_eager(input_; name=name) + else + batch_ifft3d_graph(input_; 
name=name) + end end - end + end end @@ -51446,35 +93991,63 @@ end """ begin - function sigmoid_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sigmoid") do - desc = tf.NodeDescription("Sigmoid") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function sigmoid_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sigmoid") do + desc = tf.NodeDescription("Sigmoid") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sigmoid_eager(x_; name=nothing) - desc = tf.EagerOp("Sigmoid") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(sigmoid, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sigmoid_eager(x_; name=nothing) + desc = tf.EagerOp("Sigmoid") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(sigmoid, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid(x_; name=nothing) - if tf.in_eager_mode() - sigmoid_eager(x_; name=name) - else - sigmoid_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sigmoid(x_; name=nothing) + if tf.in_eager_mode() + sigmoid_eager(x_; name=name) + else + sigmoid_graph(x_; name=name) + end end - end + end end @@ -51484,42 +94057,82 @@ end """ begin - function segment_mean_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMean") do - desc = tf.NodeDescription("SegmentMean") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function segment_mean_eager(data_, segment_ids_; name=nothing) - desc = tf.EagerOp("SegmentMean") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - res = tf.execute(desc) - node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_mean_eager(data_, segment_ids_; name=name) - else - segment_mean_graph(data_, segment_ids_; name=name) + begin + function segment_mean_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMean") do + desc = tf.NodeDescription("SegmentMean") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - 
convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function segment_mean_eager(data_, segment_ids_; name=nothing) + desc = tf.EagerOp("SegmentMean") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + res = tf.execute(desc) + node = tf.TapeNode(segment_mean, [data_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_mean(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_mean_eager(data_, segment_ids_; name=name) + else + segment_mean_graph(data_, segment_ids_; name=name) + end end - end + end end @@ -51529,33 +94142,57 @@ end """ begin - function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) - local desc - tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do - desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") - tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) + begin + function is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=nothing) + local desc + tf.with_op_name(name, "IsBoostedTreesEnsembleInitialized") do + desc = tf.NodeDescription("IsBoostedTreesEnsembleInitialized") + begin + begin + tree_ensemble_handle_ = convert(Tensor{Any}, tree_ensemble_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) - desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") - tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) - tf.add_input(desc, tree_ensemble_handle_) - res = tf.execute(desc) - node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=nothing) + desc = tf.EagerOp("IsBoostedTreesEnsembleInitialized") + tree_ensemble_handle_ = convert(tf.EagerTensor, tree_ensemble_handle_) + begin + begin + tf.add_input(desc, tree_ensemble_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(is_boosted_trees_ensemble_initialized, [tree_ensemble_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) - if tf.in_eager_mode() - is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) - else - is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function is_boosted_trees_ensemble_initialized(tree_ensemble_handle_; name=nothing) + if tf.in_eager_mode() + is_boosted_trees_ensemble_initialized_eager(tree_ensemble_handle_; name=name) + else + is_boosted_trees_ensemble_initialized_graph(tree_ensemble_handle_; name=name) + end end - end + end end @@ -51565,37 +94202,69 @@ end """ begin - function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArraySizeV2") do - desc = tf.NodeDescription("TensorArraySizeV2") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) + begin + function tensor_array_size_v2_graph(handle_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArraySizeV2") do + desc = tf.NodeDescription("TensorArraySizeV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArraySizeV2") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_array_size_v2_eager(handle_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArraySizeV2") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_size_v2, [handle_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_size_v2_eager(handle_, flow_in_; name=name) - else - tensor_array_size_v2_graph(handle_, flow_in_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_size_v2(handle_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_size_v2_eager(handle_, flow_in_; name=name) + else + tensor_array_size_v2_graph(handle_, flow_in_; name=name) + end end - end + end end @@ -51605,53 +94274,109 @@ end Returns x - y element-wise. 
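# The _MklSub wrapper below returns two outputs. A minimal sketch of the
# collection pattern the generator emits for any multi-output op (the helper
# name `collect_outputs` is hypothetical; the body mirrors the generated code):
function collect_outputs(desc, num_outputs)
    out = tf.Tensor[]
    op = tf.Operation(desc)            # finalize the node description
    for out_idx = 1:num_outputs        # wrap each output port as its own Tensor
        push!(out, tf.Tensor(op, out_idx))
    end
    out
end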
""" begin - function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklSub") do - desc = tf.NodeDescription("_MklSub") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) - desc = tf.EagerOp("_MklSub") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - mkl_x_ = convert(tf.EagerTensor, mkl_x_) - mkl_y_ = convert(tf.EagerTensor, mkl_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) + begin + function _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSub") do + desc = tf.NodeDescription("_MklSub") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + begin + end + end + begin + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) + desc = tf.EagerOp("_MklSub") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(_mkl_sub, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_sub(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_sub_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_sub_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end + end end @@ -51661,55 +94386,99 @@ end An op that performs gradient updates of embedding tables. 
""" begin - function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - local desc - tf.with_op_name(name, "SendTPUEmbeddingGradients") do - desc = tf.NodeDescription("SendTPUEmbeddingGradients") - inputs_ = [convert(Tensor{Float32}, x) for x = inputs_] - learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_] - tf.add_input(desc, inputs_) - tf.add_input(desc, learning_rates_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if NN !== nothing - desc["NN"] = Base.Int(NN) + begin + function send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + local desc + tf.with_op_name(name, "SendTPUEmbeddingGradients") do + desc = tf.NodeDescription("SendTPUEmbeddingGradients") + begin + begin + inputs_ = [convert(Tensor{Float32}, x) for x = inputs_] + begin + end + end + begin + learning_rates_ = [convert(Tensor{Float32}, x) for x = learning_rates_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, learning_rates_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if NN !== nothing + desc["NN"] = Base.Int(NN) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + desc = tf.EagerOp("SendTPUEmbeddingGradients") + inputs_ = convert(tf.EagerTensor, inputs_) + learning_rates_ = convert(tf.EagerTensor, learning_rates_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, learning_rates_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if NN !== nothing + desc["NN"] = Base.Int(NN) + end + end + begin + if config !== nothing + desc["config"] = Base.String(config) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) + if tf.in_eager_mode() + send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + else + send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) + end end - if config !== nothing - desc["config"] = Base.String(config) - end - end - tf.Tensor(tf.Operation(desc)) end - function send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - desc = tf.EagerOp("SendTPUEmbeddingGradients") - inputs_ = convert(tf.EagerTensor, inputs_) - learning_rates_ = convert(tf.EagerTensor, learning_rates_) - tf.add_input(desc, inputs_) - tf.add_input(desc, learning_rates_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if NN !== nothing - desc["NN"] = Base.Int(NN) - end - if config !== nothing - desc["config"] = Base.String(config) - end - res = tf.execute(desc) - node = tf.TapeNode(send_tpu_embedding_gradients, [inputs_, learning_rates_], name=nothing, N=nothing, NN=nothing, config=nothing, res) - if length(res) >= 1 - 
tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function send_tpu_embedding_gradients(inputs_, learning_rates_; name=nothing, N=nothing, NN=nothing, config=nothing) - if tf.in_eager_mode() - send_tpu_embedding_gradients_eager(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) - else - send_tpu_embedding_gradients_graph(inputs_, learning_rates_; name=name, N=N, NN=NN, config=config) - end - end end @@ -51719,59 +94488,103 @@ end """ begin - function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3D") do - desc = tf.NodeDescription("MaxPool3D") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function max_pool3d_graph(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3D") do + desc = tf.NodeDescription("MaxPool3D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool3d_eager(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) 
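# max_pool3d shows the optional-attribute convention: every attr kwarg defaults
# to `nothing` and is written into the description only when the caller supplied
# a value. A hypothetical condensed helper illustrating the same checks:
function set_pool_attrs!(desc; ksize=nothing, strides=nothing, padding=nothing)
    if ksize !== nothing
        desc["ksize"] = map(Base.identity, ksize)     # list(int) attr
    end
    if strides !== nothing
        desc["strides"] = map(Base.identity, strides)
    end
    if padding !== nothing
        desc["padding"] = Base.String(padding)        # string attr ("SAME"/"VALID")
    end
    return desc
end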
- desc = tf.EagerOp("MaxPool3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool3d, [input_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d(input_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if tf.in_eager_mode() - max_pool3d_eager(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_graph(input_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -51781,48 +94594,92 @@ end """ begin - function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) - local desc - tf.with_op_name(name, "Prod") do - desc = tf.NodeDescription("Prod") - input_ = convert(Tensor{Any}, input_) - reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) - reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) - (input_,) = tf.tf_promote(input_) - (reduction_indices_,) = tf.tf_promote(reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) + begin + function prod_graph(input_, reduction_indices_; name=nothing, keep_dims=nothing) + local desc + tf.with_op_name(name, "Prod") do + desc = tf.NodeDescription("Prod") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + reduction_indices_ = convert(Tensor{Int32}, reduction_indices_) + begin + reduction_indices_ = reduction_indices_ - convert(tf.Tensor{eltype(reduction_indices_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (reduction_indices_,) = tf.tf_promote(reduction_indices_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) + desc = tf.EagerOp("Prod") + input_ = convert(tf.EagerTensor, input_) + reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, reduction_indices_) + end + end + begin + begin + if keep_dims !== nothing + desc["keep_dims"] = Base.Bool(keep_dims) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tidx"] = tf.data_type(reduction_indices_) + end + res = tf.execute(desc) + node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) + if 
tf.in_eager_mode() + prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) + else + prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) + end end - end - tf.Tensor(tf.Operation(desc)) end - function prod_eager(input_, reduction_indices_; name=nothing, keep_dims=nothing) - desc = tf.EagerOp("Prod") - input_ = convert(tf.EagerTensor, input_) - reduction_indices_ = convert(tf.EagerTensor, reduction_indices_) - tf.add_input(desc, input_) - tf.add_input(desc, reduction_indices_) - if keep_dims !== nothing - desc["keep_dims"] = Base.Bool(keep_dims) - end - desc["T"] = tf.data_type(input_) - desc["Tidx"] = tf.data_type(reduction_indices_) - res = tf.execute(desc) - node = tf.TapeNode(prod, [input_, reduction_indices_], name=nothing, keep_dims=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prod(input_, reduction_indices_; name=nothing, keep_dims=nothing) - if tf.in_eager_mode() - prod_eager(input_, reduction_indices_; name=name, keep_dims=keep_dims) - else - prod_graph(input_, reduction_indices_; name=name, keep_dims=keep_dims) - end - end end @@ -51832,33 +94689,57 @@ end """ begin - function experimental_identity_indexed_dataset_graph(size_; name=nothing) - local desc - tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do - desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") - size_ = convert(Tensor{Any}, size_) - tf.add_input(desc, size_) + begin + function experimental_identity_indexed_dataset_graph(size_; name=nothing) + local desc + tf.with_op_name(name, "ExperimentalIdentityIndexedDataset") do + desc = tf.NodeDescription("ExperimentalIdentityIndexedDataset") + begin + begin + size_ = convert(Tensor{Any}, size_) + begin + end + end + end + begin + begin + tf.add_input(desc, size_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function experimental_identity_indexed_dataset_eager(size_; name=nothing) - desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, size_) - res = tf.execute(desc) - node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function experimental_identity_indexed_dataset_eager(size_; name=nothing) + desc = tf.EagerOp("ExperimentalIdentityIndexedDataset") + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, size_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_identity_indexed_dataset, [size_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing) - if tf.in_eager_mode() - experimental_identity_indexed_dataset_eager(size_; name=name) - else - experimental_identity_indexed_dataset_graph(size_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_identity_indexed_dataset(size_; name=nothing) + if tf.in_eager_mode() + experimental_identity_indexed_dataset_eager(size_; name=name) + else + experimental_identity_indexed_dataset_graph(size_; name=name) + end end - end + end end @@ -51868,45 +94749,85 @@ end """ begin - function 
tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListPushBack") do - desc = tf.NodeDescription("TensorListPushBack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tensor_ = convert(Tensor{Any}, tensor_) - (tensor_,) = tf.tf_promote(tensor_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_push_back_graph(input_handle_, tensor_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListPushBack") do + desc = tf.NodeDescription("TensorListPushBack") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListPushBack") + input_handle_ = convert(tf.EagerTensor, input_handle_) + tensor_ = convert(tf.EagerTensor, tensor_) + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, tensor_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + begin + desc["element_dtype"] = tf.data_type(tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) + else + tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_push_back_eager(input_handle_, tensor_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListPushBack") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tensor_ = convert(tf.EagerTensor, tensor_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, tensor_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - desc["element_dtype"] = tf.data_type(tensor_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_push_back, [input_handle_, tensor_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_push_back(input_handle_, tensor_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_push_back_eager(input_handle_, tensor_; name=name, element_dtype=element_dtype) - else - tensor_list_push_back_graph(input_handle_, tensor_; name=name, element_dtype=element_dtype) - end - end end @@ -51916,109 +94837,189 @@ end """ begin - function 
batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "BatchFunction") do - desc = tf.NodeDescription("BatchFunction") - in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] - captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_] - tf.add_input(desc, in_tensors_) - tf.add_input(desc, captured_tensors_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + begin + function batch_function_graph(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "BatchFunction") do + desc = tf.NodeDescription("BatchFunction") + begin + begin + in_tensors_ = [convert(Tensor{Any}, x) for x = in_tensors_] + begin + end + end + begin + captured_tensors_ = [convert(Tensor{Any}, x) for x = captured_tensors_] + begin + end + end + end + begin + begin + tf.add_input(desc, in_tensors_) + end + begin + tf.add_input(desc, captured_tensors_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + end + begin + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + end + begin + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + end + begin + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + end + begin + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + desc = tf.EagerOp("BatchFunction") + in_tensors_ = convert(tf.EagerTensor, in_tensors_) + captured_tensors_ = convert(tf.EagerTensor, captured_tensors_) + begin + 
begin + tf.add_input(desc, in_tensors_) + end + begin + tf.add_input(desc, captured_tensors_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if num_batch_threads !== nothing + desc["num_batch_threads"] = Base.Int(num_batch_threads) + end + end + begin + if max_batch_size !== nothing + desc["max_batch_size"] = Base.Int(max_batch_size) + end + end + begin + if batch_timeout_micros !== nothing + desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) + end + end + begin + if max_enqueued_batches !== nothing + desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) + end + end + begin + if allowed_batch_sizes !== nothing + desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if batching_queue !== nothing + desc["batching_queue"] = Base.String(batching_queue) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tcaptured !== nothing + desc["Tcaptured"] = map(Base.identity, Tcaptured) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) + if tf.in_eager_mode() + batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + else + batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) + end end - if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if Tin !== nothing - desc["Tin"] = 
map(Base.identity, Tin) - end - if Tcaptured !== nothing - desc["Tcaptured"] = map(Base.identity, Tcaptured) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - end - tf.Tensor(tf.Operation(desc)) - end - function batch_function_eager(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - desc = tf.EagerOp("BatchFunction") - in_tensors_ = convert(tf.EagerTensor, in_tensors_) - captured_tensors_ = convert(tf.EagerTensor, captured_tensors_) - tf.add_input(desc, in_tensors_) - tf.add_input(desc, captured_tensors_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if num_batch_threads !== nothing - desc["num_batch_threads"] = Base.Int(num_batch_threads) - end - if max_batch_size !== nothing - desc["max_batch_size"] = Base.Int(max_batch_size) - end - if batch_timeout_micros !== nothing - desc["batch_timeout_micros"] = Base.Int(batch_timeout_micros) - end - if max_enqueued_batches !== nothing - desc["max_enqueued_batches"] = Base.Int(max_enqueued_batches) - end - if allowed_batch_sizes !== nothing - desc["allowed_batch_sizes"] = map(Base.identity, allowed_batch_sizes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if batching_queue !== nothing - desc["batching_queue"] = Base.String(batching_queue) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tcaptured !== nothing - desc["Tcaptured"] = map(Base.identity, Tcaptured) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - res = tf.execute(desc) - node = tf.TapeNode(batch_function, [in_tensors_, captured_tensors_], name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_function(in_tensors_, captured_tensors_; name=nothing, f=nothing, num_batch_threads=nothing, max_batch_size=nothing, batch_timeout_micros=nothing, max_enqueued_batches=nothing, allowed_batch_sizes=nothing, container=nothing, shared_name=nothing, batching_queue=nothing, Tin=nothing, Tcaptured=nothing, Tout=nothing) - if tf.in_eager_mode() - batch_function_eager(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) - else - batch_function_graph(in_tensors_, captured_tensors_; name=name, f=f, num_batch_threads=num_batch_threads, max_batch_size=max_batch_size, batch_timeout_micros=batch_timeout_micros, max_enqueued_batches=max_enqueued_batches, allowed_batch_sizes=allowed_batch_sizes, container=container, shared_name=shared_name, batching_queue=batching_queue, Tin=Tin, Tcaptured=Tcaptured, Tout=Tout) - end - end end @@ -52028,53 +95029,109 @@ 
end """ begin - function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) - local desc - tf.with_op_name(name, "SparseFillEmptyRows") do - desc = tf.NodeDescription("SparseFillEmptyRows") - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - dense_shape_ = convert(Tensor{Int64}, dense_shape_) - default_value_ = convert(Tensor{Any}, default_value_) - (values_, default_value_) = tf.tf_promote(values_, default_value_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - tf.add_input(desc, default_value_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:4 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) - desc = tf.EagerOp("SparseFillEmptyRows") - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - dense_shape_ = convert(tf.EagerTensor, dense_shape_) - default_value_ = convert(tf.EagerTensor, default_value_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, dense_shape_) - tf.add_input(desc, default_value_) - desc["T"] = tf.data_type(values_) - desc["T"] = tf.data_type(default_value_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) - if tf.in_eager_mode() - sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) - else - sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) + begin + function sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRows") do + desc = tf.NodeDescription("SparseFillEmptyRows") + begin + begin + indices_ = convert(Tensor{Int64}, indices_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + dense_shape_ = convert(Tensor{Int64}, dense_shape_) + begin + end + end + begin + default_value_ = convert(Tensor{Any}, default_value_) + begin + end + end + begin + (values_, default_value_) = tf.tf_promote(values_, default_value_) + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, dense_shape_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:4 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=nothing) + desc = tf.EagerOp("SparseFillEmptyRows") + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + dense_shape_ = convert(tf.EagerTensor, dense_shape_) + default_value_ = convert(tf.EagerTensor, default_value_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, dense_shape_) + end + begin + tf.add_input(desc, default_value_) + end + end + begin + end + begin + desc["T"] = tf.data_type(values_) + end + 
begin + desc["T"] = tf.data_type(default_value_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_fill_empty_rows, [indices_, values_, dense_shape_, default_value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows(indices_, values_, dense_shape_, default_value_; name=nothing) + if tf.in_eager_mode() + sparse_fill_empty_rows_eager(indices_, values_, dense_shape_, default_value_; name=name) + else + sparse_fill_empty_rows_graph(indices_, values_, dense_shape_, default_value_; name=name) + end end - end + end end @@ -52084,46 +95141,80 @@ end """ begin - function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) - local desc - tf.with_op_name(name, "SelfAdjointEigV2") do - desc = tf.NodeDescription("SelfAdjointEigV2") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) + begin + function self_adjoint_eig_v2_graph(input_; name=nothing, compute_v=nothing) + local desc + tf.with_op_name(name, "SelfAdjointEigV2") do + desc = tf.NodeDescription("SelfAdjointEigV2") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) - desc = tf.EagerOp("SelfAdjointEigV2") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if compute_v !== nothing - desc["compute_v"] = Base.Bool(compute_v) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function self_adjoint_eig_v2_eager(input_; name=nothing, compute_v=nothing) + desc = tf.EagerOp("SelfAdjointEigV2") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if compute_v !== nothing + desc["compute_v"] = Base.Bool(compute_v) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(self_adjoint_eig_v2, [input_], name=nothing, compute_v=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) - if tf.in_eager_mode() - self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) - else - self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function self_adjoint_eig_v2(input_; name=nothing, compute_v=nothing) + if tf.in_eager_mode() + self_adjoint_eig_v2_eager(input_; name=name, compute_v=compute_v) + else + self_adjoint_eig_v2_graph(input_; name=name, compute_v=compute_v) + end end - end + 
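# segment_mean and prod earlier in this hunk shift index-like inputs from
# Julia's 1-based convention to TensorFlow's 0-based one before adding them to
# the graph, by subtracting a constant tensor of the same element type. A
# standalone sketch (hypothetical helper name; the subtraction mirrors the
# generated line):
function to_zero_based(indices_)
    indices_ = convert(tf.Tensor{Int32}, indices_)               # as in the wrappers
    return indices_ - convert(tf.Tensor{eltype(indices_)}, 1)    # 1-based -> 0-based
end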
end end @@ -52133,58 +95224,92 @@ end Retrieve embedding parameters for a single table. """ begin - function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) + begin + function retrieve_tpu_embedding_ftrl_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingFTRLParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingFTRLParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_ftrl_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function retrieve_tpu_embedding_ftrl_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingFTRLParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - 
desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_ftrl_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_ftrl_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_ftrl_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_ftrl_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -52194,79 +95319,185 @@ end """ begin - function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do - desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") - var_ = convert(Tensor{Any}, var_) - gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) - gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - global_step_ = convert(Tensor{Int64}, global_step_) - (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceSparseApplyAdagradDA") - var_ = convert(tf.EagerTensor, var_) - gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) - gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - global_step_ = convert(tf.EagerTensor, global_step_) - tf.add_input(desc, var_) - tf.add_input(desc, gradient_accumulator_) - tf.add_input(desc, gradient_squared_accumulator_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, global_step_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - res = tf.execute(desc) - node = 
tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) - else - resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + begin + function resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyAdagradDA") do + desc = tf.NodeDescription("ResourceSparseApplyAdagradDA") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + gradient_accumulator_ = convert(Tensor{Any}, gradient_accumulator_) + begin + end + end + begin + gradient_squared_accumulator_ = convert(Tensor{Any}, gradient_squared_accumulator_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + global_step_ = convert(Tensor{Int64}, global_step_) + begin + end + end + begin + (grad_, lr_, l1_, l2_) = tf.tf_promote(grad_, lr_, l1_, l2_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, gradient_accumulator_) + end + begin + tf.add_input(desc, gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceSparseApplyAdagradDA") + var_ = convert(tf.EagerTensor, var_) + gradient_accumulator_ = convert(tf.EagerTensor, gradient_accumulator_) + gradient_squared_accumulator_ = convert(tf.EagerTensor, gradient_squared_accumulator_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + global_step_ = convert(tf.EagerTensor, global_step_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, 
gradient_accumulator_) + end + begin + tf.add_input(desc, gradient_squared_accumulator_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, global_step_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_adagrad_da, [var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_adagrad_da(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_sparse_apply_adagrad_da_eager(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + else + resource_sparse_apply_adagrad_da_graph(var_, gradient_accumulator_, gradient_squared_accumulator_, grad_, indices_, lr_, l1_, l2_, global_step_; name=name, use_locking=use_locking) + end end - end + end end @@ -52276,47 +95507,75 @@ end """ begin - function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - local desc - tf.with_op_name(name, "TemporaryVariable") do - desc = tf.NodeDescription("TemporaryVariable") - if shape !== nothing - desc["shape"] = Base.identity(shape) + begin + function temporary_variable_graph(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + local desc + tf.with_op_name(name, "TemporaryVariable") do + desc = tf.NodeDescription("TemporaryVariable") + begin + end + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function temporary_variable_eager(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + desc = tf.EagerOp("TemporaryVariable") + begin + end + begin + begin + if shape !== nothing + desc["shape"] = Base.identity(shape) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if var_name !== nothing + desc["var_name"] = Base.String(var_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) + if tf.in_eager_mode() + temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) + else + 
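+                    # Graph mode: build a TemporaryVariable node in the default
+                    # graph. This op takes no tensor inputs; `shape`, `dtype` and
+                    # `var_name` are attached as plain node attributes.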
temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) + end end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if var_name !== nothing - desc["var_name"] = Base.String(var_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function temporary_variable_eager(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - desc = tf.EagerOp("TemporaryVariable") - if shape !== nothing - desc["shape"] = Base.identity(shape) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if var_name !== nothing - desc["var_name"] = Base.String(var_name) - end - res = tf.execute(desc) - node = tf.TapeNode(temporary_variable, [], name=nothing, shape=nothing, dtype=nothing, var_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function temporary_variable(; name=nothing, shape=nothing, dtype=nothing, var_name=nothing) - if tf.in_eager_mode() - temporary_variable_eager(; name=name, shape=shape, dtype=dtype, var_name=var_name) - else - temporary_variable_graph(; name=name, shape=shape, dtype=dtype, var_name=var_name) - end - end end @@ -52326,69 +95585,157 @@ end """ begin - function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAddSign") do - desc = tf.NodeDescription("ResourceApplyAddSign") - var_ = convert(Tensor{Any}, var_) - m_ = convert(Tensor{Any}, m_) - lr_ = convert(Tensor{Any}, lr_) - alpha_ = convert(Tensor{Any}, alpha_) - sign_decay_ = convert(Tensor{Any}, sign_decay_) - beta_ = convert(Tensor{Any}, beta_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyAddSign") - var_ = convert(tf.EagerTensor, var_) - m_ = convert(tf.EagerTensor, m_) - lr_ = convert(tf.EagerTensor, lr_) - alpha_ = convert(tf.EagerTensor, alpha_) - sign_decay_ = convert(tf.EagerTensor, sign_decay_) - beta_ = convert(tf.EagerTensor, beta_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, m_) - tf.add_input(desc, lr_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sign_decay_) - tf.add_input(desc, beta_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(sign_decay_) - desc["T"] = tf.data_type(beta_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, 
use_locking=nothing) - if tf.in_eager_mode() - resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) - else - resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAddSign") do + desc = tf.NodeDescription("ResourceApplyAddSign") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + sign_decay_ = convert(Tensor{Any}, sign_decay_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, alpha_, sign_decay_, beta_, grad_) = tf.tf_promote(lr_, alpha_, sign_decay_, beta_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyAddSign") + var_ = convert(tf.EagerTensor, var_) + m_ = convert(tf.EagerTensor, m_) + lr_ = convert(tf.EagerTensor, lr_) + alpha_ = convert(tf.EagerTensor, alpha_) + sign_decay_ = convert(tf.EagerTensor, sign_decay_) + beta_ = convert(tf.EagerTensor, beta_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sign_decay_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(sign_decay_) + end + begin + desc["T"] = tf.data_type(beta_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_add_sign, [var_, m_, lr_, alpha_, sign_decay_, beta_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_add_sign(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_add_sign_eager(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + else + resource_apply_add_sign_graph(var_, m_, lr_, alpha_, sign_decay_, beta_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -52398,47 +95745,99 @@ end """ begin - function roll_graph(input_, shift_, 
axis_; name=nothing) - local desc - tf.with_op_name(name, "Roll") do - desc = tf.NodeDescription("Roll") - input_ = convert(Tensor{Any}, input_) - shift_ = convert(Tensor{Any}, shift_) - axis_ = convert(Tensor{Any}, axis_) - (input_,) = tf.tf_promote(input_) - (shift_,) = tf.tf_promote(shift_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, input_) - tf.add_input(desc, shift_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) - end - function roll_eager(input_, shift_, axis_; name=nothing) - desc = tf.EagerOp("Roll") - input_ = convert(tf.EagerTensor, input_) - shift_ = convert(tf.EagerTensor, shift_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, input_) - tf.add_input(desc, shift_) - tf.add_input(desc, axis_) - desc["T"] = tf.data_type(input_) - desc["Tshift"] = tf.data_type(shift_) - desc["Taxis"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function roll(input_, shift_, axis_; name=nothing) - if tf.in_eager_mode() - roll_eager(input_, shift_, axis_; name=name) - else - roll_graph(input_, shift_, axis_; name=name) + begin + function roll_graph(input_, shift_, axis_; name=nothing) + local desc + tf.with_op_name(name, "Roll") do + desc = tf.NodeDescription("Roll") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + shift_ = convert(Tensor{Any}, shift_) + begin + end + end + begin + axis_ = convert(Tensor{Any}, axis_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (shift_,) = tf.tf_promote(shift_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, shift_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function roll_eager(input_, shift_, axis_; name=nothing) + desc = tf.EagerOp("Roll") + input_ = convert(tf.EagerTensor, input_) + shift_ = convert(tf.EagerTensor, shift_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, shift_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tshift"] = tf.data_type(shift_) + end + begin + desc["Taxis"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(roll, [input_, shift_, axis_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function roll(input_, shift_, axis_; name=nothing) + if tf.in_eager_mode() + roll_eager(input_, shift_, axis_; name=name) + else + roll_graph(input_, shift_, axis_; name=name) + end end - end + end end @@ -52448,40 +95847,78 @@ end """ begin - function xdivy_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Xdivy") do - desc = tf.NodeDescription("Xdivy") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function xdivy_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Xdivy") do + desc = tf.NodeDescription("Xdivy") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + 
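+                        # Both operands are lifted to tensors here and unified to
+                        # a common element type by tf.tf_promote below. Xdivy
+                        # computes x / y elementwise but returns 0 wherever
+                        # x == 0, so 0/0 yields 0 instead of NaN.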
begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function xdivy_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Xdivy") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(xdivy, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function xdivy_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Xdivy") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(xdivy, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xdivy(x_, y_; name=nothing) - if tf.in_eager_mode() - xdivy_eager(x_, y_; name=name) - else - xdivy_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function xdivy(x_, y_; name=nothing) + if tf.in_eager_mode() + xdivy_eager(x_, y_; name=name) + else + xdivy_graph(x_, y_; name=name) + end end - end + end end @@ -52491,69 +95928,133 @@ end """ begin - function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - local desc - tf.with_op_name(name, "MaxPool3DGradGrad") do - desc = tf.NodeDescription("MaxPool3DGradGrad") - orig_input_ = convert(Tensor{Any}, orig_input_) - orig_output_ = convert(Tensor{Any}, orig_output_) - grad_ = convert(Tensor{Any}, grad_) - (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + local desc + tf.with_op_name(name, "MaxPool3DGradGrad") do + desc = tf.NodeDescription("MaxPool3DGradGrad") + begin + begin + orig_input_ = convert(Tensor{Any}, orig_input_) + begin + end + end + begin + orig_output_ = convert(Tensor{Any}, orig_output_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (orig_input_, orig_output_, grad_) = tf.tf_promote(orig_input_, orig_output_, grad_) + end + end + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + 
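+                            # `padding` is forwarded as a string attribute
+                            # ("SAME" or "VALID"), matching the raw TensorFlow
+                            # MaxPool3DGradGrad op.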
end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + desc = tf.EagerOp("MaxPool3DGradGrad") + orig_input_ = convert(tf.EagerTensor, orig_input_) + orig_output_ = convert(tf.EagerTensor, orig_output_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, orig_input_) + end + begin + tf.add_input(desc, orig_output_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + end + begin + desc["T"] = tf.data_type(orig_input_) + end + begin + desc["T"] = tf.data_type(orig_output_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) + if tf.in_eager_mode() + max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + else + max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) + end end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - desc = tf.EagerOp("MaxPool3DGradGrad") - orig_input_ = convert(tf.EagerTensor, orig_input_) - orig_output_ = convert(tf.EagerTensor, orig_output_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, orig_input_) - tf.add_input(desc, orig_output_) - tf.add_input(desc, grad_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - desc["T"] = tf.data_type(orig_input_) - desc["T"] = tf.data_type(orig_output_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool3d_grad_grad, [orig_input_, orig_output_, grad_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool3d_grad_grad(orig_input_, orig_output_, grad_; name=nothing, ksize=nothing, strides=nothing, padding=nothing, data_format=nothing) - if 
tf.in_eager_mode() - max_pool3d_grad_grad_eager(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - else - max_pool3d_grad_grad_graph(orig_input_, orig_output_, grad_; name=name, ksize=ksize, strides=strides, padding=padding, data_format=data_format) - end - end end @@ -52563,68 +96064,146 @@ end """ begin - function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedBiasAdd") do - desc = tf.NodeDescription("QuantizedBiasAdd") - input_ = convert(Tensor{Any}, input_) - bias_ = convert(Tensor{Any}, bias_) - min_input_ = convert(Tensor{Float32}, min_input_) - max_input_ = convert(Tensor{Float32}, max_input_) - min_bias_ = convert(Tensor{Float32}, min_bias_) - max_bias_ = convert(Tensor{Float32}, max_bias_) - (input_,) = tf.tf_promote(input_) - (bias_,) = tf.tf_promote(bias_) - tf.add_input(desc, input_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_bias_) - tf.add_input(desc, max_bias_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedBiasAdd") do + desc = tf.NodeDescription("QuantizedBiasAdd") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + bias_ = convert(Tensor{Any}, bias_) + begin + end + end + begin + min_input_ = convert(Tensor{Float32}, min_input_) + begin + end + end + begin + max_input_ = convert(Tensor{Float32}, max_input_) + begin + end + end + begin + min_bias_ = convert(Tensor{Float32}, min_bias_) + begin + end + end + begin + max_bias_ = convert(Tensor{Float32}, max_bias_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (bias_,) = tf.tf_promote(bias_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, bias_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + begin + tf.add_input(desc, min_bias_) + end + begin + tf.add_input(desc, max_bias_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedBiasAdd") + input_ = convert(tf.EagerTensor, input_) + bias_ = convert(tf.EagerTensor, bias_) + min_input_ = convert(tf.EagerTensor, min_input_) + max_input_ = convert(tf.EagerTensor, max_input_) + min_bias_ = convert(tf.EagerTensor, min_bias_) + max_bias_ = convert(tf.EagerTensor, max_bias_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, bias_) + end + begin + tf.add_input(desc, min_input_) + end + begin + tf.add_input(desc, max_input_) + end + begin + tf.add_input(desc, min_bias_) + end + begin + tf.add_input(desc, max_bias_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["T1"] = tf.data_type(input_) + end + begin + desc["T2"] = tf.data_type(bias_) + end + res = tf.execute(desc) + node = 
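+                # Record the op and its inputs on the gradient tape; the TapeNode
+                # is attached to the first result below so backprop can find it.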
tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + else + quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizedBiasAdd") - input_ = convert(tf.EagerTensor, input_) - bias_ = convert(tf.EagerTensor, bias_) - min_input_ = convert(tf.EagerTensor, min_input_) - max_input_ = convert(tf.EagerTensor, max_input_) - min_bias_ = convert(tf.EagerTensor, min_bias_) - max_bias_ = convert(tf.EagerTensor, max_bias_) - tf.add_input(desc, input_) - tf.add_input(desc, bias_) - tf.add_input(desc, min_input_) - tf.add_input(desc, max_input_) - tf.add_input(desc, min_bias_) - tf.add_input(desc, max_bias_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["T1"] = tf.data_type(input_) - desc["T2"] = tf.data_type(bias_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_bias_add, [input_, bias_, min_input_, max_input_, min_bias_, max_bias_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_bias_add(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_bias_add_eager(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) - else - quantized_bias_add_graph(input_, bias_, min_input_, max_input_, min_bias_, max_bias_; name=name, out_type=out_type) - end - end end @@ -52634,59 +96213,119 @@ end """ begin - function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - local desc - tf.with_op_name(name, "CropAndResize") do - desc = tf.NodeDescription("CropAndResize") - image_ = convert(Tensor{Any}, image_) - boxes_ = convert(Tensor{Float32}, boxes_) - box_ind_ = convert(Tensor{Int32}, box_ind_) - crop_size_ = convert(Tensor{Int32}, crop_size_) - (image_,) = tf.tf_promote(image_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, crop_size_) - if method !== nothing - desc["method"] = Base.String(method) - end - if extrapolation_value !== nothing - desc["extrapolation_value"] = Base.identity(extrapolation_value) + begin + function crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + local desc + tf.with_op_name(name, "CropAndResize") do + desc = tf.NodeDescription("CropAndResize") + begin + begin + image_ = convert(Tensor{Any}, image_) + begin + end + end + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + box_ind_ = convert(Tensor{Int32}, 
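+                        # box_ind_ holds the batch index of each box; boxes_ above
+                        # carries Float32 normalized [y1, x1, y2, x2] coordinates,
+                        # as expected by the raw CropAndResize op.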
box_ind_) + begin + end + end + begin + crop_size_ = convert(Tensor{Int32}, crop_size_) + begin + end + end + begin + (image_,) = tf.tf_promote(image_) + end + end + begin + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + begin + tf.add_input(desc, crop_size_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + begin + if extrapolation_value !== nothing + desc["extrapolation_value"] = Base.identity(extrapolation_value) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + desc = tf.EagerOp("CropAndResize") + image_ = convert(tf.EagerTensor, image_) + boxes_ = convert(tf.EagerTensor, boxes_) + box_ind_ = convert(tf.EagerTensor, box_ind_) + crop_size_ = convert(tf.EagerTensor, crop_size_) + begin + begin + tf.add_input(desc, image_) + end + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, box_ind_) + end + begin + tf.add_input(desc, crop_size_) + end + end + begin + begin + if method !== nothing + desc["method"] = Base.String(method) + end + end + begin + if extrapolation_value !== nothing + desc["extrapolation_value"] = Base.identity(extrapolation_value) + end + end + end + begin + desc["T"] = tf.data_type(image_) + end + res = tf.execute(desc) + node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) + if tf.in_eager_mode() + crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + else + crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - desc = tf.EagerOp("CropAndResize") - image_ = convert(tf.EagerTensor, image_) - boxes_ = convert(tf.EagerTensor, boxes_) - box_ind_ = convert(tf.EagerTensor, box_ind_) - crop_size_ = convert(tf.EagerTensor, crop_size_) - tf.add_input(desc, image_) - tf.add_input(desc, boxes_) - tf.add_input(desc, box_ind_) - tf.add_input(desc, crop_size_) - if method !== nothing - desc["method"] = Base.String(method) - end - if extrapolation_value !== nothing - desc["extrapolation_value"] = Base.identity(extrapolation_value) - end - desc["T"] = tf.data_type(image_) - res = tf.execute(desc) - node = tf.TapeNode(crop_and_resize, [image_, boxes_, box_ind_, crop_size_], name=nothing, method=nothing, extrapolation_value=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function crop_and_resize(image_, boxes_, box_ind_, crop_size_; name=nothing, method=nothing, extrapolation_value=nothing) - if tf.in_eager_mode() - crop_and_resize_eager(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) - else - 
crop_and_resize_graph(image_, boxes_, box_ind_, crop_size_; name=name, method=method, extrapolation_value=extrapolation_value) - end - end end @@ -52696,68 +96335,114 @@ end """ begin - function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "MapUnstageNoKey") do - desc = tf.NodeDescription("MapUnstageNoKey") - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function map_unstage_no_key_graph(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "MapUnstageNoKey") do + desc = tf.NodeDescription("MapUnstageNoKey") + begin + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("MapUnstageNoKey") + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - 
desc["shared_name"] = Base.String(shared_name) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function map_unstage_no_key_eager(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("MapUnstageNoKey") - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(map_unstage_no_key, [indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function map_unstage_no_key(indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - map_unstage_no_key_eager(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - map_unstage_no_key_graph(indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -52767,53 +96452,107 @@ end """ begin - function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterNdSub") do - desc = tf.NodeDescription("ScatterNdSub") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_nd_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterNdSub") do + desc = tf.NodeDescription("ScatterNdSub") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterNdSub") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, 
updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_nd_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterNdSub") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_nd_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_nd_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_nd_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_nd_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -52823,45 +96562,85 @@ end """ begin - function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBilinear") do - desc = tf.NodeDescription("ResizeBilinear") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_bilinear_graph(images_, size_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBilinear") do + desc = tf.NodeDescription("ResizeBilinear") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBilinear") + images_ = convert(tf.EagerTensor, images_) + size_ = 
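+                # size_ gives the target [new_height, new_width] (an Int32 pair
+                # in the graph wrapper above); align_corners toggles TF's
+                # corner-alignment resampling behavior.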
convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_bilinear_eager(images_, size_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeBilinear") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(resize_bilinear, [images_, size_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bilinear(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bilinear_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_bilinear_graph(images_, size_; name=name, align_corners=align_corners) - end - end end @@ -52871,67 +96650,119 @@ end """ begin - function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "OrderedMapPeek") do - desc = tf.NodeDescription("OrderedMapPeek") - key_ = convert(Tensor{Int64}, key_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) + begin + function ordered_map_peek_graph(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "OrderedMapPeek") do + desc = tf.NodeDescription("OrderedMapPeek") + begin + begin + key_ = convert(Tensor{Int64}, key_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, 
dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("OrderedMapPeek") + key_ = convert(tf.EagerTensor, key_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, key_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function ordered_map_peek_eager(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("OrderedMapPeek") - key_ = convert(tf.EagerTensor, key_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, key_) - tf.add_input(desc, indices_) - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(ordered_map_peek, [key_, indices_], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ordered_map_peek(key_, indices_; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - ordered_map_peek_eager(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - ordered_map_peek_graph(key_, indices_; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, 
container=container, shared_name=shared_name) - end - end end @@ -52941,63 +96772,107 @@ end """ begin - function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - local desc - tf.with_op_name(name, "TensorArray") do - desc = tf.NodeDescription("TensorArray") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) + begin + function tensor_array_graph(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + local desc + tf.with_op_name(name, "TensorArray") do + desc = tf.NodeDescription("TensorArray") + begin + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + end + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + desc = tf.EagerOp("TensorArray") + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) + if tf.in_eager_mode() + tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + else + tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) + end end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - if element_shape !== 
nothing - desc["element_shape"] = Base.identity(element_shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_eager(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - desc = tf.EagerOp("TensorArray") - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array, [size_], name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array(size_; name=nothing, dtype=nothing, dynamic_size=nothing, clear_after_read=nothing, tensor_array_name=nothing, element_shape=nothing) - if tf.in_eager_mode() - tensor_array_eager(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) - else - tensor_array_graph(size_; name=name, dtype=dtype, dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, element_shape=element_shape) - end - end end @@ -53007,44 +96882,90 @@ end """ begin - function inplace_sub_graph(x_, i_, v_; name=nothing) - local desc - tf.with_op_name(name, "InplaceSub") do - desc = tf.NodeDescription("InplaceSub") - x_ = convert(Tensor{Any}, x_) - i_ = convert(Tensor{Int32}, i_) - v_ = convert(Tensor{Any}, v_) - (x_, v_) = tf.tf_promote(x_, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - end - tf.Tensor(tf.Operation(desc)) - end - function inplace_sub_eager(x_, i_, v_; name=nothing) - desc = tf.EagerOp("InplaceSub") - x_ = convert(tf.EagerTensor, x_) - i_ = convert(tf.EagerTensor, i_) - v_ = convert(tf.EagerTensor, v_) - tf.add_input(desc, x_) - tf.add_input(desc, i_) - tf.add_input(desc, v_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(v_) - res = tf.execute(desc) - node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing) - if tf.in_eager_mode() - inplace_sub_eager(x_, i_, v_; name=name) - else - inplace_sub_graph(x_, i_, v_; name=name) + begin + function inplace_sub_graph(x_, i_, v_; name=nothing) + local desc + tf.with_op_name(name, "InplaceSub") do + desc = tf.NodeDescription("InplaceSub") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + i_ = convert(Tensor{Int32}, i_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + (x_, v_) = tf.tf_promote(x_, v_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function 
inplace_sub_eager(x_, i_, v_; name=nothing) + desc = tf.EagerOp("InplaceSub") + x_ = convert(tf.EagerTensor, x_) + i_ = convert(tf.EagerTensor, i_) + v_ = convert(tf.EagerTensor, v_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, i_) + end + begin + tf.add_input(desc, v_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(v_) + end + res = tf.execute(desc) + node = tf.TapeNode(inplace_sub, [x_, i_, v_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inplace_sub(x_, i_, v_; name=nothing) + if tf.in_eager_mode() + inplace_sub_eager(x_, i_, v_; name=name) + else + inplace_sub_graph(x_, i_, v_; name=name) + end end - end + end end @@ -53054,40 +96975,78 @@ end """ begin - function pow_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Pow") do - desc = tf.NodeDescription("Pow") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function pow_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Pow") do + desc = tf.NodeDescription("Pow") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function pow_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Pow") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(pow, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function pow_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Pow") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(pow, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pow(x_, y_; name=nothing) - if tf.in_eager_mode() - pow_eager(x_, y_; name=name) - else - pow_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function pow(x_, y_; name=nothing) + if tf.in_eager_mode() + pow_eager(x_, y_; name=name) + else + pow_graph(x_, y_; name=name) + end end - end + end end @@ -53097,35 +97056,63 @@ end """ begin - function ref_next_iteration_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "RefNextIteration") do - desc = tf.NodeDescription("RefNextIteration") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) + begin + function ref_next_iteration_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "RefNextIteration") do + desc = tf.NodeDescription("RefNextIteration") + begin + begin + data_ = 
convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function ref_next_iteration_eager(data_; name=nothing) - desc = tf.EagerOp("RefNextIteration") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(ref_next_iteration, [data_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function ref_next_iteration_eager(data_; name=nothing) + desc = tf.EagerOp("RefNextIteration") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(ref_next_iteration, [data_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_next_iteration(data_; name=nothing) - if tf.in_eager_mode() - ref_next_iteration_eager(data_; name=name) - else - ref_next_iteration_graph(data_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function ref_next_iteration(data_; name=nothing) + if tf.in_eager_mode() + ref_next_iteration_eager(data_; name=name) + else + ref_next_iteration_graph(data_; name=name) + end end - end + end end @@ -53135,39 +97122,75 @@ end """ begin - function scalar_summary_graph(tags_, values_; name=nothing) - local desc - tf.with_op_name(name, "ScalarSummary") do - desc = tf.NodeDescription("ScalarSummary") - tags_ = convert(Tensor{String}, tags_) - values_ = convert(Tensor{Any}, values_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, tags_) - tf.add_input(desc, values_) + begin + function scalar_summary_graph(tags_, values_; name=nothing) + local desc + tf.with_op_name(name, "ScalarSummary") do + desc = tf.NodeDescription("ScalarSummary") + begin + begin + tags_ = convert(Tensor{String}, tags_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, tags_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function scalar_summary_eager(tags_, values_; name=nothing) - desc = tf.EagerOp("ScalarSummary") - tags_ = convert(tf.EagerTensor, tags_) - values_ = convert(tf.EagerTensor, values_) - tf.add_input(desc, tags_) - tf.add_input(desc, values_) - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function scalar_summary_eager(tags_, values_; name=nothing) + desc = tf.EagerOp("ScalarSummary") + tags_ = convert(tf.EagerTensor, tags_) + values_ = convert(tf.EagerTensor, values_) + begin + begin + tf.add_input(desc, tags_) + end + begin + tf.add_input(desc, values_) + end + end + begin + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(scalar_summary, [tags_, values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scalar_summary(tags_, values_; name=nothing) - if tf.in_eager_mode() - scalar_summary_eager(tags_, values_; name=name) - else - scalar_summary_graph(tags_, values_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scalar_summary(tags_, values_; name=nothing) + if tf.in_eager_mode() + scalar_summary_eager(tags_, values_; name=name) + else + scalar_summary_graph(tags_, values_; name=name) + end end - end + end end @@ -53177,48 +97200,86 @@ end """ begin - function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) - local desc - tf.with_op_name(name, "StringSplitV2") do - desc = tf.NodeDescription("StringSplitV2") - input_ = convert(Tensor{String}, input_) - sep_ = convert(Tensor{String}, sep_) - tf.add_input(desc, input_) - tf.add_input(desc, sep_) - if maxsplit !== nothing - desc["maxsplit"] = Base.Int(maxsplit) + begin + function string_split_v2_graph(input_, sep_; name=nothing, maxsplit=nothing) + local desc + tf.with_op_name(name, "StringSplitV2") do + desc = tf.NodeDescription("StringSplitV2") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + begin + sep_ = convert(Tensor{String}, sep_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, sep_) + end + end + begin + begin + if maxsplit !== nothing + desc["maxsplit"] = Base.Int(maxsplit) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) + desc = tf.EagerOp("StringSplitV2") + input_ = convert(tf.EagerTensor, input_) + sep_ = convert(tf.EagerTensor, sep_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, sep_) + end + end + begin + begin + if maxsplit !== nothing + desc["maxsplit"] = Base.Int(maxsplit) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) + if tf.in_eager_mode() + string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) + else + string_split_v2_graph(input_, sep_; name=name, maxsplit=maxsplit) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function string_split_v2_eager(input_, sep_; name=nothing, maxsplit=nothing) - desc = tf.EagerOp("StringSplitV2") - input_ = convert(tf.EagerTensor, input_) - sep_ = convert(tf.EagerTensor, sep_) - tf.add_input(desc, input_) - tf.add_input(desc, sep_) - if maxsplit !== nothing - desc["maxsplit"] = Base.Int(maxsplit) - end - res = tf.execute(desc) - node = tf.TapeNode(string_split_v2, [input_, sep_], name=nothing, maxsplit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_split_v2(input_, sep_; name=nothing, maxsplit=nothing) - if tf.in_eager_mode() - string_split_v2_eager(input_, sep_; name=name, maxsplit=maxsplit) - else - string_split_v2_graph(input_, sep_; name=name, 
maxsplit=maxsplit) - end - end end @@ -53228,35 +97289,63 @@ end """ begin - function bessel_i0e_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "BesselI0e") do - desc = tf.NodeDescription("BesselI0e") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function bessel_i0e_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "BesselI0e") do + desc = tf.NodeDescription("BesselI0e") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bessel_i0e_eager(x_; name=nothing) - desc = tf.EagerOp("BesselI0e") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(bessel_i0e, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bessel_i0e_eager(x_; name=nothing) + desc = tf.EagerOp("BesselI0e") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(bessel_i0e, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i0e(x_; name=nothing) - if tf.in_eager_mode() - bessel_i0e_eager(x_; name=name) - else - bessel_i0e_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bessel_i0e(x_; name=nothing) + if tf.in_eager_mode() + bessel_i0e_eager(x_; name=name) + else + bessel_i0e_graph(x_; name=name) + end end - end + end end @@ -53266,46 +97355,80 @@ end """ begin - function unique_graph(x_; name=nothing, out_idx=nothing) - local desc - tf.with_op_name(name, "Unique") do - desc = tf.NodeDescription("Unique") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) + begin + function unique_graph(x_; name=nothing, out_idx=nothing) + local desc + tf.with_op_name(name, "Unique") do + desc = tf.NodeDescription("Unique") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if out_idx !== nothing + desc["out_idx"] = Base.identity(out_idx) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out end end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function unique_eager(x_; name=nothing, out_idx=nothing) - desc = tf.EagerOp("Unique") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if out_idx !== nothing - desc["out_idx"] = Base.identity(out_idx) - end - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function unique_eager(x_; name=nothing, out_idx=nothing) + desc = tf.EagerOp("Unique") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if out_idx !== nothing 
+ desc["out_idx"] = Base.identity(out_idx) + end + end + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(unique, [x_], name=nothing, out_idx=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique(x_; name=nothing, out_idx=nothing) - if tf.in_eager_mode() - unique_eager(x_; name=name, out_idx=out_idx) - else - unique_graph(x_; name=name, out_idx=out_idx) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unique(x_; name=nothing, out_idx=nothing) + if tf.in_eager_mode() + unique_eager(x_; name=name, out_idx=out_idx) + else + unique_graph(x_; name=name, out_idx=out_idx) + end end - end + end end @@ -53315,35 +97438,63 @@ end """ begin - function next_iteration_graph(data_; name=nothing) - local desc - tf.with_op_name(name, "NextIteration") do - desc = tf.NodeDescription("NextIteration") - data_ = convert(Tensor{Any}, data_) - (data_,) = tf.tf_promote(data_) - tf.add_input(desc, data_) + begin + function next_iteration_graph(data_; name=nothing) + local desc + tf.with_op_name(name, "NextIteration") do + desc = tf.NodeDescription("NextIteration") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + (data_,) = tf.tf_promote(data_) + end + end + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function next_iteration_eager(data_; name=nothing) - desc = tf.EagerOp("NextIteration") - data_ = convert(tf.EagerTensor, data_) - tf.add_input(desc, data_) - desc["T"] = tf.data_type(data_) - res = tf.execute(desc) - node = tf.TapeNode(next_iteration, [data_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function next_iteration_eager(data_; name=nothing) + desc = tf.EagerOp("NextIteration") + data_ = convert(tf.EagerTensor, data_) + begin + begin + tf.add_input(desc, data_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + res = tf.execute(desc) + node = tf.TapeNode(next_iteration, [data_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function next_iteration(data_; name=nothing) - if tf.in_eager_mode() - next_iteration_eager(data_; name=name) - else - next_iteration_graph(data_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function next_iteration(data_; name=nothing) + if tf.in_eager_mode() + next_iteration_eager(data_; name=name) + else + next_iteration_graph(data_; name=name) + end end - end + end end @@ -53353,65 +97504,121 @@ end Load embedding parameters for a single table. 
""" begin - function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do - desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(Tensor{Float32}, parameters_) - ms_ = convert(Tensor{Float32}, ms_) - mom_ = convert(Tensor{Float32}, mom_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) + begin + function load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "LoadTPUEmbeddingRMSPropParameters") do + desc = tf.NodeDescription("LoadTPUEmbeddingRMSPropParameters") + begin + begin + parameters_ = convert(Tensor{Float32}, parameters_) + begin + end + end + begin + ms_ = convert(Tensor{Float32}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Float32}, mom_) + begin + end + end + end + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") + parameters_ = convert(tf.EagerTensor, parameters_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + begin + begin + tf.add_input(desc, parameters_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + 
load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - tf.Tensor(tf.Operation(desc)) - end - function load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("LoadTPUEmbeddingRMSPropParameters") - parameters_ = convert(tf.EagerTensor, parameters_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - tf.add_input(desc, parameters_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(load_tpu_embedding_rms_prop_parameters, [parameters_, ms_, mom_], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function load_tpu_embedding_rms_prop_parameters(parameters_, ms_, mom_; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - load_tpu_embedding_rms_prop_parameters_eager(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - load_tpu_embedding_rms_prop_parameters_graph(parameters_, ms_, mom_; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -53421,51 +97628,87 @@ end """ begin - function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "EagerPyFunc") do - desc = tf.NodeDescription("EagerPyFunc") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) + begin + function eager_py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "EagerPyFunc") do + desc = tf.NodeDescription("EagerPyFunc") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("EagerPyFunc") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + 
desc["Tout"] = map(Base.identity, Tout) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function eager_py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - desc = tf.EagerOp("EagerPyFunc") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - res = tf.execute(desc) - node = tf.TapeNode(eager_py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function eager_py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - eager_py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - eager_py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) - end - end end @@ -53475,41 +97718,65 @@ end """ begin - function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "WholeFileReaderV2") do - desc = tf.NodeDescription("WholeFileReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) + begin + function whole_file_reader_v2_graph(; name=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "WholeFileReaderV2") do + desc = tf.NodeDescription("WholeFileReaderV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("WholeFileReaderV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function whole_file_reader_v2_eager(; name=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("WholeFileReaderV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(whole_file_reader_v2, [], name=nothing, container=nothing, 
shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) - else - whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function whole_file_reader_v2(; name=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + whole_file_reader_v2_eager(; name=name, container=container, shared_name=shared_name) + else + whole_file_reader_v2_graph(; name=name, container=container, shared_name=shared_name) + end end - end + end end @@ -53519,47 +97786,97 @@ end """ begin - function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) - local desc - tf.with_op_name(name, "TensorScatterSub") do - desc = tf.NodeDescription("TensorScatterSub") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (tensor_, updates_) = tf.tf_promote(tensor_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) - desc = tf.EagerOp("TensorScatterSub") - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - desc["T"] = tf.data_type(tensor_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) - if tf.in_eager_mode() - tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) - else - tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) + begin + function tensor_scatter_sub_graph(tensor_, indices_, updates_; name=nothing) + local desc + tf.with_op_name(name, "TensorScatterSub") do + desc = tf.NodeDescription("TensorScatterSub") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (tensor_, updates_) = tf.tf_promote(tensor_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_scatter_sub_eager(tensor_, indices_, updates_; name=nothing) + desc = tf.EagerOp("TensorScatterSub") + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, 
indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_scatter_sub, [tensor_, indices_, updates_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_scatter_sub(tensor_, indices_, updates_; name=nothing) + if tf.in_eager_mode() + tensor_scatter_sub_eager(tensor_, indices_, updates_; name=name) + else + tensor_scatter_sub_graph(tensor_, indices_, updates_; name=name) + end end - end + end end @@ -53569,53 +97886,107 @@ end """ begin - function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterMax") do - desc = tf.NodeDescription("ScatterMax") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_max_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMax") do + desc = tf.NodeDescription("ScatterMax") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMax") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, 
use_locking=nothing) + if tf.in_eager_mode() + scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_max_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterMax") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_max, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_max(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_max_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_max_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -53625,35 +97996,63 @@ end """ begin - function sqrt_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Sqrt") do - desc = tf.NodeDescription("Sqrt") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function sqrt_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Sqrt") do + desc = tf.NodeDescription("Sqrt") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function sqrt_eager(x_; name=nothing) - desc = tf.EagerOp("Sqrt") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(sqrt, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function sqrt_eager(x_; name=nothing) + desc = tf.EagerOp("Sqrt") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(sqrt, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt(x_; name=nothing) - if tf.in_eager_mode() - sqrt_eager(x_; name=name) - else - sqrt_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sqrt(x_; name=nothing) + if tf.in_eager_mode() + sqrt_eager(x_; name=name) + else + sqrt_graph(x_; name=name) + end end - end + end end @@ -53663,43 +98062,79 @@ end """ begin - function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AccumulatorTakeGradient") do - desc = tf.NodeDescription("AccumulatorTakeGradient") - handle_ = convert(Tensor{String}, handle_) - num_required_ = convert(Tensor{Int32}, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, 
num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function accumulator_take_gradient_graph(handle_, num_required_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AccumulatorTakeGradient") do + desc = tf.NodeDescription("AccumulatorTakeGradient") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + num_required_ = convert(Tensor{Int32}, num_required_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_required_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AccumulatorTakeGradient") + handle_ = convert(tf.EagerTensor, handle_) + num_required_ = convert(tf.EagerTensor, num_required_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, num_required_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) + else + accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function accumulator_take_gradient_eager(handle_, num_required_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AccumulatorTakeGradient") - handle_ = convert(tf.EagerTensor, handle_) - num_required_ = convert(tf.EagerTensor, num_required_) - tf.add_input(desc, handle_) - tf.add_input(desc, num_required_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(accumulator_take_gradient, [handle_, num_required_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function accumulator_take_gradient(handle_, num_required_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - accumulator_take_gradient_eager(handle_, num_required_; name=name, dtype=dtype) - else - accumulator_take_gradient_graph(handle_, num_required_; name=name, dtype=dtype) - end - end end @@ -53709,53 +98144,109 @@ end Returns x + y element-wise. 
""" begin - function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklAdd") do - desc = tf.NodeDescription("_MklAdd") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) - desc = tf.EagerOp("_MklAdd") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - mkl_x_ = convert(tf.EagerTensor, mkl_x_) - mkl_y_ = convert(tf.EagerTensor, mkl_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) + begin + function _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklAdd") do + desc = tf.NodeDescription("_MklAdd") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + begin + end + end + begin + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) + desc = tf.EagerOp("_MklAdd") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(_mkl_add, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_add(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_add_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_add_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - end + end end @@ -53765,39 +98256,67 @@ end An op which emits multiple Tensor values from an XLA computation. 
""" begin - function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) - local desc - tf.with_op_name(name, "OutfeedEnqueueTuple") do - desc = tf.NodeDescription("OutfeedEnqueueTuple") - inputs_ = [convert(Tensor{Any}, x) for x = inputs_] - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function outfeed_enqueue_tuple_graph(inputs_; name=nothing, dtypes=nothing) + local desc + tf.with_op_name(name, "OutfeedEnqueueTuple") do + desc = tf.NodeDescription("OutfeedEnqueueTuple") + begin + begin + inputs_ = [convert(Tensor{Any}, x) for x = inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) - desc = tf.EagerOp("OutfeedEnqueueTuple") - inputs_ = convert(tf.EagerTensor, inputs_) - tf.add_input(desc, inputs_) - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - res = tf.execute(desc) - node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function outfeed_enqueue_tuple_eager(inputs_; name=nothing, dtypes=nothing) + desc = tf.EagerOp("OutfeedEnqueueTuple") + inputs_ = convert(tf.EagerTensor, inputs_) + begin + begin + tf.add_input(desc, inputs_) + end + end + begin + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(outfeed_enqueue_tuple, [inputs_], name=nothing, dtypes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) - if tf.in_eager_mode() - outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) - else - outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function outfeed_enqueue_tuple(inputs_; name=nothing, dtypes=nothing) + if tf.in_eager_mode() + outfeed_enqueue_tuple_eager(inputs_; name=name, dtypes=dtypes) + else + outfeed_enqueue_tuple_graph(inputs_; name=name, dtypes=dtypes) + end end - end + end end @@ -53807,35 +98326,63 @@ end """ begin - function reciprocal_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Reciprocal") do - desc = tf.NodeDescription("Reciprocal") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function reciprocal_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Reciprocal") do + desc = tf.NodeDescription("Reciprocal") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function reciprocal_eager(x_; name=nothing) - desc = tf.EagerOp("Reciprocal") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(reciprocal, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function 
reciprocal_eager(x_; name=nothing) + desc = tf.EagerOp("Reciprocal") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(reciprocal, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal(x_; name=nothing) - if tf.in_eager_mode() - reciprocal_eager(x_; name=name) - else - reciprocal_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reciprocal(x_; name=nothing) + if tf.in_eager_mode() + reciprocal_eager(x_; name=name) + else + reciprocal_graph(x_; name=name) + end end - end + end end @@ -53845,33 +98392,57 @@ end """ begin - function string_strip_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "StringStrip") do - desc = tf.NodeDescription("StringStrip") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) + begin + function string_strip_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "StringStrip") do + desc = tf.NodeDescription("StringStrip") + begin + begin + input_ = convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function string_strip_eager(input_; name=nothing) - desc = tf.EagerOp("StringStrip") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(string_strip, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function string_strip_eager(input_; name=nothing) + desc = tf.EagerOp("StringStrip") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(string_strip, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_strip(input_; name=nothing) - if tf.in_eager_mode() - string_strip_eager(input_; name=name) - else - string_strip_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_strip(input_; name=nothing) + if tf.in_eager_mode() + string_strip_eager(input_; name=name) + else + string_strip_graph(input_; name=name) + end end - end + end end @@ -53881,33 +98452,57 @@ end """ begin - function barrier_ready_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "BarrierReadySize") do - desc = tf.NodeDescription("BarrierReadySize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function barrier_ready_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "BarrierReadySize") do + desc = tf.NodeDescription("BarrierReadySize") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function barrier_ready_size_eager(handle_; name=nothing) - desc = tf.EagerOp("BarrierReadySize") - handle_ = convert(tf.EagerTensor, handle_) - 
tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function barrier_ready_size_eager(handle_; name=nothing) + desc = tf.EagerOp("BarrierReadySize") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(barrier_ready_size, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_ready_size(handle_; name=nothing) - if tf.in_eager_mode() - barrier_ready_size_eager(handle_; name=name) - else - barrier_ready_size_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_ready_size(handle_; name=nothing) + if tf.in_eager_mode() + barrier_ready_size_eager(handle_; name=name) + else + barrier_ready_size_graph(handle_; name=name) + end end - end + end end @@ -53917,53 +98512,101 @@ end """ begin - function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - local desc - tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do - desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") - inputs_ = convert(Tensor{Float32}, inputs_) - min_ = convert(Tensor{Float32}, min_) - max_ = convert(Tensor{Float32}, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) + begin + function fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + local desc + tf.with_op_name(name, "FakeQuantWithMinMaxVarsPerChannel") do + desc = tf.NodeDescription("FakeQuantWithMinMaxVarsPerChannel") + begin + begin + inputs_ = convert(Tensor{Float32}, inputs_) + begin + end + end + begin + min_ = convert(Tensor{Float32}, min_) + begin + end + end + begin + max_ = convert(Tensor{Float32}, max_) + begin + end + end + end + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") + inputs_ = convert(tf.EagerTensor, inputs_) + min_ = convert(tf.EagerTensor, min_) + max_ = convert(tf.EagerTensor, max_) + begin + begin + tf.add_input(desc, inputs_) + end + begin + tf.add_input(desc, min_) + end + begin + tf.add_input(desc, max_) + end + end + begin + begin + if num_bits !== nothing + desc["num_bits"] = Base.Int(num_bits) + end + end + begin + if narrow_range !== nothing + desc["narrow_range"] = Base.Bool(narrow_range) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) + if tf.in_eager_mode() + fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + else + fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - desc = tf.EagerOp("FakeQuantWithMinMaxVarsPerChannel") - inputs_ = convert(tf.EagerTensor, inputs_) - min_ = convert(tf.EagerTensor, min_) - max_ = convert(tf.EagerTensor, max_) - tf.add_input(desc, inputs_) - tf.add_input(desc, min_) - tf.add_input(desc, max_) - if num_bits !== nothing - desc["num_bits"] = Base.Int(num_bits) - end - if narrow_range !== nothing - desc["narrow_range"] = Base.Bool(narrow_range) - end - res = tf.execute(desc) - node = tf.TapeNode(fake_quant_with_min_max_vars_per_channel, [inputs_, min_, max_], name=nothing, num_bits=nothing, narrow_range=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fake_quant_with_min_max_vars_per_channel(inputs_, min_, max_; name=nothing, num_bits=nothing, narrow_range=nothing) - if tf.in_eager_mode() - fake_quant_with_min_max_vars_per_channel_eager(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - else - fake_quant_with_min_max_vars_per_channel_graph(inputs_, min_, max_; name=name, num_bits=num_bits, narrow_range=narrow_range) - end - end end @@ -53973,39 +98616,67 @@ end """ begin - function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) - local desc - tf.with_op_name(name, "StringToHashBucket") do - desc = tf.NodeDescription("StringToHashBucket") - string_tensor_ = convert(Tensor{String}, string_tensor_) - tf.add_input(desc, string_tensor_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) + begin + function string_to_hash_bucket_graph(string_tensor_; name=nothing, num_buckets=nothing) + local desc + tf.with_op_name(name, "StringToHashBucket") do + desc = tf.NodeDescription("StringToHashBucket") + begin + begin + string_tensor_ = convert(Tensor{String}, string_tensor_) + begin + end + end + end + begin + begin + tf.add_input(desc, string_tensor_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) - desc = tf.EagerOp("StringToHashBucket") - string_tensor_ = convert(tf.EagerTensor, string_tensor_) - tf.add_input(desc, string_tensor_) - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - res = tf.execute(desc) - node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function string_to_hash_bucket_eager(string_tensor_; name=nothing, num_buckets=nothing) + desc = tf.EagerOp("StringToHashBucket") + 
string_tensor_ = convert(tf.EagerTensor, string_tensor_) + begin + begin + tf.add_input(desc, string_tensor_) + end + end + begin + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(string_to_hash_bucket, [string_tensor_], name=nothing, num_buckets=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) - if tf.in_eager_mode() - string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) - else - string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function string_to_hash_bucket(string_tensor_; name=nothing, num_buckets=nothing) + if tf.in_eager_mode() + string_to_hash_bucket_eager(string_tensor_; name=name, num_buckets=num_buckets) + else + string_to_hash_bucket_graph(string_tensor_; name=name, num_buckets=num_buckets) + end end - end + end end @@ -54015,54 +98686,96 @@ end """ begin - function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - local desc - tf.with_op_name(name, "TensorArrayConcat") do - desc = tf.NodeDescription("TensorArrayConcat") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) + begin + function tensor_array_concat_graph(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + local desc + tf.with_op_name(name, "TensorArrayConcat") do + desc = tf.NodeDescription("TensorArrayConcat") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + desc = tf.EagerOp("TensorArrayConcat") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape_except0 !== nothing + desc["element_shape_except0"] = Base.identity(element_shape_except0) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 
=# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) + if tf.in_eager_mode() + tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + else + tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function tensor_array_concat_eager(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - desc = tf.EagerOp("TensorArrayConcat") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape_except0 !== nothing - desc["element_shape_except0"] = Base.identity(element_shape_except0) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_concat, [handle_, flow_in_], name=nothing, dtype=nothing, element_shape_except0=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_concat(handle_, flow_in_; name=nothing, dtype=nothing, element_shape_except0=nothing) - if tf.in_eager_mode() - tensor_array_concat_eager(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - else - tensor_array_concat_graph(handle_, flow_in_; name=name, dtype=dtype, element_shape_except0=element_shape_except0) - end - end end @@ -54072,41 +98785,81 @@ end """ begin - function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) - local desc - tf.with_op_name(name, "ShardedFilename") do - desc = tf.NodeDescription("ShardedFilename") - basename_ = convert(Tensor{String}, basename_) - shard_ = convert(Tensor{Int32}, shard_) - num_shards_ = convert(Tensor{Int32}, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, shard_) - tf.add_input(desc, num_shards_) - end - tf.Tensor(tf.Operation(desc)) - end - function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) - desc = tf.EagerOp("ShardedFilename") - basename_ = convert(tf.EagerTensor, basename_) - shard_ = convert(tf.EagerTensor, shard_) - num_shards_ = convert(tf.EagerTensor, num_shards_) - tf.add_input(desc, basename_) - tf.add_input(desc, shard_) - tf.add_input(desc, num_shards_) - res = tf.execute(desc) - node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing) - if tf.in_eager_mode() - sharded_filename_eager(basename_, shard_, num_shards_; name=name) - else - sharded_filename_graph(basename_, shard_, num_shards_; name=name) + begin + function sharded_filename_graph(basename_, shard_, num_shards_; name=nothing) + local desc + tf.with_op_name(name, "ShardedFilename") do + desc = tf.NodeDescription("ShardedFilename") + begin + begin + basename_ = convert(Tensor{String}, basename_) + begin + end + end + begin + shard_ = convert(Tensor{Int32}, shard_) + begin + end + end + begin + num_shards_ = convert(Tensor{Int32}, num_shards_) + begin + end + end + end + begin + begin + 
tf.add_input(desc, basename_) + end + begin + tf.add_input(desc, shard_) + end + begin + tf.add_input(desc, num_shards_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sharded_filename_eager(basename_, shard_, num_shards_; name=nothing) + desc = tf.EagerOp("ShardedFilename") + basename_ = convert(tf.EagerTensor, basename_) + shard_ = convert(tf.EagerTensor, shard_) + num_shards_ = convert(tf.EagerTensor, num_shards_) + begin + begin + tf.add_input(desc, basename_) + end + begin + tf.add_input(desc, shard_) + end + begin + tf.add_input(desc, num_shards_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(sharded_filename, [basename_, shard_, num_shards_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sharded_filename(basename_, shard_, num_shards_; name=nothing) + if tf.in_eager_mode() + sharded_filename_eager(basename_, shard_, num_shards_; name=name) + else + sharded_filename_graph(basename_, shard_, num_shards_; name=name) + end end - end + end end @@ -54116,51 +98869,87 @@ end """ begin - function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - local desc - tf.with_op_name(name, "PyFunc") do - desc = tf.NodeDescription("PyFunc") - input_ = [convert(Tensor{Any}, x) for x = input_] - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) + begin + function py_func_graph(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + local desc + tf.with_op_name(name, "PyFunc") do + desc = tf.NodeDescription("PyFunc") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + desc = tf.EagerOp("PyFunc") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if token !== nothing + desc["token"] = Base.String(token) + end + end + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) + if tf.in_eager_mode() + py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) + else + py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function py_func_eager(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - desc = tf.EagerOp("PyFunc") - input_ = 
convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if token !== nothing - desc["token"] = Base.String(token) - end - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - res = tf.execute(desc) - node = tf.TapeNode(py_func, [input_], name=nothing, token=nothing, Tin=nothing, Tout=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function py_func(input_; name=nothing, token=nothing, Tin=nothing, Tout=nothing) - if tf.in_eager_mode() - py_func_eager(input_; name=name, token=token, Tin=Tin, Tout=Tout) - else - py_func_graph(input_; name=name, token=token, Tin=Tin, Tout=Tout) - end - end end @@ -54170,48 +98959,100 @@ end """ begin - function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentProd") do - desc = tf.NodeDescription("UnsortedSegmentProd") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("UnsortedSegmentProd") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) + begin + function unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentProd") do + desc = tf.NodeDescription("UnsortedSegmentProd") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + 
function unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("UnsortedSegmentProd") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_prod, [data_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_prod(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_prod_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_prod_graph(data_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -54221,41 +99062,73 @@ end """ begin - function count_up_to_graph(ref_; name=nothing, limit=nothing) - local desc - tf.with_op_name(name, "CountUpTo") do - desc = tf.NodeDescription("CountUpTo") - ref_ = convert(Tensor{Any}, ref_) - (ref_,) = tf.tf_promote(ref_) - tf.add_input(desc, ref_) - if limit !== nothing - desc["limit"] = Base.Int(limit) + begin + function count_up_to_graph(ref_; name=nothing, limit=nothing) + local desc + tf.with_op_name(name, "CountUpTo") do + desc = tf.NodeDescription("CountUpTo") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + (ref_,) = tf.tf_promote(ref_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function count_up_to_eager(ref_; name=nothing, limit=nothing) - desc = tf.EagerOp("CountUpTo") - ref_ = convert(tf.EagerTensor, ref_) - tf.add_input(desc, ref_) - if limit !== nothing - desc["limit"] = Base.Int(limit) - end - desc["T"] = tf.data_type(ref_) - res = tf.execute(desc) - node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function count_up_to_eager(ref_; name=nothing, limit=nothing) + desc = tf.EagerOp("CountUpTo") + ref_ = convert(tf.EagerTensor, ref_) + begin + begin + tf.add_input(desc, ref_) + end + end + begin + begin + if limit !== nothing + desc["limit"] = Base.Int(limit) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + res = tf.execute(desc) + node = tf.TapeNode(count_up_to, [ref_], name=nothing, limit=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing) - if tf.in_eager_mode() - count_up_to_eager(ref_; name=name, limit=limit) - else - count_up_to_graph(ref_; name=name, limit=limit) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function count_up_to(ref_; name=nothing, limit=nothing) + if tf.in_eager_mode() + count_up_to_eager(ref_; name=name, limit=limit) + else + count_up_to_graph(ref_; name=name, 
limit=limit) + end end - end + end end @@ -54265,59 +99138,111 @@ end """ begin - function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - local desc - tf.with_op_name(name, "RandomGamma") do - desc = tf.NodeDescription("RandomGamma") - shape_ = convert(Tensor{Any}, shape_) - alpha_ = convert(Tensor{Any}, alpha_) - (shape_,) = tf.tf_promote(shape_) - (alpha_,) = tf.tf_promote(alpha_) - tf.add_input(desc, shape_) - tf.add_input(desc, alpha_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) + begin + function random_gamma_graph(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + local desc + tf.with_op_name(name, "RandomGamma") do + desc = tf.NodeDescription("RandomGamma") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (alpha_,) = tf.tf_promote(alpha_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, alpha_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + desc = tf.EagerOp("RandomGamma") + shape_ = convert(tf.EagerTensor, shape_) + alpha_ = convert(tf.EagerTensor, alpha_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, alpha_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if S !== nothing + desc["S"] = Base.identity(S) + end + end + end + begin + desc["S"] = tf.data_type(shape_) + end + begin + desc["T"] = tf.data_type(alpha_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) + if tf.in_eager_mode() + random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + else + random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) + end end - if S !== nothing - desc["S"] = Base.identity(S) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_gamma_eager(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - desc = tf.EagerOp("RandomGamma") - shape_ = convert(tf.EagerTensor, shape_) - alpha_ = convert(tf.EagerTensor, alpha_) - tf.add_input(desc, shape_) - tf.add_input(desc, alpha_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if S !== nothing - desc["S"] = Base.identity(S) - end - desc["S"] = tf.data_type(shape_) - desc["T"] = tf.data_type(alpha_) - res = tf.execute(desc) - node = tf.TapeNode(random_gamma, [shape_, alpha_], name=nothing, seed=nothing, seed2=nothing, S=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma(shape_, alpha_; name=nothing, seed=nothing, seed2=nothing, S=nothing) - if tf.in_eager_mode() - random_gamma_eager(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) - else - random_gamma_graph(shape_, alpha_; name=name, seed=seed, seed2=seed2, S=S) - end - end end @@ -54327,43 +99252,79 @@ end """ begin - function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) - local desc - tf.with_op_name(name, "TensorArrayGrad") do - desc = tf.NodeDescription("TensorArrayGrad") - handle_ = convert(Tensor{String}, handle_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) + begin + function tensor_array_grad_graph(handle_, flow_in_; name=nothing, source=nothing) + local desc + tf.with_op_name(name, "TensorArrayGrad") do + desc = tf.NodeDescription("TensorArrayGrad") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) + desc = tf.EagerOp("TensorArrayGrad") + handle_ = convert(tf.EagerTensor, handle_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if source !== nothing + desc["source"] = Base.String(source) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) + if tf.in_eager_mode() + tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) + else + tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_grad_eager(handle_, flow_in_; name=nothing, source=nothing) - desc = tf.EagerOp("TensorArrayGrad") - handle_ = convert(tf.EagerTensor, handle_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, flow_in_) - if source !== nothing - desc["source"] = Base.String(source) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_grad, [handle_, flow_in_], name=nothing, source=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_grad(handle_, flow_in_; name=nothing, source=nothing) - if tf.in_eager_mode() - tensor_array_grad_eager(handle_, flow_in_; name=name, source=source) - else - tensor_array_grad_graph(handle_, flow_in_; name=name, source=source) - end - end end @@ -54373,58 +99334,108 @@ end """ begin - function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - local desc - tf.with_op_name(name, "Dilation2D") do - desc = 
tf.NodeDescription("Dilation2D") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - (input_, filter_) = tf.tf_promote(input_, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin + function dilation2d_graph(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + local desc + tf.with_op_name(name, "Dilation2D") do + desc = tf.NodeDescription("Dilation2D") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + (input_, filter_) = tf.tf_promote(input_, filter_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + desc = tf.EagerOp("Dilation2D") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if rates !== nothing + desc["rates"] = map(Base.identity, rates) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + res = tf.execute(desc) + node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) + if tf.in_eager_mode() + dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + else + dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function dilation2d_eager(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - desc = tf.EagerOp("Dilation2D") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if rates !== nothing - desc["rates"] = map(Base.identity, rates) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - res = tf.execute(desc) - node = tf.TapeNode(dilation2d, [input_, filter_], name=nothing, strides=nothing, rates=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dilation2d(input_, filter_; name=nothing, strides=nothing, rates=nothing, padding=nothing) - if tf.in_eager_mode() - dilation2d_eager(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) - else - dilation2d_graph(input_, filter_; name=name, strides=strides, rates=rates, padding=padding) - end - end end @@ -54434,61 +99445,117 @@ end """ begin - function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Unbatch") do - desc = tf.NodeDescription("Unbatch") - batched_tensor_ = convert(Tensor{Any}, batched_tensor_) - batch_index_ = convert(Tensor{Int64}, batch_index_) - id_ = convert(Tensor{Int64}, id_) - (batched_tensor_,) = tf.tf_promote(batched_tensor_) - tf.add_input(desc, batched_tensor_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, id_) - if timeout_micros !== nothing - desc["timeout_micros"] = Base.Int(timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function unbatch_graph(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unbatch") do + desc = tf.NodeDescription("Unbatch") + begin + begin + batched_tensor_ = convert(Tensor{Any}, batched_tensor_) + begin + end + end + begin + batch_index_ = convert(Tensor{Int64}, batch_index_) + begin + end + end + begin + id_ = convert(Tensor{Int64}, id_) + begin + end + end + begin + (batched_tensor_,) = tf.tf_promote(batched_tensor_) + end + end + begin + begin + tf.add_input(desc, batched_tensor_) + end + begin + tf.add_input(desc, batch_index_) + end + begin + tf.add_input(desc, id_) + end + end + begin + begin + if timeout_micros !== nothing + desc["timeout_micros"] = Base.Int(timeout_micros) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Unbatch") + batched_tensor_ = convert(tf.EagerTensor, batched_tensor_) + batch_index_ = convert(tf.EagerTensor, batch_index_) + id_ = convert(tf.EagerTensor, id_) + begin + begin + tf.add_input(desc, batched_tensor_) + end + begin + tf.add_input(desc, batch_index_) + end + begin + tf.add_input(desc, id_) + end + end + begin + begin + if timeout_micros !== nothing + desc["timeout_micros"] = Base.Int(timeout_micros) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + begin + desc["T"] = tf.data_type(batched_tensor_) + end + res = tf.execute(desc) + node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) + if 
tf.in_eager_mode() + unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + else + unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function unbatch_eager(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("Unbatch") - batched_tensor_ = convert(tf.EagerTensor, batched_tensor_) - batch_index_ = convert(tf.EagerTensor, batch_index_) - id_ = convert(tf.EagerTensor, id_) - tf.add_input(desc, batched_tensor_) - tf.add_input(desc, batch_index_) - tf.add_input(desc, id_) - if timeout_micros !== nothing - desc["timeout_micros"] = Base.Int(timeout_micros) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - desc["T"] = tf.data_type(batched_tensor_) - res = tf.execute(desc) - node = tf.TapeNode(unbatch, [batched_tensor_, batch_index_, id_], name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unbatch(batched_tensor_, batch_index_, id_; name=nothing, timeout_micros=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unbatch_eager(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) - else - unbatch_graph(batched_tensor_, batch_index_, id_; name=name, timeout_micros=timeout_micros, container=container, shared_name=shared_name) - end - end end @@ -54498,35 +99565,63 @@ end """ begin - function get_session_handle_graph(value_; name=nothing) - local desc - tf.with_op_name(name, "GetSessionHandle") do - desc = tf.NodeDescription("GetSessionHandle") - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, value_) + begin + function get_session_handle_graph(value_; name=nothing) + local desc + tf.with_op_name(name, "GetSessionHandle") do + desc = tf.NodeDescription("GetSessionHandle") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, value_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function get_session_handle_eager(value_; name=nothing) - desc = tf.EagerOp("GetSessionHandle") - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, value_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(get_session_handle, [value_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function get_session_handle_eager(value_; name=nothing) + desc = tf.EagerOp("GetSessionHandle") + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, value_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(get_session_handle, [value_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle(value_; name=nothing) - if tf.in_eager_mode() - get_session_handle_eager(value_; name=name) - else - get_session_handle_graph(value_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function get_session_handle(value_; name=nothing) + if tf.in_eager_mode() + get_session_handle_eager(value_; name=name) + else + get_session_handle_graph(value_; name=name) + end end - end + end end @@ -54536,58 +99631,92 @@ end Retrieve embedding parameters for a single table. """ begin - function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function retrieve_tpu_embedding_adam_parameters_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingADAMParameters") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingADAMParameters") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_adam_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 
1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function retrieve_tpu_embedding_adam_parameters_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingADAMParameters") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_adam_parameters, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_adam_parameters(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_adam_parameters_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_adam_parameters_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -54597,65 +99726,105 @@ end """ begin - function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - local desc - tf.with_op_name(name, "MutableHashTableOfTensorsV2") do - desc = tf.NodeDescription("MutableHashTableOfTensorsV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) + begin + function mutable_hash_table_of_tensors_v2_graph(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + local desc + tf.with_op_name(name, "MutableHashTableOfTensorsV2") do + desc = tf.NodeDescription("MutableHashTableOfTensorsV2") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function mutable_hash_table_of_tensors_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + desc = tf.EagerOp("MutableHashTableOfTensorsV2") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + 
desc["shared_name"] = Base.String(shared_name) + end + end + begin + if use_node_name_sharing !== nothing + desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) + end + end + begin + if key_dtype !== nothing + desc["key_dtype"] = Base.identity(key_dtype) + end + end + begin + if value_dtype !== nothing + desc["value_dtype"] = Base.identity(value_dtype) + end + end + begin + if value_shape !== nothing + desc["value_shape"] = Base.identity(value_shape) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) + if tf.in_eager_mode() + mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + else + mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) + end end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - end - tf.Tensor(tf.Operation(desc)) - end - function mutable_hash_table_of_tensors_v2_eager(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - desc = tf.EagerOp("MutableHashTableOfTensorsV2") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if use_node_name_sharing !== nothing - desc["use_node_name_sharing"] = Base.Bool(use_node_name_sharing) - end - if key_dtype !== nothing - desc["key_dtype"] = Base.identity(key_dtype) - end - if value_dtype !== nothing - desc["value_dtype"] = Base.identity(value_dtype) - end - if value_shape !== nothing - desc["value_shape"] = Base.identity(value_shape) - end - res = tf.execute(desc) - node = tf.TapeNode(mutable_hash_table_of_tensors_v2, [], name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mutable_hash_table_of_tensors_v2(; name=nothing, container=nothing, shared_name=nothing, use_node_name_sharing=nothing, key_dtype=nothing, value_dtype=nothing, value_shape=nothing) - if tf.in_eager_mode() - mutable_hash_table_of_tensors_v2_eager(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, value_dtype=value_dtype, value_shape=value_shape) - else - mutable_hash_table_of_tensors_v2_graph(; name=name, container=container, shared_name=shared_name, use_node_name_sharing=use_node_name_sharing, key_dtype=key_dtype, 
value_dtype=value_dtype, value_shape=value_shape) - end - end end @@ -54665,83 +99834,197 @@ end """ begin - function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "SparseApplyFtrl") do - desc = tf.NodeDescription("SparseApplyFtrl") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - linear_ = convert(Tensor{Any}, linear_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - lr_ = convert(Tensor{Any}, lr_) - l1_ = convert(Tensor{Any}, l1_) - l2_ = convert(Tensor{Any}, l2_) - lr_power_ = convert(Tensor{Any}, lr_power_) - (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("SparseApplyFtrl") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - linear_ = convert(tf.EagerTensor, linear_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - lr_ = convert(tf.EagerTensor, lr_) - l1_ = convert(tf.EagerTensor, l1_) - l2_ = convert(tf.EagerTensor, l2_) - lr_power_ = convert(tf.EagerTensor, lr_power_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, linear_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, lr_) - tf.add_input(desc, l1_) - tf.add_input(desc, l2_) - tf.add_input(desc, lr_power_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(var_) - desc["T"] = tf.data_type(accum_) - desc["T"] = tf.data_type(linear_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(l1_) - desc["T"] = tf.data_type(l2_) - desc["T"] = tf.data_type(lr_power_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) - else - sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + begin + function sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "SparseApplyFtrl") do + desc = tf.NodeDescription("SparseApplyFtrl") + begin + begin + var_ = 
convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + linear_ = convert(Tensor{Any}, linear_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + l1_ = convert(Tensor{Any}, l1_) + begin + end + end + begin + l2_ = convert(Tensor{Any}, l2_) + begin + end + end + begin + lr_power_ = convert(Tensor{Any}, lr_power_) + begin + end + end + begin + (var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) = tf.tf_promote(var_, accum_, linear_, grad_, lr_, l1_, l2_, lr_power_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("SparseApplyFtrl") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + linear_ = convert(tf.EagerTensor, linear_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + lr_ = convert(tf.EagerTensor, lr_) + l1_ = convert(tf.EagerTensor, l1_) + l2_ = convert(tf.EagerTensor, l2_) + lr_power_ = convert(tf.EagerTensor, lr_power_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, linear_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, l1_) + end + begin + tf.add_input(desc, l2_) + end + begin + tf.add_input(desc, lr_power_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(var_) + end + begin + desc["T"] = tf.data_type(accum_) + end + begin + desc["T"] = tf.data_type(linear_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(l1_) + end + begin + desc["T"] = tf.data_type(l2_) + end + begin + desc["T"] = tf.data_type(lr_power_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_apply_ftrl, [var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_apply_ftrl(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + sparse_apply_ftrl_eager(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + else + 
sparse_apply_ftrl_graph(var_, accum_, linear_, grad_, indices_, lr_, l1_, l2_, lr_power_; name=name, use_locking=use_locking) + end end - end + end end @@ -54751,53 +100034,101 @@ end """ begin - function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "BatchDatasetV2") do - desc = tf.NodeDescription("BatchDatasetV2") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - batch_size_ = convert(Tensor{Int64}, batch_size_) - drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "BatchDatasetV2") do + desc = tf.NodeDescription("BatchDatasetV2") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + batch_size_ = convert(Tensor{Int64}, batch_size_) + begin + end + end + begin + drop_remainder_ = convert(Tensor{Bool}, drop_remainder_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("BatchDatasetV2") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + batch_size_ = convert(tf.EagerTensor, batch_size_) + drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, batch_size_) + end + begin + tf.add_input(desc, drop_remainder_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + else + batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=nothing, 
output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("BatchDatasetV2") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - batch_size_ = convert(tf.EagerTensor, batch_size_) - drop_remainder_ = convert(tf.EagerTensor, drop_remainder_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, batch_size_) - tf.add_input(desc, drop_remainder_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(batch_dataset_v2, [input_dataset_, batch_size_, drop_remainder_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_dataset_v2(input_dataset_, batch_size_, drop_remainder_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - batch_dataset_v2_eager(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - else - batch_dataset_v2_graph(input_dataset_, batch_size_, drop_remainder_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -54807,61 +100138,133 @@ end """ begin - function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSparseMinimum") do - desc = tf.NodeDescription("SparseSparseMinimum") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - desc = tf.EagerOp("SparseSparseMinimum") - a_indices_ = convert(tf.EagerTensor, a_indices_) - a_values_ = convert(tf.EagerTensor, a_values_) - a_shape_ = convert(tf.EagerTensor, a_shape_) - b_indices_ = convert(tf.EagerTensor, b_indices_) - b_values_ = convert(tf.EagerTensor, b_values_) - b_shape_ = convert(tf.EagerTensor, b_shape_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - desc["T"] = tf.data_type(a_values_) - desc["T"] = tf.data_type(b_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.in_eager_mode() - sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; 
name=name) - else - sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + begin + function sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSparseMinimum") do + desc = tf.NodeDescription("SparseSparseMinimum") + begin + begin + a_indices_ = convert(Tensor{Int64}, a_indices_) + begin + end + end + begin + a_values_ = convert(Tensor{Any}, a_values_) + begin + end + end + begin + a_shape_ = convert(Tensor{Int64}, a_shape_) + begin + end + end + begin + b_indices_ = convert(Tensor{Int64}, b_indices_) + begin + end + end + begin + b_values_ = convert(Tensor{Any}, b_values_) + begin + end + end + begin + b_shape_ = convert(Tensor{Int64}, b_shape_) + begin + end + end + begin + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + end + end + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + desc = tf.EagerOp("SparseSparseMinimum") + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + b_values_ = convert(tf.EagerTensor, b_values_) + b_shape_ = convert(tf.EagerTensor, b_shape_) + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_values_) + end + begin + desc["T"] = tf.data_type(b_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_sparse_minimum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_minimum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.in_eager_mode() + sparse_sparse_minimum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_minimum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end end - end + end end @@ -54871,42 +100274,82 @@ end """ begin - function reverse_v2_graph(tensor_, axis_; name=nothing) - local desc - tf.with_op_name(name, "ReverseV2") do - desc = tf.NodeDescription("ReverseV2") - tensor_ = convert(Tensor{Any}, tensor_) - axis_ = convert(Tensor{Int32}, axis_) - axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) - (tensor_,) = tf.tf_promote(tensor_) - (axis_,) = tf.tf_promote(axis_) - tf.add_input(desc, tensor_) - tf.add_input(desc, axis_) - end - tf.Tensor(tf.Operation(desc)) - end - function reverse_v2_eager(tensor_, axis_; name=nothing) - desc = tf.EagerOp("ReverseV2") - tensor_ = 
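
Ops with more than one output, such as `SparseSparseMinimum` above (and `_MklSquaredDifference` later in this hunk), diverge from the single-output template in both modes: the graph builder collects one `tf.Tensor` per output index, and the eager builder returns the whole result vector (`return res`) rather than `res[1]`. The graph-side pattern, as emitted:

    # one symbolic Tensor per op output (SparseSparseMinimum has two)
    out = tf.Tensor[]
    op = tf.Operation(desc)
    for out_idx = 1:2
        push!(out, tf.Tensor(op, out_idx))
    end
    out
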
convert(tf.EagerTensor, tensor_) - axis_ = convert(tf.EagerTensor, axis_) - tf.add_input(desc, tensor_) - tf.add_input(desc, axis_) - desc["T"] = tf.data_type(tensor_) - desc["Tidx"] = tf.data_type(axis_) - res = tf.execute(desc) - node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing) - if tf.in_eager_mode() - reverse_v2_eager(tensor_, axis_; name=name) - else - reverse_v2_graph(tensor_, axis_; name=name) + begin + function reverse_v2_graph(tensor_, axis_; name=nothing) + local desc + tf.with_op_name(name, "ReverseV2") do + desc = tf.NodeDescription("ReverseV2") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + axis_ = convert(Tensor{Int32}, axis_) + begin + axis_ = axis_ - convert(tf.Tensor{eltype(axis_)}, 1) + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (axis_,) = tf.tf_promote(axis_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function reverse_v2_eager(tensor_, axis_; name=nothing) + desc = tf.EagerOp("ReverseV2") + tensor_ = convert(tf.EagerTensor, tensor_) + axis_ = convert(tf.EagerTensor, axis_) + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, axis_) + end + end + begin + end + begin + desc["T"] = tf.data_type(tensor_) + end + begin + desc["Tidx"] = tf.data_type(axis_) + end + res = tf.execute(desc) + node = tf.TapeNode(reverse_v2, [tensor_, axis_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reverse_v2(tensor_, axis_; name=nothing) + if tf.in_eager_mode() + reverse_v2_eager(tensor_, axis_; name=name) + else + reverse_v2_graph(tensor_, axis_; name=name) + end end - end + end end @@ -54916,120 +100359,224 @@ end """ begin - function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSlice") do - desc = tf.NodeDescription("StridedSlice") - input_ = convert(Tensor{Any}, input_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - (input_,) = tf.tf_promote(input_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 + begin + function strided_slice_graph(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSlice") do + desc = tf.NodeDescription("StridedSlice") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + 
begin + begin_ = convert(Tensor{Any}, begin_) + begin + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end + end + begin + end_ = convert(Tensor{Any}, end_) + begin + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + end + end + begin + strides_ = convert(Tensor{Any}, strides_) + begin + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("StridedSlice") + input_ = convert(tf.EagerTensor, input_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Index"] = 
tf.data_type(begin_) + end + begin + desc["Index"] = tf.data_type(end_) + end + begin + desc["Index"] = tf.data_type(strides_) + end + res = tf.execute(desc) + node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) end - function strided_slice_eager(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - desc = tf.EagerOp("StridedSlice") - input_ = convert(tf.EagerTensor, input_) - begin_ = convert(tf.EagerTensor, begin_) - end_ = convert(tf.EagerTensor, end_) - strides_ = convert(tf.EagerTensor, strides_) - tf.add_input(desc, input_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - desc["T"] = tf.data_type(input_) - desc["Index"] = tf.data_type(begin_) - desc["Index"] = tf.data_type(end_) - desc["Index"] = 
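
The repeated `x_ = x_ - convert(tf.Tensor{eltype(x_)}, 1)` lines compensate for the indexing mismatch between the two languages: Julia callers pass 1-based indices, while the TensorFlow kernels expect 0-based ones, so index-like inputs (`begin_` and `end_` here, `axis_` in `reverse_v2`, `indices_` and `segment_ids_` in the sparse and segment ops) are shifted at the boundary. A hypothetical helper capturing the same shift:

    # 1-based (Julia) -> 0-based (TensorFlow kernel); the subtraction broadcasts,
    # so it works for scalar and array index tensors alike
    to_zero_based(idx) = idx - convert(tf.Tensor{eltype(idx)}, 1)

    # e.g. reverse_v2(x, 1) called from Julia reverses what TensorFlow calls axis 0
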
tf.data_type(strides_) - res = tf.execute(desc) - node = tf.TapeNode(strided_slice, [input_, begin_, end_, strides_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice(input_, begin_, end_, strides_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_eager(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_graph(input_, begin_, end_, strides_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - end - end end @@ -55039,33 +100586,57 @@ end """ begin - function matching_files_graph(pattern_; name=nothing) - local desc - tf.with_op_name(name, "MatchingFiles") do - desc = tf.NodeDescription("MatchingFiles") - pattern_ = convert(Tensor{String}, pattern_) - tf.add_input(desc, pattern_) + begin + function matching_files_graph(pattern_; name=nothing) + local desc + tf.with_op_name(name, "MatchingFiles") do + desc = tf.NodeDescription("MatchingFiles") + begin + begin + pattern_ = convert(Tensor{String}, pattern_) + begin + end + end + end + begin + begin + tf.add_input(desc, pattern_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matching_files_eager(pattern_; name=nothing) - desc = tf.EagerOp("MatchingFiles") - pattern_ = convert(tf.EagerTensor, pattern_) - tf.add_input(desc, pattern_) - res = tf.execute(desc) - node = tf.TapeNode(matching_files, [pattern_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matching_files_eager(pattern_; name=nothing) + desc = tf.EagerOp("MatchingFiles") + pattern_ = convert(tf.EagerTensor, pattern_) + begin + begin + tf.add_input(desc, pattern_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(matching_files, [pattern_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matching_files(pattern_; name=nothing) - if tf.in_eager_mode() - matching_files_eager(pattern_; name=name) - else - matching_files_graph(pattern_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matching_files(pattern_; name=nothing) + if tf.in_eager_mode() + matching_files_eager(pattern_; name=name) + else + matching_files_graph(pattern_; name=name) + end end - end + end end @@ -55075,39 +100646,67 @@ end """ begin - function encode_base64_graph(input_; name=nothing, pad=nothing) - local desc - tf.with_op_name(name, "EncodeBase64") do - desc = tf.NodeDescription("EncodeBase64") - input_ = convert(Tensor{String}, input_) - tf.add_input(desc, input_) - if pad !== nothing - desc["pad"] = Base.Bool(pad) + begin + function encode_base64_graph(input_; name=nothing, pad=nothing) + local desc + tf.with_op_name(name, "EncodeBase64") do + desc = tf.NodeDescription("EncodeBase64") + begin + begin + input_ = 
convert(Tensor{String}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pad !== nothing + desc["pad"] = Base.Bool(pad) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function encode_base64_eager(input_; name=nothing, pad=nothing) - desc = tf.EagerOp("EncodeBase64") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if pad !== nothing - desc["pad"] = Base.Bool(pad) - end - res = tf.execute(desc) - node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function encode_base64_eager(input_; name=nothing, pad=nothing) + desc = tf.EagerOp("EncodeBase64") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if pad !== nothing + desc["pad"] = Base.Bool(pad) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(encode_base64, [input_], name=nothing, pad=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing) - if tf.in_eager_mode() - encode_base64_eager(input_; name=name, pad=pad) - else - encode_base64_graph(input_; name=name, pad=pad) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function encode_base64(input_; name=nothing, pad=nothing) + if tf.in_eager_mode() + encode_base64_eager(input_; name=name, pad=pad) + else + encode_base64_graph(input_; name=name, pad=pad) + end end - end + end end @@ -55117,45 +100716,77 @@ end """ begin - function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNextAsOptional") do - desc = tf.NodeDescription("IteratorGetNextAsOptional") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function iterator_get_next_as_optional_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNextAsOptional") do + desc = tf.NodeDescription("IteratorGetNextAsOptional") + begin + begin + iterator_ = convert(Tensor{Any}, iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNextAsOptional") + iterator_ = convert(tf.EagerTensor, iterator_) + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + 
tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function iterator_get_next_as_optional_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorGetNextAsOptional") - iterator_ = convert(tf.EagerTensor, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next_as_optional, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next_as_optional(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_as_optional_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_as_optional_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -55165,59 +100796,95 @@ end """ begin - function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "PaddingFIFOQueue") do - desc = tf.NodeDescription("PaddingFIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) + begin + function padding_fifo_queue_graph(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "PaddingFIFOQueue") do + desc = tf.NodeDescription("PaddingFIFOQueue") + begin + end + begin + end + begin + begin + if component_types !== nothing + desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function padding_fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("PaddingFIFOQueue") + begin + end + begin + begin + if component_types !== nothing + 
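
Optional attributes follow a uniform guard-and-coerce pattern in both builders, as in the surrounding `PaddingFIFOQueue` code: a keyword left as `nothing` is simply omitted, which keeps the op's registered default in force, while supplied values are coerced through `Base.Int`, `Base.Bool`, `Base.String`, or `map(Base.identity, ...)` for list-valued attrs:

    # scalar attr: only set when the caller provided one
    if capacity !== nothing
        desc["capacity"] = Base.Int(capacity)
    end
    # list attr: each element passed through unchanged
    if shapes !== nothing
        desc["shapes"] = map(Base.identity, shapes)
    end
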
desc["component_types"] = map(Base.identity, component_types) + end + end + begin + if shapes !== nothing + desc["shapes"] = map(Base.identity, shapes) + end + end + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + else + padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) + end end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) end - function padding_fifo_queue_eager(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("PaddingFIFOQueue") - if component_types !== nothing - desc["component_types"] = map(Base.identity, component_types) - end - if shapes !== nothing - desc["shapes"] = map(Base.identity, shapes) - end - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(padding_fifo_queue, [], name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function padding_fifo_queue(; name=nothing, component_types=nothing, shapes=nothing, capacity=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - padding_fifo_queue_eager(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - else - padding_fifo_queue_graph(; name=name, component_types=component_types, shapes=shapes, capacity=capacity, container=container, shared_name=shared_name) - end - end end @@ -55227,33 +100894,57 @@ end """ begin - function iterator_to_string_handle_graph(resource_handle_; name=nothing) - local desc - tf.with_op_name(name, "IteratorToStringHandle") do - desc = tf.NodeDescription("IteratorToStringHandle") - resource_handle_ = convert(Tensor{Any}, resource_handle_) - tf.add_input(desc, resource_handle_) + begin + function iterator_to_string_handle_graph(resource_handle_; name=nothing) + local desc + tf.with_op_name(name, "IteratorToStringHandle") do + desc = tf.NodeDescription("IteratorToStringHandle") + begin + begin + resource_handle_ = convert(Tensor{Any}, resource_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, 
resource_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function iterator_to_string_handle_eager(resource_handle_; name=nothing) - desc = tf.EagerOp("IteratorToStringHandle") - resource_handle_ = convert(tf.EagerTensor, resource_handle_) - tf.add_input(desc, resource_handle_) - res = tf.execute(desc) - node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function iterator_to_string_handle_eager(resource_handle_; name=nothing) + desc = tf.EagerOp("IteratorToStringHandle") + resource_handle_ = convert(tf.EagerTensor, resource_handle_) + begin + begin + tf.add_input(desc, resource_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_to_string_handle, [resource_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing) - if tf.in_eager_mode() - iterator_to_string_handle_eager(resource_handle_; name=name) - else - iterator_to_string_handle_graph(resource_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_to_string_handle(resource_handle_; name=nothing) + if tf.in_eager_mode() + iterator_to_string_handle_eager(resource_handle_; name=name) + else + iterator_to_string_handle_graph(resource_handle_; name=name) + end end - end + end end @@ -55263,64 +100954,126 @@ end """ begin - function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - local desc - tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do - desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") - input_ = convert(Tensor{Any}, input_) - grad_ = convert(Tensor{Any}, grad_) - argmax_ = convert(Tensor{Any}, argmax_) - (argmax_,) = tf.tf_promote(argmax_) - (input_, grad_) = tf.tf_promote(input_, grad_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + begin + function max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + local desc + tf.with_op_name(name, "MaxPoolGradGradWithArgmax") do + desc = tf.NodeDescription("MaxPoolGradGradWithArgmax") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + argmax_ = convert(Tensor{Any}, argmax_) + begin + end + end + begin + (argmax_,) = tf.tf_promote(argmax_) + end + begin + (input_, grad_) = tf.tf_promote(input_, grad_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, argmax_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, 
ksize=nothing, strides=nothing, padding=nothing) + desc = tf.EagerOp("MaxPoolGradGradWithArgmax") + input_ = convert(tf.EagerTensor, input_) + grad_ = convert(tf.EagerTensor, grad_) + argmax_ = convert(tf.EagerTensor, argmax_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, argmax_) + end + end + begin + begin + if ksize !== nothing + desc["ksize"] = map(Base.identity, ksize) + end + end + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Targmax"] = tf.data_type(argmax_) + end + res = tf.execute(desc) + node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) + if tf.in_eager_mode() + max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + else + max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - end - tf.Tensor(tf.Operation(desc)) - end - function max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - desc = tf.EagerOp("MaxPoolGradGradWithArgmax") - input_ = convert(tf.EagerTensor, input_) - grad_ = convert(tf.EagerTensor, grad_) - argmax_ = convert(tf.EagerTensor, argmax_) - tf.add_input(desc, input_) - tf.add_input(desc, grad_) - tf.add_input(desc, argmax_) - if ksize !== nothing - desc["ksize"] = map(Base.identity, ksize) - end - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(grad_) - desc["Targmax"] = tf.data_type(argmax_) - res = tf.execute(desc) - node = tf.TapeNode(max_pool_grad_grad_with_argmax, [input_, grad_, argmax_], name=nothing, ksize=nothing, strides=nothing, padding=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function max_pool_grad_grad_with_argmax(input_, grad_, argmax_; name=nothing, ksize=nothing, strides=nothing, padding=nothing) - if tf.in_eager_mode() - max_pool_grad_grad_with_argmax_eager(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - else - max_pool_grad_grad_with_argmax_graph(input_, grad_, argmax_; name=name, ksize=ksize, strides=strides, padding=padding) - end - end end @@ -55330,43 +101083,79 @@ end """ begin - function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListGather") do - desc = tf.NodeDescription("TensorListGather") - input_handle_ = convert(Tensor{Any}, input_handle_) - indices_ = convert(Tensor{Int32}, indices_) - tf.add_input(desc, input_handle_) - 
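
The two builders solve dtype selection differently. In graph mode the inputs are symbolic, so `tf.tf_promote` converts groups of inputs to a common element type before they are wired in; in eager mode the tensors already exist, so the `"T"`-style attrs are read straight off them with `tf.data_type`. For `MaxPoolGradGradWithArgmax` above that means:

    # graph builder: promote symbolic inputs to one eltype up front
    (input_, grad_) = tf.tf_promote(input_, grad_)

    # eager builder: derive the attrs from the live tensors instead
    desc["T"] = tf.data_type(input_)
    desc["T"] = tf.data_type(grad_)        # same attr; the inputs must already agree
    desc["Targmax"] = tf.data_type(argmax_)
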
tf.add_input(desc, indices_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_gather_graph(input_handle_, indices_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListGather") do + desc = tf.NodeDescription("TensorListGather") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_gather_eager(input_handle_, indices_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListGather") + input_handle_ = convert(tf.EagerTensor, input_handle_) + indices_ = convert(tf.EagerTensor, indices_) + begin + begin + tf.add_input(desc, input_handle_) + end + begin + tf.add_input(desc, indices_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype) + else + tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_gather_eager(input_handle_, indices_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListGather") - input_handle_ = convert(tf.EagerTensor, input_handle_) - indices_ = convert(tf.EagerTensor, indices_) - tf.add_input(desc, input_handle_) - tf.add_input(desc, indices_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_gather, [input_handle_, indices_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_gather(input_handle_, indices_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_gather_eager(input_handle_, indices_; name=name, element_dtype=element_dtype) - else - tensor_list_gather_graph(input_handle_, indices_; name=name, element_dtype=element_dtype) - end - end end @@ -55376,57 +101165,105 @@ end """ begin - function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - local desc - tf.with_op_name(name, "Multinomial") do - desc = tf.NodeDescription("Multinomial") - logits_ = convert(Tensor{Any}, logits_) - num_samples_ = convert(Tensor{Int32}, num_samples_) - (logits_,) = tf.tf_promote(logits_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function multinomial_graph(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, 
output_dtype=nothing) + local desc + tf.with_op_name(name, "Multinomial") do + desc = tf.NodeDescription("Multinomial") + begin + begin + logits_ = convert(Tensor{Any}, logits_) + begin + end + end + begin + num_samples_ = convert(Tensor{Int32}, num_samples_) + begin + end + end + begin + (logits_,) = tf.tf_promote(logits_) + end + end + begin + begin + tf.add_input(desc, logits_) + end + begin + tf.add_input(desc, num_samples_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + desc = tf.EagerOp("Multinomial") + logits_ = convert(tf.EagerTensor, logits_) + num_samples_ = convert(tf.EagerTensor, num_samples_) + begin + begin + tf.add_input(desc, logits_) + end + begin + tf.add_input(desc, num_samples_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if output_dtype !== nothing + desc["output_dtype"] = Base.identity(output_dtype) + end + end + end + begin + desc["T"] = tf.data_type(logits_) + end + res = tf.execute(desc) + node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) + if tf.in_eager_mode() + multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + else + multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function multinomial_eager(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - desc = tf.EagerOp("Multinomial") - logits_ = convert(tf.EagerTensor, logits_) - num_samples_ = convert(tf.EagerTensor, num_samples_) - tf.add_input(desc, logits_) - tf.add_input(desc, num_samples_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if output_dtype !== nothing - desc["output_dtype"] = Base.identity(output_dtype) - end - desc["T"] = tf.data_type(logits_) - res = tf.execute(desc) - node = tf.TapeNode(multinomial, [logits_, num_samples_], name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function multinomial(logits_, num_samples_; name=nothing, seed=nothing, seed2=nothing, output_dtype=nothing) - if tf.in_eager_mode() - multinomial_eager(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) - else - multinomial_graph(logits_, num_samples_; name=name, seed=seed, seed2=seed2, output_dtype=output_dtype) 
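
The eager builders end with the hook that makes reverse-mode autodiff possible without a graph: once `tf.execute` returns the output handles, the op, its inputs, and its results are bundled into a `tf.TapeNode` and attached to the first output, so a later gradient pass can walk the recorded tape. Annotated, the tail of `multinomial_eager` reads:

    res = tf.execute(desc)                    # Vector of output EagerTensors
    node = tf.TapeNode(multinomial,           # the dispatching entry point, for replay
                       [logits_, num_samples_],  # forward inputs
                       name=nothing, seed=nothing, seed2=nothing,
                       output_dtype=nothing, res)
    if length(res) >= 1
        tf.add_node(res[1], node)             # record on the tape, keyed by the output
        return res[1]
    end
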
- end - end end @@ -55436,47 +101273,91 @@ end """ begin - function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "TensorArrayRead") do - desc = tf.NodeDescription("TensorArrayRead") - handle_ = convert(Tensor{String}, handle_) - index_ = convert(Tensor{Int32}, index_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function tensor_array_read_graph(handle_, index_, flow_in_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "TensorArrayRead") do + desc = tf.NodeDescription("TensorArrayRead") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + index_ = convert(Tensor{Int32}, index_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) + desc = tf.EagerOp("TensorArrayRead") + handle_ = convert(tf.EagerTensor, handle_) + index_ = convert(tf.EagerTensor, index_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, index_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) + else + tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_read_eager(handle_, index_, flow_in_; name=nothing, dtype=nothing) - desc = tf.EagerOp("TensorArrayRead") - handle_ = convert(tf.EagerTensor, handle_) - index_ = convert(tf.EagerTensor, index_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, index_) - tf.add_input(desc, flow_in_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_read, [handle_, index_, flow_in_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_read(handle_, index_, flow_in_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - tensor_array_read_eager(handle_, index_, flow_in_; name=name, dtype=dtype) - else - tensor_array_read_graph(handle_, index_, flow_in_; name=name, dtype=dtype) - end - end end @@ -55486,49 +101367,89 @@ end """ begin - function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, 
output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do - desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") - materialized_ = convert(Tensor{Any}, materialized_) - index_ = convert(Tensor{Any}, index_) - tf.add_input(desc, materialized_) - tf.add_input(desc, index_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_indexed_dataset_get_graph(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalIndexedDatasetGet") do + desc = tf.NodeDescription("ExperimentalIndexedDatasetGet") + begin + begin + materialized_ = convert(Tensor{Any}, materialized_) + begin + end + end + begin + index_ = convert(Tensor{Any}, index_) + begin + end + end + end + begin + begin + tf.add_input(desc, materialized_) + end + begin + tf.add_input(desc, index_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalIndexedDatasetGet") + materialized_ = convert(tf.EagerTensor, materialized_) + index_ = convert(tf.EagerTensor, index_) + begin + begin + tf.add_input(desc, materialized_) + end + begin + tf.add_input(desc, index_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_indexed_dataset_get_eager(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalIndexedDatasetGet") - materialized_ = convert(tf.EagerTensor, materialized_) - index_ = convert(tf.EagerTensor, index_) - tf.add_input(desc, materialized_) - tf.add_input(desc, index_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_indexed_dataset_get, [materialized_, index_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - 
end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_indexed_dataset_get(materialized_, index_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_indexed_dataset_get_eager(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_indexed_dataset_get_graph(materialized_, index_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -55538,45 +101459,77 @@ end """ begin - function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorFromStringHandleV2") do - desc = tf.NodeDescription("IteratorFromStringHandleV2") - string_handle_ = convert(Tensor{String}, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) + begin + function iterator_from_string_handle_v2_graph(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorFromStringHandleV2") do + desc = tf.NodeDescription("IteratorFromStringHandleV2") + begin + begin + string_handle_ = convert(Tensor{String}, string_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorFromStringHandleV2") + string_handle_ = convert(tf.EagerTensor, string_handle_) + begin + begin + tf.add_input(desc, string_handle_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_from_string_handle_v2_eager(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorFromStringHandleV2") - string_handle_ = convert(tf.EagerTensor, string_handle_) - tf.add_input(desc, string_handle_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, 
output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_from_string_handle_v2, [string_handle_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_from_string_handle_v2(string_handle_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_from_string_handle_v2_eager(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_from_string_handle_v2_graph(string_handle_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -55586,40 +101539,78 @@ end """ begin - function bitwise_or_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "BitwiseOr") do - desc = tf.NodeDescription("BitwiseOr") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function bitwise_or_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "BitwiseOr") do + desc = tf.NodeDescription("BitwiseOr") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function bitwise_or_eager(x_, y_; name=nothing) - desc = tf.EagerOp("BitwiseOr") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function bitwise_or_eager(x_, y_; name=nothing) + desc = tf.EagerOp("BitwiseOr") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(bitwise_or, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_or(x_, y_; name=nothing) - if tf.in_eager_mode() - bitwise_or_eager(x_, y_; name=name) - else - bitwise_or_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function bitwise_or(x_, y_; name=nothing) + if tf.in_eager_mode() + bitwise_or_eager(x_, y_; name=name) + else + bitwise_or_graph(x_, y_; name=name) + end end - end + end end @@ -55629,48 +101620,100 @@ end """ begin - function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) - local desc - tf.with_op_name(name, "UnsortedSegmentMax") do - desc = tf.NodeDescription("UnsortedSegmentMax") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - num_segments_ = convert(Tensor{Int32}, num_segments_) - (num_segments_,) = tf.tf_promote(num_segments_) - (data_,) 
= tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - end - tf.Tensor(tf.Operation(desc)) - end - function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) - desc = tf.EagerOp("UnsortedSegmentMax") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = convert(tf.EagerTensor, segment_ids_) - num_segments_ = convert(tf.EagerTensor, num_segments_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - tf.add_input(desc, num_segments_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - desc["Tnumsegments"] = tf.data_type(num_segments_) - res = tf.execute(desc) - node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) - if tf.in_eager_mode() - unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) - else - unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) + begin + function unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=nothing) + local desc + tf.with_op_name(name, "UnsortedSegmentMax") do + desc = tf.NodeDescription("UnsortedSegmentMax") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + num_segments_ = convert(Tensor{Int32}, num_segments_) + begin + end + end + begin + (num_segments_,) = tf.tf_promote(num_segments_) + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=nothing) + desc = tf.EagerOp("UnsortedSegmentMax") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + num_segments_ = convert(tf.EagerTensor, num_segments_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + begin + tf.add_input(desc, num_segments_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + begin + desc["Tnumsegments"] = tf.data_type(num_segments_) + end + res = tf.execute(desc) + node = tf.TapeNode(unsorted_segment_max, [data_, segment_ids_, num_segments_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unsorted_segment_max(data_, segment_ids_, num_segments_; name=nothing) + if tf.in_eager_mode() + unsorted_segment_max_eager(data_, segment_ids_, num_segments_; name=name) + else + unsorted_segment_max_graph(data_, segment_ids_, num_segments_; name=name) + end end - end + end end @@ -55680,53 +101723,109 @@ end Returns (x - y)(x - y) element-wise. 
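This is the MKL-specific variant of `SquaredDifference`: in addition to the data tensors `x_` and `y_` it takes and returns MKL layout tensors (`mkl_x_`, `mkl_y_`), so it is normally inserted by the graph rewriter rather than called directly. A minimal sketch of the value it computes, in plain Julia (the names below are illustrative, not part of this op's API):

```julia
# element-wise (x - y)^2, the quantity _MklSquaredDifference computes
x = [1.0, 2.0]
y = [3.0, 5.0]
@assert (x .- y) .^ 2 == [4.0, 9.0]
```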
""" begin - function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) - local desc - tf.with_op_name(name, "_MklSquaredDifference") do - desc = tf.NodeDescription("_MklSquaredDifference") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - mkl_x_ = convert(Tensor{UInt8}, mkl_x_) - mkl_y_ = convert(Tensor{UInt8}, mkl_y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) - desc = tf.EagerOp("_MklSquaredDifference") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - mkl_x_ = convert(tf.EagerTensor, mkl_x_) - mkl_y_ = convert(tf.EagerTensor, mkl_y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - tf.add_input(desc, mkl_x_) - tf.add_input(desc, mkl_y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) - if tf.in_eager_mode() - _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) - else - _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) + begin + function _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=nothing) + local desc + tf.with_op_name(name, "_MklSquaredDifference") do + desc = tf.NodeDescription("_MklSquaredDifference") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + mkl_x_ = convert(Tensor{UInt8}, mkl_x_) + begin + end + end + begin + mkl_y_ = convert(Tensor{UInt8}, mkl_y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=nothing) + desc = tf.EagerOp("_MklSquaredDifference") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + mkl_x_ = convert(tf.EagerTensor, mkl_x_) + mkl_y_ = convert(tf.EagerTensor, mkl_y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, mkl_x_) + end + begin + tf.add_input(desc, mkl_y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(_mkl_squared_difference, [x_, y_, mkl_x_, mkl_y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _mkl_squared_difference(x_, y_, mkl_x_, mkl_y_; name=nothing) + if tf.in_eager_mode() + _mkl_squared_difference_eager(x_, y_, mkl_x_, mkl_y_; name=name) + else + _mkl_squared_difference_graph(x_, y_, mkl_x_, mkl_y_; name=name) + end end - 
end + end end @@ -55736,63 +101835,123 @@ end """ begin - function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "Conv3DBackpropFilter") do - desc = tf.NodeDescription("Conv3DBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_ = convert(Tensor{Any}, filter_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) + begin + function conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "Conv3DBackpropFilter") do + desc = tf.NodeDescription("Conv3DBackpropFilter") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_ = convert(Tensor{Any}, filter_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, filter_, out_backprop_) = tf.tf_promote(input_, filter_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + desc = tf.EagerOp("Conv3DBackpropFilter") + input_ = convert(tf.EagerTensor, input_) + filter_ = convert(tf.EagerTensor, filter_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(filter_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) + if tf.in_eager_mode() + conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + else + conv3d_backprop_filter_graph(input_, 
filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) + end end - end - tf.Tensor(tf.Operation(desc)) end - function conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - desc = tf.EagerOp("Conv3DBackpropFilter") - input_ = convert(tf.EagerTensor, input_) - filter_ = convert(tf.EagerTensor, filter_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(filter_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(conv3d_backprop_filter, [input_, filter_, out_backprop_], name=nothing, strides=nothing, padding=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conv3d_backprop_filter(input_, filter_, out_backprop_; name=nothing, strides=nothing, padding=nothing, dilations=nothing) - if tf.in_eager_mode() - conv3d_backprop_filter_eager(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - else - conv3d_backprop_filter_graph(input_, filter_, out_backprop_; name=name, strides=strides, padding=padding, dilations=dilations) - end - end end @@ -55802,69 +101961,125 @@ end """ begin - function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "If") do - desc = tf.NodeDescription("If") - cond_ = convert(Tensor{Any}, cond_) - input_ = [convert(Tensor{Any}, x) for x = input_] - (cond_,) = tf.tf_promote(cond_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) + begin + function if__graph(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "If") do + desc = tf.NodeDescription("If") + begin + begin + cond_ = convert(Tensor{Any}, cond_) + begin + end + end + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (cond_,) = tf.tf_promote(cond_) + end + end + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function if__eager(cond_, input_; 
name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + desc = tf.EagerOp("If") + cond_ = convert(tf.EagerTensor, cond_) + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, cond_) + end + begin + tf.add_input(desc, input_) + end + end + begin + begin + if Tin !== nothing + desc["Tin"] = map(Base.identity, Tin) + end + end + begin + if Tout !== nothing + desc["Tout"] = map(Base.identity, Tout) + end + end + begin + if then_branch !== nothing + desc["then_branch"] = Base.identity(then_branch) + end + end + begin + if else_branch !== nothing + desc["else_branch"] = Base.identity(else_branch) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + begin + desc["Tcond"] = tf.data_type(cond_) + end + res = tf.execute(desc) + node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) + if tf.in_eager_mode() + if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + else + if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) + end end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function if__eager(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - desc = tf.EagerOp("If") - cond_ = convert(tf.EagerTensor, cond_) - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, cond_) - tf.add_input(desc, input_) - if Tin !== nothing - desc["Tin"] = map(Base.identity, Tin) - end - if Tout !== nothing - desc["Tout"] = map(Base.identity, Tout) - end - if then_branch !== nothing - desc["then_branch"] = Base.identity(then_branch) - end - if else_branch !== nothing - desc["else_branch"] = Base.identity(else_branch) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - desc["Tcond"] = tf.data_type(cond_) - res = tf.execute(desc) - node = tf.TapeNode(if_, [cond_, input_], name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function if_(cond_, input_; name=nothing, Tin=nothing, Tout=nothing, then_branch=nothing, else_branch=nothing, output_shapes=nothing) - if tf.in_eager_mode() - if__eager(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) - else - if__graph(cond_, input_; name=name, Tin=Tin, Tout=Tout, then_branch=then_branch, else_branch=else_branch, output_shapes=output_shapes) - end - end end @@ -55874,61 +102089,109 @@ end """ begin - function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc 
- tf.with_op_name(name, "FlatMapDataset") do - desc = tf.NodeDescription("FlatMapDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) + begin + function flat_map_dataset_graph(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "FlatMapDataset") do + desc = tf.NodeDescription("FlatMapDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("FlatMapDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) end - function flat_map_dataset_eager(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, 
output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("FlatMapDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(flat_map_dataset, [input_dataset_, other_arguments_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flat_map_dataset(input_dataset_, other_arguments_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - flat_map_dataset_eager(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - else - flat_map_dataset_graph(input_dataset_, other_arguments_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -55938,57 +102201,113 @@ end """ begin - function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - local desc - tf.with_op_name(name, "TensorListScatter") do - desc = tf.NodeDescription("TensorListScatter") - tensor_ = convert(Tensor{Any}, tensor_) - indices_ = convert(Tensor{Int32}, indices_) - element_shape_ = convert(Tensor{Any}, element_shape_) - (tensor_,) = tf.tf_promote(tensor_) - (element_shape_,) = tf.tf_promote(element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + local desc + tf.with_op_name(name, "TensorListScatter") do + desc = tf.NodeDescription("TensorListScatter") + begin + begin + tensor_ = convert(Tensor{Any}, tensor_) + begin + end + end + begin + indices_ = convert(Tensor{Int32}, indices_) + begin + end + end + begin + element_shape_ = convert(Tensor{Any}, element_shape_) + begin + end + end + begin + (tensor_,) = tf.tf_promote(tensor_) + end + begin + (element_shape_,) = tf.tf_promote(element_shape_) + end + end + begin + begin + tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, element_shape_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + desc = tf.EagerOp("TensorListScatter") + tensor_ = convert(tf.EagerTensor, tensor_) + indices_ = convert(tf.EagerTensor, indices_) + element_shape_ = convert(tf.EagerTensor, element_shape_) + begin + begin + 
tf.add_input(desc, tensor_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, element_shape_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if shape_type !== nothing + desc["shape_type"] = Base.identity(shape_type) + end + end + end + begin + desc["element_dtype"] = tf.data_type(tensor_) + end + begin + desc["shape_type"] = tf.data_type(element_shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) + if tf.in_eager_mode() + tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + else + tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) + end end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - end - tf.Tensor(tf.Operation(desc)) end - function tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - desc = tf.EagerOp("TensorListScatter") - tensor_ = convert(tf.EagerTensor, tensor_) - indices_ = convert(tf.EagerTensor, indices_) - element_shape_ = convert(tf.EagerTensor, element_shape_) - tf.add_input(desc, tensor_) - tf.add_input(desc, indices_) - tf.add_input(desc, element_shape_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if shape_type !== nothing - desc["shape_type"] = Base.identity(shape_type) - end - desc["element_dtype"] = tf.data_type(tensor_) - desc["shape_type"] = tf.data_type(element_shape_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_scatter, [tensor_, indices_, element_shape_], name=nothing, element_dtype=nothing, shape_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_scatter(tensor_, indices_, element_shape_; name=nothing, element_dtype=nothing, shape_type=nothing) - if tf.in_eager_mode() - tensor_list_scatter_eager(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - else - tensor_list_scatter_graph(tensor_, indices_, element_shape_; name=name, element_dtype=element_dtype, shape_type=shape_type) - end - end end @@ -55998,40 +102317,78 @@ end """ begin - function softsign_grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "SoftsignGrad") do - desc = tf.NodeDescription("SoftsignGrad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) + begin + function softsign_grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "SoftsignGrad") do + desc = tf.NodeDescription("SoftsignGrad") + begin + begin + gradients_ = convert(Tensor{Any}, gradients_) + begin + end + end + begin + features_ = convert(Tensor{Any}, features_) + begin + 
end + end + begin + (gradients_, features_) = tf.tf_promote(gradients_, features_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function softsign_grad_eager(gradients_, features_; name=nothing) - desc = tf.EagerOp("SoftsignGrad") - gradients_ = convert(tf.EagerTensor, gradients_) - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function softsign_grad_eager(gradients_, features_; name=nothing) + desc = tf.EagerOp("SoftsignGrad") + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(softsign_grad, [gradients_, features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign_grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - softsign_grad_eager(gradients_, features_; name=name) - else - softsign_grad_graph(gradients_, features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function softsign_grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + softsign_grad_eager(gradients_, features_; name=name) + else + softsign_grad_graph(gradients_, features_; name=name) + end end - end + end end @@ -56041,47 +102398,83 @@ end Copy Host Op. 
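Copies the input tensor to host (CPU) memory and returns it unmodified; it is part of the TensorFlow debugger machinery, with `tensor_name` and `debug_ops_spec` identifying the watched tensor. A minimal usage sketch via the generated wrapper below (assuming a default graph/session setup; the label passed as `tensor_name` is illustrative, not required):

```julia
using TensorFlow
x = TensorFlow.constant([1, 2, 3])
# identity copy of `x` placed in host memory
y = copy_host(x; tensor_name="x_on_host")
```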
""" begin - function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - local desc - tf.with_op_name(name, "CopyHost") do - desc = tf.NodeDescription("CopyHost") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) + begin + function copy_host_graph(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + local desc + tf.with_op_name(name, "CopyHost") do + desc = tf.NodeDescription("CopyHost") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + desc = tf.EagerOp("CopyHost") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if tensor_name !== nothing + desc["tensor_name"] = Base.String(tensor_name) + end + end + begin + if debug_ops_spec !== nothing + desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) + if tf.in_eager_mode() + copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + else + copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) + end end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) - end - end - tf.Tensor(tf.Operation(desc)) - end - function copy_host_eager(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - desc = tf.EagerOp("CopyHost") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if tensor_name !== nothing - desc["tensor_name"] = Base.String(tensor_name) - end - if debug_ops_spec !== nothing - desc["debug_ops_spec"] = map(Base.identity, debug_ops_spec) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(copy_host, [input_], name=nothing, tensor_name=nothing, debug_ops_spec=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function copy_host(input_; name=nothing, tensor_name=nothing, debug_ops_spec=nothing) - if tf.in_eager_mode() - copy_host_eager(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - else - copy_host_graph(input_; name=name, tensor_name=tensor_name, debug_ops_spec=debug_ops_spec) - end - end end @@ -56091,47 +102484,97 @@ end """ begin - function lin_space_graph(start_, stop_, num_; name=nothing) - local desc - tf.with_op_name(name, "LinSpace") do - desc = tf.NodeDescription("LinSpace") - start_ = 
convert(Tensor{Any}, start_) - stop_ = convert(Tensor{Any}, stop_) - num_ = convert(Tensor{Int32}, num_) - num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) - (start_, stop_) = tf.tf_promote(start_, stop_) - (num_,) = tf.tf_promote(num_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, num_) - end - tf.Tensor(tf.Operation(desc)) - end - function lin_space_eager(start_, stop_, num_; name=nothing) - desc = tf.EagerOp("LinSpace") - start_ = convert(tf.EagerTensor, start_) - stop_ = convert(tf.EagerTensor, stop_) - num_ = convert(tf.EagerTensor, num_) - tf.add_input(desc, start_) - tf.add_input(desc, stop_) - tf.add_input(desc, num_) - desc["T"] = tf.data_type(start_) - desc["T"] = tf.data_type(stop_) - desc["Tidx"] = tf.data_type(num_) - res = tf.execute(desc) - node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lin_space(start_, stop_, num_; name=nothing) - if tf.in_eager_mode() - lin_space_eager(start_, stop_, num_; name=name) - else - lin_space_graph(start_, stop_, num_; name=name) + begin + function lin_space_graph(start_, stop_, num_; name=nothing) + local desc + tf.with_op_name(name, "LinSpace") do + desc = tf.NodeDescription("LinSpace") + begin + begin + start_ = convert(Tensor{Any}, start_) + begin + end + end + begin + stop_ = convert(Tensor{Any}, stop_) + begin + end + end + begin + num_ = convert(Tensor{Int32}, num_) + begin + num_ = num_ - convert(tf.Tensor{eltype(num_)}, 1) + end + end + begin + (start_, stop_) = tf.tf_promote(start_, stop_) + end + begin + (num_,) = tf.tf_promote(num_) + end + end + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, stop_) + end + begin + tf.add_input(desc, num_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function lin_space_eager(start_, stop_, num_; name=nothing) + desc = tf.EagerOp("LinSpace") + start_ = convert(tf.EagerTensor, start_) + stop_ = convert(tf.EagerTensor, stop_) + num_ = convert(tf.EagerTensor, num_) + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, stop_) + end + begin + tf.add_input(desc, num_) + end + end + begin + end + begin + desc["T"] = tf.data_type(start_) + end + begin + desc["T"] = tf.data_type(stop_) + end + begin + desc["Tidx"] = tf.data_type(num_) + end + res = tf.execute(desc) + node = tf.TapeNode(lin_space, [start_, stop_, num_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lin_space(start_, stop_, num_; name=nothing) + if tf.in_eager_mode() + lin_space_eager(start_, stop_, num_; name=name) + else + lin_space_graph(start_, stop_, num_; name=name) + end end - end + end end @@ -56141,46 +102584,88 @@ end Updates input `value` at `loc` with `update`. 
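This internal op backs `parallel_concat`: it writes `update` into `value` at row index `loc` and returns the updated tensor. A rough sketch of the intended semantics (hypothetical values; `loc` is assumed to follow the underlying TensorFlow op's 0-based indexing):

```julia
v = zeros(Float32, 3, 2)   # value being assembled
u = ones(Float32, 1, 2)    # one row to write
# conceptually: the result is `v` with its first row (0-based row 0) replaced by `u`
r = _parallel_concat_update(v, u; loc=0)
```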
""" begin - function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) - local desc - tf.with_op_name(name, "_ParallelConcatUpdate") do - desc = tf.NodeDescription("_ParallelConcatUpdate") - value_ = convert(Tensor{Any}, value_) - update_ = convert(Tensor{Any}, update_) - (value_, update_) = tf.tf_promote(value_, update_) - tf.add_input(desc, value_) - tf.add_input(desc, update_) - if loc !== nothing - desc["loc"] = Base.Int(loc) + begin + function _parallel_concat_update_graph(value_, update_; name=nothing, loc=nothing) + local desc + tf.with_op_name(name, "_ParallelConcatUpdate") do + desc = tf.NodeDescription("_ParallelConcatUpdate") + begin + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + update_ = convert(Tensor{Any}, update_) + begin + end + end + begin + (value_, update_) = tf.tf_promote(value_, update_) + end + end + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, update_) + end + end + begin + begin + if loc !== nothing + desc["loc"] = Base.Int(loc) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) + desc = tf.EagerOp("_ParallelConcatUpdate") + value_ = convert(tf.EagerTensor, value_) + update_ = convert(tf.EagerTensor, update_) + begin + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, update_) + end + end + begin + begin + if loc !== nothing + desc["loc"] = Base.Int(loc) + end + end + end + begin + desc["T"] = tf.data_type(value_) + end + begin + desc["T"] = tf.data_type(update_) + end + res = tf.execute(desc) + node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) + if tf.in_eager_mode() + _parallel_concat_update_eager(value_, update_; name=name, loc=loc) + else + _parallel_concat_update_graph(value_, update_; name=name, loc=loc) + end end - end - tf.Tensor(tf.Operation(desc)) end - function _parallel_concat_update_eager(value_, update_; name=nothing, loc=nothing) - desc = tf.EagerOp("_ParallelConcatUpdate") - value_ = convert(tf.EagerTensor, value_) - update_ = convert(tf.EagerTensor, update_) - tf.add_input(desc, value_) - tf.add_input(desc, update_) - if loc !== nothing - desc["loc"] = Base.Int(loc) - end - desc["T"] = tf.data_type(value_) - desc["T"] = tf.data_type(update_) - res = tf.execute(desc) - node = tf.TapeNode(_parallel_concat_update, [value_, update_], name=nothing, loc=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _parallel_concat_update(value_, update_; name=nothing, loc=nothing) - if tf.in_eager_mode() - _parallel_concat_update_eager(value_, update_; name=name, loc=loc) - else - _parallel_concat_update_graph(value_, update_; name=name, loc=loc) - end - end end @@ -56190,41 +102675,65 @@ end """ begin - function stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) - local desc - tf.with_op_name(name, "Stack") do - desc = tf.NodeDescription("Stack") - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) + begin + function 
stack_graph(; name=nothing, elem_type=nothing, stack_name=nothing) + local desc + tf.with_op_name(name, "Stack") do + desc = tf.NodeDescription("Stack") + begin + end + begin + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + begin + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function stack_eager(; name=nothing, elem_type=nothing, stack_name=nothing) - desc = tf.EagerOp("Stack") - if elem_type !== nothing - desc["elem_type"] = Base.identity(elem_type) - end - if stack_name !== nothing - desc["stack_name"] = Base.String(stack_name) - end - res = tf.execute(desc) - node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function stack_eager(; name=nothing, elem_type=nothing, stack_name=nothing) + desc = tf.EagerOp("Stack") + begin + end + begin + begin + if elem_type !== nothing + desc["elem_type"] = Base.identity(elem_type) + end + end + begin + if stack_name !== nothing + desc["stack_name"] = Base.String(stack_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(stack, [], name=nothing, elem_type=nothing, stack_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing) - if tf.in_eager_mode() - stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) - else - stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack(; name=nothing, elem_type=nothing, stack_name=nothing) + if tf.in_eager_mode() + stack_eager(; name=name, elem_type=elem_type, stack_name=stack_name) + else + stack_graph(; name=name, elem_type=elem_type, stack_name=stack_name) + end end - end + end end @@ -56234,45 +102743,85 @@ end """ begin - function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) - local desc - tf.with_op_name(name, "StackPushV2") do - desc = tf.NodeDescription("StackPushV2") - handle_ = convert(Tensor{Any}, handle_) - elem_ = convert(Tensor{Any}, elem_) - (elem_,) = tf.tf_promote(elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) + begin + function stack_push_v2_graph(handle_, elem_; name=nothing, swap_memory=nothing) + local desc + tf.with_op_name(name, "StackPushV2") do + desc = tf.NodeDescription("StackPushV2") + begin + begin + handle_ = convert(Tensor{Any}, handle_) + begin + end + end + begin + elem_ = convert(Tensor{Any}, elem_) + begin + end + end + begin + (elem_,) = tf.tf_promote(elem_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, elem_) + end + end + begin + begin + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) + desc = tf.EagerOp("StackPushV2") + handle_ = convert(tf.EagerTensor, handle_) + elem_ = convert(tf.EagerTensor, elem_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, elem_) + end + end + begin + 
begin + if swap_memory !== nothing + desc["swap_memory"] = Base.Bool(swap_memory) + end + end + end + begin + desc["T"] = tf.data_type(elem_) + end + res = tf.execute(desc) + node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) + if tf.in_eager_mode() + stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory) + else + stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stack_push_v2_eager(handle_, elem_; name=nothing, swap_memory=nothing) - desc = tf.EagerOp("StackPushV2") - handle_ = convert(tf.EagerTensor, handle_) - elem_ = convert(tf.EagerTensor, elem_) - tf.add_input(desc, handle_) - tf.add_input(desc, elem_) - if swap_memory !== nothing - desc["swap_memory"] = Base.Bool(swap_memory) - end - desc["T"] = tf.data_type(elem_) - res = tf.execute(desc) - node = tf.TapeNode(stack_push_v2, [handle_, elem_], name=nothing, swap_memory=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stack_push_v2(handle_, elem_; name=nothing, swap_memory=nothing) - if tf.in_eager_mode() - stack_push_v2_eager(handle_, elem_; name=name, swap_memory=swap_memory) - else - stack_push_v2_graph(handle_, elem_; name=name, swap_memory=swap_memory) - end - end end @@ -56282,45 +102831,85 @@ end """ begin - function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "AssignVariableOp") do - desc = tf.NodeDescription("AssignVariableOp") - resource_ = convert(Tensor{Any}, resource_) - value_ = convert(Tensor{Any}, value_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function assign_variable_op_graph(resource_, value_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "AssignVariableOp") do + desc = tf.NodeDescription("AssignVariableOp") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) + desc = tf.EagerOp("AssignVariableOp") + resource_ = convert(tf.EagerTensor, resource_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["dtype"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) + else + assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) end - function assign_variable_op_eager(resource_, value_; name=nothing, dtype=nothing) - desc = tf.EagerOp("AssignVariableOp") - resource_ = convert(tf.EagerTensor, resource_) - value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, resource_) - tf.add_input(desc, value_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["dtype"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(assign_variable_op, [resource_, value_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function assign_variable_op(resource_, value_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - assign_variable_op_eager(resource_, value_; name=name, dtype=dtype) - else - assign_variable_op_graph(resource_, value_; name=name, dtype=dtype) - end - end end @@ -56330,59 +102919,117 @@ end """ begin - function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - local desc - tf.with_op_name(name, "SparseSplit") do - desc = tf.NodeDescription("SparseSplit") - split_dim_ = convert(Tensor{Int64}, split_dim_) - split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) - indices_ = convert(Tensor{Int64}, indices_) - values_ = convert(Tensor{Any}, values_) - shape_ = convert(Tensor{Int64}, shape_) - (values_,) = tf.tf_promote(values_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) + begin + function sparse_split_graph(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + local desc + tf.with_op_name(name, "SparseSplit") do + desc = tf.NodeDescription("SparseSplit") + begin + begin + split_dim_ = convert(Tensor{Int64}, split_dim_) + begin + split_dim_ = split_dim_ - convert(tf.Tensor{eltype(split_dim_)}, 1) + end + end + begin + indices_ = convert(Tensor{Int64}, indices_) + begin + end + end + begin + values_ = convert(Tensor{Any}, values_) + begin + end + end + begin + shape_ = convert(Tensor{Int64}, shape_) + begin + end + end + begin + (values_,) = tf.tf_promote(values_) + end + end + begin + begin + tf.add_input(desc, split_dim_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + desc = tf.EagerOp("SparseSplit") + split_dim_ = convert(tf.EagerTensor, split_dim_) + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, split_dim_) + end + begin + tf.add_input(desc, indices_) + end + begin + 
tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if num_split !== nothing + desc["num_split"] = Base.Int(num_split) + end + end + end + begin + desc["T"] = tf.data_type(values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) + if tf.in_eager_mode() + sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + else + sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function sparse_split_eager(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - desc = tf.EagerOp("SparseSplit") - split_dim_ = convert(tf.EagerTensor, split_dim_) - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, split_dim_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shape_) - if num_split !== nothing - desc["num_split"] = Base.Int(num_split) - end - desc["T"] = tf.data_type(values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_split, [split_dim_, indices_, values_, shape_], name=nothing, num_split=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_split(split_dim_, indices_, values_, shape_; name=nothing, num_split=nothing) - if tf.in_eager_mode() - sparse_split_eager(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) - else - sparse_split_graph(split_dim_, indices_, values_, shape_; name=name, num_split=num_split) - end - end end @@ -56392,43 +103039,87 @@ end """ begin - function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayUnpack") do - desc = tf.NodeDescription("TensorArrayUnpack") - handle_ = convert(Tensor{String}, handle_) - value_ = convert(Tensor{Any}, value_) - flow_in_ = convert(Tensor{Float32}, flow_in_) - (value_,) = tf.tf_promote(value_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) - desc = tf.EagerOp("TensorArrayUnpack") - handle_ = convert(tf.EagerTensor, handle_) - value_ = convert(tf.EagerTensor, value_) - flow_in_ = convert(tf.EagerTensor, flow_in_) - tf.add_input(desc, handle_) - tf.add_input(desc, value_) - tf.add_input(desc, flow_in_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) - if tf.in_eager_mode() - tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) - else - tensor_array_unpack_graph(handle_, 
value_, flow_in_; name=name) + begin + function tensor_array_unpack_graph(handle_, value_, flow_in_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayUnpack") do + desc = tf.NodeDescription("TensorArrayUnpack") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + flow_in_ = convert(Tensor{Float32}, flow_in_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + end + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_array_unpack_eager(handle_, value_, flow_in_; name=nothing) + desc = tf.EagerOp("TensorArrayUnpack") + handle_ = convert(tf.EagerTensor, handle_) + value_ = convert(tf.EagerTensor, value_) + flow_in_ = convert(tf.EagerTensor, flow_in_) + begin + begin + tf.add_input(desc, handle_) + end + begin + tf.add_input(desc, value_) + end + begin + tf.add_input(desc, flow_in_) + end + end + begin + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_unpack, [handle_, value_, flow_in_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_unpack(handle_, value_, flow_in_; name=nothing) + if tf.in_eager_mode() + tensor_array_unpack_eager(handle_, value_, flow_in_; name=name) + else + tensor_array_unpack_graph(handle_, value_, flow_in_; name=name) + end end - end + end end @@ -56438,45 +103129,77 @@ end """ begin - function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) - local desc - tf.with_op_name(name, "TensorListStack") do - desc = tf.NodeDescription("TensorListStack") - input_handle_ = convert(Tensor{Any}, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if num_elements !== nothing - desc["num_elements"] = Base.Int(num_elements) + begin + function tensor_list_stack_graph(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) + local desc + tf.with_op_name(name, "TensorListStack") do + desc = tf.NodeDescription("TensorListStack") + begin + begin + input_handle_ = convert(Tensor{Any}, input_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if num_elements !== nothing + desc["num_elements"] = Base.Int(num_elements) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_stack_eager(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) + desc = tf.EagerOp("TensorListStack") + input_handle_ = convert(tf.EagerTensor, input_handle_) + begin + begin + tf.add_input(desc, input_handle_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + begin + if num_elements !== nothing + desc["num_elements"] = Base.Int(num_elements) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_stack, [input_handle_], name=nothing, element_dtype=nothing, num_elements=nothing, res) + if 
length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) + if tf.in_eager_mode() + tensor_list_stack_eager(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) + else + tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_stack_eager(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) - desc = tf.EagerOp("TensorListStack") - input_handle_ = convert(tf.EagerTensor, input_handle_) - tf.add_input(desc, input_handle_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - if num_elements !== nothing - desc["num_elements"] = Base.Int(num_elements) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_stack, [input_handle_], name=nothing, element_dtype=nothing, num_elements=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_stack(input_handle_; name=nothing, element_dtype=nothing, num_elements=nothing) - if tf.in_eager_mode() - tensor_list_stack_eager(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) - else - tensor_list_stack_graph(input_handle_; name=name, element_dtype=element_dtype, num_elements=num_elements) - end - end end @@ -56486,33 +103209,57 @@ end """ begin - function barrier_incomplete_size_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "BarrierIncompleteSize") do - desc = tf.NodeDescription("BarrierIncompleteSize") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function barrier_incomplete_size_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "BarrierIncompleteSize") do + desc = tf.NodeDescription("BarrierIncompleteSize") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function barrier_incomplete_size_eager(handle_; name=nothing) - desc = tf.EagerOp("BarrierIncompleteSize") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function barrier_incomplete_size_eager(handle_; name=nothing) + desc = tf.EagerOp("BarrierIncompleteSize") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(barrier_incomplete_size, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_incomplete_size(handle_; name=nothing) - if tf.in_eager_mode() - barrier_incomplete_size_eager(handle_; name=name) - else - barrier_incomplete_size_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function barrier_incomplete_size(handle_; 
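# Every `*_eager` wrapper above ends with the same epilogue: execute, then
# record a `tf.TapeNode` against the first output so the gradient tape can
# replay the op later. A sketch for a hypothetical single-input,
# single-output op (`MyOp`, `my_op`, and `x_` are illustrative, not part of
# the source):
function my_op_eager(x_; name=nothing)
    desc = tf.EagerOp("MyOp")
    x_ = convert(tf.EagerTensor, x_)
    tf.add_input(desc, x_)
    res = tf.execute(desc)                 # vector of eager outputs
    node = tf.TapeNode(my_op, [x_], name=nothing, res)
    if length(res) >= 1
        tf.add_node(res[1], node)          # register for autodiff
        return res[1]                      # single-output wrappers unwrap;
    end                                    # multi-output ones return `res`
end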
name=nothing) + if tf.in_eager_mode() + barrier_incomplete_size_eager(handle_; name=name) + else + barrier_incomplete_size_graph(handle_; name=name) + end end - end + end end @@ -56522,49 +103269,89 @@ end """ begin - function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) - local desc - tf.with_op_name(name, "Restore") do - desc = tf.NodeDescription("Restore") - file_pattern_ = convert(Tensor{String}, file_pattern_) - tensor_name_ = convert(Tensor{String}, tensor_name_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - if dt !== nothing - desc["dt"] = Base.identity(dt) - end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) + begin + function restore_graph(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + local desc + tf.with_op_name(name, "Restore") do + desc = tf.NodeDescription("Restore") + begin + begin + file_pattern_ = convert(Tensor{String}, file_pattern_) + begin + end + end + begin + tensor_name_ = convert(Tensor{String}, tensor_name_) + begin + end + end + end + begin + begin + tf.add_input(desc, file_pattern_) + end + begin + tf.add_input(desc, tensor_name_) + end + end + begin + begin + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + end + begin + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + desc = tf.EagerOp("Restore") + file_pattern_ = convert(tf.EagerTensor, file_pattern_) + tensor_name_ = convert(tf.EagerTensor, tensor_name_) + begin + begin + tf.add_input(desc, file_pattern_) + end + begin + tf.add_input(desc, tensor_name_) + end + end + begin + begin + if dt !== nothing + desc["dt"] = Base.identity(dt) + end + end + begin + if preferred_shard !== nothing + desc["preferred_shard"] = Base.Int(preferred_shard) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) + if tf.in_eager_mode() + restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + else + restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) + end end - end - tf.Tensor(tf.Operation(desc)) end - function restore_eager(file_pattern_, tensor_name_; name=nothing, dt=nothing, preferred_shard=nothing) - desc = tf.EagerOp("Restore") - file_pattern_ = convert(tf.EagerTensor, file_pattern_) - tensor_name_ = convert(tf.EagerTensor, tensor_name_) - tf.add_input(desc, file_pattern_) - tf.add_input(desc, tensor_name_) - if dt !== nothing - desc["dt"] = Base.identity(dt) - end - if preferred_shard !== nothing - desc["preferred_shard"] = Base.Int(preferred_shard) - end - res = tf.execute(desc) - node = tf.TapeNode(restore, [file_pattern_, tensor_name_], name=nothing, dt=nothing, preferred_shard=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function restore(file_pattern_, tensor_name_; 
name=nothing, dt=nothing, preferred_shard=nothing) - if tf.in_eager_mode() - restore_eager(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) - else - restore_graph(file_pattern_, tensor_name_; name=name, dt=dt, preferred_shard=preferred_shard) - end - end end @@ -56574,74 +103361,124 @@ end """ begin - function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - local desc - tf.with_op_name(name, "TensorArrayV3") do - desc = tf.NodeDescription("TensorArrayV3") - size_ = convert(Tensor{Int32}, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if identical_element_shapes !== nothing - desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + begin + function tensor_array_v3_graph(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + local desc + tf.with_op_name(name, "TensorArrayV3") do + desc = tf.NodeDescription("TensorArrayV3") + begin + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + end + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if identical_element_shapes !== nothing + desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + desc = tf.EagerOp("TensorArrayV3") + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, size_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + begin + if element_shape !== nothing + desc["element_shape"] = Base.identity(element_shape) + end + end + begin + if dynamic_size !== nothing + desc["dynamic_size"] = Base.Bool(dynamic_size) + end + end + begin + if clear_after_read !== nothing + desc["clear_after_read"] = Base.Bool(clear_after_read) + end + end + begin + if identical_element_shapes !== nothing + desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) + end + end + begin + if tensor_array_name !== nothing + desc["tensor_array_name"] = Base.String(tensor_array_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, 
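# `TensorArrayV3` above is a multi-output op (it returns a handle and a flow
# scalar), so its graph path collects one `tf.Tensor` per output index rather
# than wrapping a single result. The same collection loop, condensed, assuming
# `desc` is the finished `tf.NodeDescription` from above:
op = tf.Operation(desc)
out = tf.Tensor[]
for out_idx = 1:2                    # TensorArrayV3 has exactly two outputs
    push!(out, tf.Tensor(op, out_idx))
end
out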
dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) + if tf.in_eager_mode() + tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + else + tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) + end end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function tensor_array_v3_eager(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - desc = tf.EagerOp("TensorArrayV3") - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, size_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - if element_shape !== nothing - desc["element_shape"] = Base.identity(element_shape) - end - if dynamic_size !== nothing - desc["dynamic_size"] = Base.Bool(dynamic_size) - end - if clear_after_read !== nothing - desc["clear_after_read"] = Base.Bool(clear_after_read) - end - if identical_element_shapes !== nothing - desc["identical_element_shapes"] = Base.Bool(identical_element_shapes) - end - if tensor_array_name !== nothing - desc["tensor_array_name"] = Base.String(tensor_array_name) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_v3, [size_], name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_v3(size_; name=nothing, dtype=nothing, element_shape=nothing, dynamic_size=nothing, clear_after_read=nothing, identical_element_shapes=nothing, tensor_array_name=nothing) - if tf.in_eager_mode() - tensor_array_v3_eager(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) - else - tensor_array_v3_graph(size_; name=name, dtype=dtype, element_shape=element_shape, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) - end - end end @@ -56651,49 +103488,89 @@ end """ begin - function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalAssertNextDataset") do - desc = tf.NodeDescription("ExperimentalAssertNextDataset") - input_dataset_ = convert(Tensor{Any}, 
input_dataset_) - transformations_ = convert(Tensor{String}, transformations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, transformations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalAssertNextDataset") do + desc = tf.NodeDescription("ExperimentalAssertNextDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + transformations_ = convert(Tensor{String}, transformations_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, transformations_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalAssertNextDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + transformations_ = convert(tf.EagerTensor, transformations_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, transformations_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + else + experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) end - function experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalAssertNextDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - transformations_ = convert(tf.EagerTensor, transformations_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, transformations_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_assert_next_dataset, [input_dataset_, transformations_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], 
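# Dataset ops such as `ExperimentalAssertNextDataset` above carry list-valued
# attributes; unlike the scalar `Base.Int`/`Base.String` coercions, these go
# through `map(Base.identity, ...)` so a vector of dtypes or shapes is stored
# element by element. A sketch, assuming `output_types` was passed as, say,
# `[Int32, Float32]`:
if output_types !== nothing
    desc["output_types"] = map(Base.identity, output_types)
end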
node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_assert_next_dataset(input_dataset_, transformations_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_assert_next_dataset_eager(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) - else - experimental_assert_next_dataset_graph(input_dataset_, transformations_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -56703,45 +103580,85 @@ end """ begin - function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) - local desc - tf.with_op_name(name, "InTopK") do - desc = tf.NodeDescription("InTopK") - predictions_ = convert(Tensor{Float32}, predictions_) - targets_ = convert(Tensor{Int32}, targets_) - (targets_,) = tf.tf_promote(targets_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - if k !== nothing - desc["k"] = Base.Int(k) + begin + function in_top_k_graph(predictions_, targets_; name=nothing, k=nothing) + local desc + tf.with_op_name(name, "InTopK") do + desc = tf.NodeDescription("InTopK") + begin + begin + predictions_ = convert(Tensor{Float32}, predictions_) + begin + end + end + begin + targets_ = convert(Tensor{Int32}, targets_) + begin + end + end + begin + (targets_,) = tf.tf_promote(targets_) + end + end + begin + begin + tf.add_input(desc, predictions_) + end + begin + tf.add_input(desc, targets_) + end + end + begin + begin + if k !== nothing + desc["k"] = Base.Int(k) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) + desc = tf.EagerOp("InTopK") + predictions_ = convert(tf.EagerTensor, predictions_) + targets_ = convert(tf.EagerTensor, targets_) + begin + begin + tf.add_input(desc, predictions_) + end + begin + tf.add_input(desc, targets_) + end + end + begin + begin + if k !== nothing + desc["k"] = Base.Int(k) + end + end + end + begin + desc["T"] = tf.data_type(targets_) + end + res = tf.execute(desc) + node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) + if tf.in_eager_mode() + in_top_k_eager(predictions_, targets_; name=name, k=k) + else + in_top_k_graph(predictions_, targets_; name=name, k=k) + end end - end - tf.Tensor(tf.Operation(desc)) end - function in_top_k_eager(predictions_, targets_; name=nothing, k=nothing) - desc = tf.EagerOp("InTopK") - predictions_ = convert(tf.EagerTensor, predictions_) - targets_ = convert(tf.EagerTensor, targets_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - if k !== nothing - desc["k"] = Base.Int(k) - end - desc["T"] = tf.data_type(targets_) - res = tf.execute(desc) - node = tf.TapeNode(in_top_k, [predictions_, targets_], name=nothing, k=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_k(predictions_, targets_; name=nothing, k=nothing) - if tf.in_eager_mode() - in_top_k_eager(predictions_, targets_; name=name, k=k) - else - in_top_k_graph(predictions_, targets_; name=name, k=k) - end - end end @@ -56751,53 
+103668,107 @@ end """ begin - function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterSub") do - desc = tf.NodeDescription("ScatterSub") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_sub_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterSub") do + desc = tf.NodeDescription("ScatterSub") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterSub") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) end - function scatter_sub_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterSub") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_sub, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) 
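# A detail worth noting in `scatter_sub` above: the graph path rebases the
# indices with `indices_ - convert(tf.Tensor{eltype(indices_)}, 1)`, mapping
# Julia's 1-based indexing onto the kernel's 0-based expectation, while the
# eager path visible here passes `indices_` through unshifted. A graph-mode
# sketch of the same rebase done by hand (the values are illustrative):
idx  = convert(tf.Tensor{Int64}, [1, 3])    # Julia-style, 1-based
idx0 = idx - convert(tf.Tensor{Int64}, 1)   # 0-based, as the kernel expects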
>= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_sub(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_sub_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_sub_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -56807,315 +103778,567 @@ end """ begin - function acosh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Acosh") do - desc = tf.NodeDescription("Acosh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - end - tf.Tensor(tf.Operation(desc)) - end - function acosh_eager(x_; name=nothing) - desc = tf.EagerOp("Acosh") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(acosh, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acosh(x_; name=nothing) - if tf.in_eager_mode() - acosh_eager(x_; name=name) - else - acosh_graph(x_; name=name) + begin + function acosh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Acosh") do + desc = tf.NodeDescription("Acosh") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end end + tf.Tensor(tf.Operation(desc)) end -end - - -""" - depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1]) - - -""" -begin - function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - local desc - tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do - desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") - input_ = convert(Tensor{Any}, input_) - filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) - out_backprop_ = convert(Tensor{Any}, out_backprop_) - (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) - tf.add_input(desc, input_) - tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) + end + begin + function acosh_eager(x_; name=nothing) + desc = tf.EagerOp("Acosh") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end end - if padding !== nothing - desc["padding"] = Base.String(padding) + begin end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) + begin + desc["T"] = tf.data_type(x_) end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) + res = tf.execute(desc) + node = tf.TapeNode(acosh, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] end end - tf.Tensor(tf.Operation(desc)) end - function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") - input_ = convert(tf.EagerTensor, input_) - filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) - out_backprop_ = convert(tf.EagerTensor, out_backprop_) - tf.add_input(desc, input_) - 
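# Unary math ops like `Acosh` above first pass their argument through
# `tf.tf_promote`, which lifts plain Julia numbers and arrays into tensors of
# a common dtype; the eager path then stamps that dtype into the "T" attribute
# via `tf.data_type`. A hypothetical call, assuming `tf` is bound to the
# TensorFlow module (e.g. `import TensorFlow; const tf = TensorFlow`):
x = tf.constant([1.5, 2.0, 3.0])   # values >= 1, inside acosh's domain
y = acosh(x)                       # dispatches to the eager or graph variant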
tf.add_input(desc, filter_sizes_) - tf.add_input(desc, out_backprop_) - if strides !== nothing - desc["strides"] = map(Base.identity, strides) - end - if padding !== nothing - desc["padding"] = Base.String(padding) - end - if data_format !== nothing - desc["data_format"] = Base.String(data_format) - end - if dilations !== nothing - desc["dilations"] = map(Base.identity, dilations) - end - desc["T"] = tf.data_type(input_) - desc["T"] = tf.data_type(out_backprop_) - res = tf.execute(desc) - node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) - if tf.in_eager_mode() - depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) - else - depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function acosh(x_; name=nothing) + if tf.in_eager_mode() + acosh_eager(x_; name=name) + else + acosh_graph(x_; name=name) + end end - end + end end """ - quantize_v2(input, min_range, max_range; mode=, round_mode=) + depthwise_conv2d_native_backprop_filter(input, filter_sizes, out_backprop; data_format=, dilations=[1, 1, 1, 1]) """ begin - function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - local desc - tf.with_op_name(name, "QuantizeV2") do - desc = tf.NodeDescription("QuantizeV2") - input_ = convert(Tensor{Float32}, input_) - min_range_ = convert(Tensor{Float32}, min_range_) - max_range_ = convert(Tensor{Float32}, max_range_) - tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) + begin + function depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + local desc + tf.with_op_name(name, "DepthwiseConv2dNativeBackpropFilter") do + desc = tf.NodeDescription("DepthwiseConv2dNativeBackpropFilter") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + filter_sizes_ = convert(Tensor{Int32}, filter_sizes_) + begin + end + end + begin + out_backprop_ = convert(Tensor{Any}, out_backprop_) + begin + end + end + begin + (input_, out_backprop_) = tf.tf_promote(input_, out_backprop_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== 
nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + desc = tf.EagerOp("DepthwiseConv2dNativeBackpropFilter") + input_ = convert(tf.EagerTensor, input_) + filter_sizes_ = convert(tf.EagerTensor, filter_sizes_) + out_backprop_ = convert(tf.EagerTensor, out_backprop_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, filter_sizes_) + end + begin + tf.add_input(desc, out_backprop_) + end + end + begin + begin + if strides !== nothing + desc["strides"] = map(Base.identity, strides) + end + end + begin + if padding !== nothing + desc["padding"] = Base.String(padding) + end + end + begin + if data_format !== nothing + desc["data_format"] = Base.String(data_format) + end + end + begin + if dilations !== nothing + desc["dilations"] = map(Base.identity, dilations) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["T"] = tf.data_type(out_backprop_) + end + res = tf.execute(desc) + node = tf.TapeNode(depthwise_conv2d_native_backprop_filter, [input_, filter_sizes_, out_backprop_], name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function depthwise_conv2d_native_backprop_filter(input_, filter_sizes_, out_backprop_; name=nothing, strides=nothing, padding=nothing, data_format=nothing, dilations=nothing) + if tf.in_eager_mode() + depthwise_conv2d_native_backprop_filter_eager(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + else + depthwise_conv2d_native_backprop_filter_graph(input_, filter_sizes_, out_backprop_; name=name, strides=strides, padding=padding, data_format=data_format, dilations=dilations) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - desc = tf.EagerOp("QuantizeV2") - input_ = convert(tf.EagerTensor, input_) - min_range_ = convert(tf.EagerTensor, min_range_) - max_range_ = convert(tf.EagerTensor, max_range_) - tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - if round_mode !== nothing - desc["round_mode"] = Base.String(round_mode) - end - res = tf.execute(desc) - node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) - if tf.in_eager_mode() - quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) - else - quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) - end - end end """ - cast(x; Truncate=false) + quantize_v2(input, min_range, max_range; mode=, round_mode=) """ begin - 
function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - local desc - tf.with_op_name(name, "Cast") do - desc = tf.NodeDescription("Cast") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) + begin + function quantize_v2_graph(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + local desc + tf.with_op_name(name, "QuantizeV2") do + desc = tf.NodeDescription("QuantizeV2") + begin + begin + input_ = convert(Tensor{Float32}, input_) + begin + end + end + begin + min_range_ = convert(Tensor{Float32}, min_range_) + begin + end + end + begin + max_range_ = convert(Tensor{Float32}, max_range_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_range_) + end + begin + tf.add_input(desc, max_range_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantize_v2_eager(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + desc = tf.EagerOp("QuantizeV2") + input_ = convert(tf.EagerTensor, input_) + min_range_ = convert(tf.EagerTensor, min_range_) + max_range_ = convert(tf.EagerTensor, max_range_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_range_) + end + begin + tf.add_input(desc, max_range_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + begin + if round_mode !== nothing + desc["round_mode"] = Base.String(round_mode) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(quantize_v2, [input_, min_range_, max_range_], name=nothing, mode=nothing, round_mode=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantize_v2(input_, min_range_, max_range_; name=nothing, mode=nothing, round_mode=nothing) + if tf.in_eager_mode() + quantize_v2_eager(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + else + quantize_v2_graph(input_, min_range_, max_range_; name=name, mode=mode, round_mode=round_mode) + end end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - end - tf.Tensor(tf.Operation(desc)) end - function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - desc = tf.EagerOp("Cast") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - if SrcT !== nothing - desc["SrcT"] = Base.identity(SrcT) - end - if DstT !== nothing - desc["DstT"] = Base.identity(DstT) - end - if Truncate !== nothing - desc["Truncate"] = Base.Bool(Truncate) - end - desc["SrcT"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) - if tf.in_eager_mode() - cast_eager(x_; name=name, SrcT=SrcT, 
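# In `Cast` above, `SrcT` is effectively derived rather than trusted: the
# eager wrapper overwrites it with `tf.data_type(x_)` after the attributes are
# set, so only `DstT` (and optionally `Truncate`) meaningfully steer the op.
# A hypothetical usage sketch, assuming the attribute setter accepts a plain
# Julia type for the `DstT` dtype argument:
x = tf.constant([1.9f0, 2.1f0])
y = cast(x; DstT=Int32)   # SrcT is re-inferred from `x` in eager mode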
DstT=DstT, Truncate=Truncate) - else - cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) - end - end end -""" - generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args) - - -""" -begin - function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "GeneratorDataset") do - desc = tf.NodeDescription("GeneratorDataset") - init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_] - next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_] - finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_] - tf.add_input(desc, init_func_other_args_) - tf.add_input(desc, next_func_other_args_) - tf.add_input(desc, finalize_func_other_args_) - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if next_func !== nothing - desc["next_func"] = Base.identity(next_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tinit_func_args !== nothing - desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) - end - if Tnext_func_args !== nothing - desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) - end - if Tfinalize_func_args !== nothing - desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - end - tf.Tensor(tf.Operation(desc)) - end - function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("GeneratorDataset") - init_func_other_args_ = convert(tf.EagerTensor, init_func_other_args_) - next_func_other_args_ = convert(tf.EagerTensor, next_func_other_args_) - finalize_func_other_args_ = convert(tf.EagerTensor, finalize_func_other_args_) - tf.add_input(desc, init_func_other_args_) - tf.add_input(desc, next_func_other_args_) - tf.add_input(desc, finalize_func_other_args_) - if init_func !== nothing - desc["init_func"] = Base.identity(init_func) - end - if next_func !== nothing - desc["next_func"] = Base.identity(next_func) - end - if finalize_func !== nothing - desc["finalize_func"] = Base.identity(finalize_func) - end - if Tinit_func_args !== nothing - desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) - end - if Tnext_func_args !== nothing - desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) - end - if Tfinalize_func_args !== nothing - desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, 
Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end +""" + cast(x; Truncate=false) + + +""" +begin + begin + function cast_graph(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + local desc + tf.with_op_name(name, "Cast") do + desc = tf.NodeDescription("Cast") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + end + begin + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + end + begin + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function cast_eager(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + desc = tf.EagerOp("Cast") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + begin + if SrcT !== nothing + desc["SrcT"] = Base.identity(SrcT) + end + end + begin + if DstT !== nothing + desc["DstT"] = Base.identity(DstT) + end + end + begin + if Truncate !== nothing + desc["Truncate"] = Base.Bool(Truncate) + end + end + end + begin + desc["SrcT"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(cast, [x_], name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cast(x_; name=nothing, SrcT=nothing, DstT=nothing, Truncate=nothing) + if tf.in_eager_mode() + cast_eager(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + else + cast_graph(x_; name=name, SrcT=SrcT, DstT=DstT, Truncate=Truncate) + end + end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) - else - generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) +end + + +""" + generator_dataset(init_func_other_args, next_func_other_args, finalize_func_other_args) + + +""" +begin + begin + function generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, 
"GeneratorDataset") do + desc = tf.NodeDescription("GeneratorDataset") + begin + begin + init_func_other_args_ = [convert(Tensor{Any}, x) for x = init_func_other_args_] + begin + end + end + begin + next_func_other_args_ = [convert(Tensor{Any}, x) for x = next_func_other_args_] + begin + end + end + begin + finalize_func_other_args_ = [convert(Tensor{Any}, x) for x = finalize_func_other_args_] + begin + end + end + end + begin + begin + tf.add_input(desc, init_func_other_args_) + end + begin + tf.add_input(desc, next_func_other_args_) + end + begin + tf.add_input(desc, finalize_func_other_args_) + end + end + begin + begin + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + end + begin + if next_func !== nothing + desc["next_func"] = Base.identity(next_func) + end + end + begin + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + end + begin + if Tinit_func_args !== nothing + desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) + end + end + begin + if Tnext_func_args !== nothing + desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) + end + end + begin + if Tfinalize_func_args !== nothing + desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("GeneratorDataset") + init_func_other_args_ = convert(tf.EagerTensor, init_func_other_args_) + next_func_other_args_ = convert(tf.EagerTensor, next_func_other_args_) + finalize_func_other_args_ = convert(tf.EagerTensor, finalize_func_other_args_) + begin + begin + tf.add_input(desc, init_func_other_args_) + end + begin + tf.add_input(desc, next_func_other_args_) + end + begin + tf.add_input(desc, finalize_func_other_args_) + end + end + begin + begin + if init_func !== nothing + desc["init_func"] = Base.identity(init_func) + end + end + begin + if next_func !== nothing + desc["next_func"] = Base.identity(next_func) + end + end + begin + if finalize_func !== nothing + desc["finalize_func"] = Base.identity(finalize_func) + end + end + begin + if Tinit_func_args !== nothing + desc["Tinit_func_args"] = map(Base.identity, Tinit_func_args) + end + end + begin + if Tnext_func_args !== nothing + desc["Tnext_func_args"] = map(Base.identity, Tnext_func_args) + end + end + begin + if Tfinalize_func_args !== nothing + desc["Tfinalize_func_args"] = map(Base.identity, Tfinalize_func_args) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(generator_dataset, [init_func_other_args_, next_func_other_args_, finalize_func_other_args_], name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, 
output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function generator_dataset(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=nothing, init_func=nothing, next_func=nothing, finalize_func=nothing, Tinit_func_args=nothing, Tnext_func_args=nothing, Tfinalize_func_args=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + generator_dataset_eager(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + else + generator_dataset_graph(init_func_other_args_, next_func_other_args_, finalize_func_other_args_; name=name, init_func=init_func, next_func=next_func, finalize_func=finalize_func, Tinit_func_args=Tinit_func_args, Tnext_func_args=Tnext_func_args, Tfinalize_func_args=Tfinalize_func_args, output_types=output_types, output_shapes=output_shapes) + end end - end + end end @@ -57125,33 +104348,57 @@ end """ begin - function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorForestTreeSerialize") do - desc = tf.NodeDescription("TensorForestTreeSerialize") - tree_handle_ = convert(Tensor{Any}, tree_handle_) - tf.add_input(desc, tree_handle_) + begin + function tensor_forest_tree_serialize_graph(tree_handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorForestTreeSerialize") do + desc = tf.NodeDescription("TensorForestTreeSerialize") + begin + begin + tree_handle_ = convert(Tensor{Any}, tree_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) - desc = tf.EagerOp("TensorForestTreeSerialize") - tree_handle_ = convert(tf.EagerTensor, tree_handle_) - tf.add_input(desc, tree_handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_forest_tree_serialize_eager(tree_handle_; name=nothing) + desc = tf.EagerOp("TensorForestTreeSerialize") + tree_handle_ = convert(tf.EagerTensor, tree_handle_) + begin + begin + tf.add_input(desc, tree_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_forest_tree_serialize, [tree_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing) - if tf.in_eager_mode() - tensor_forest_tree_serialize_eager(tree_handle_; name=name) - else - tensor_forest_tree_serialize_graph(tree_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_forest_tree_serialize(tree_handle_; name=nothing) + if tf.in_eager_mode() + tensor_forest_tree_serialize_eager(tree_handle_; name=name) + else + tensor_forest_tree_serialize_graph(tree_handle_; name=name) + end end - end + end end @@ -57161,33 +104408,57 @@ end """ begin - 
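# `GeneratorDataset` above takes three variadic tensor lists plus
# function-valued attributes (`init_func`, `next_func`, `finalize_func`).
# Its graph path lifts each list element individually before handing the
# whole vector to `tf.add_input`, whereas the eager path converts the
# collection in one call. The graph-side conversion, condensed, assuming
# `args` is a vector of Julia values and `desc` an open description:
args = [convert(Tensor{Any}, x) for x in args]   # per-element lift
tf.add_input(desc, args)                         # a list input is added once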
function tensor_array_close_v2_graph(handle_; name=nothing) - local desc - tf.with_op_name(name, "TensorArrayCloseV2") do - desc = tf.NodeDescription("TensorArrayCloseV2") - handle_ = convert(Tensor{String}, handle_) - tf.add_input(desc, handle_) + begin + function tensor_array_close_v2_graph(handle_; name=nothing) + local desc + tf.with_op_name(name, "TensorArrayCloseV2") do + desc = tf.NodeDescription("TensorArrayCloseV2") + begin + begin + handle_ = convert(Tensor{String}, handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function tensor_array_close_v2_eager(handle_; name=nothing) - desc = tf.EagerOp("TensorArrayCloseV2") - handle_ = convert(tf.EagerTensor, handle_) - tf.add_input(desc, handle_) - res = tf.execute(desc) - node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function tensor_array_close_v2_eager(handle_; name=nothing) + desc = tf.EagerOp("TensorArrayCloseV2") + handle_ = convert(tf.EagerTensor, handle_) + begin + begin + tf.add_input(desc, handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_array_close_v2, [handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v2(handle_; name=nothing) - if tf.in_eager_mode() - tensor_array_close_v2_eager(handle_; name=name) - else - tensor_array_close_v2_graph(handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_array_close_v2(handle_; name=nothing) + if tf.in_eager_mode() + tensor_array_close_v2_eager(handle_; name=name) + else + tensor_array_close_v2_graph(handle_; name=name) + end end - end + end end @@ -57197,77 +104468,125 @@ end A Reader that outputs rows from a BigQuery table as tensorflow Examples. 
""" begin - function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - local desc - tf.with_op_name(name, "BigQueryReader") do - desc = tf.NodeDescription("BigQueryReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if project_id !== nothing - desc["project_id"] = Base.String(project_id) + begin + function big_query_reader_graph(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + local desc + tf.with_op_name(name, "BigQueryReader") do + desc = tf.NodeDescription("BigQueryReader") + begin + end + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + end + begin + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + end + begin + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + end + begin + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + end + begin + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + end + begin + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function big_query_reader_eager(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + desc = tf.EagerOp("BigQueryReader") + begin + end + begin + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + begin + if project_id !== nothing + desc["project_id"] = Base.String(project_id) + end + end + begin + if dataset_id !== nothing + desc["dataset_id"] = Base.String(dataset_id) + end + end + begin + if table_id !== nothing + desc["table_id"] = Base.String(table_id) + end + end + begin + if columns !== nothing + desc["columns"] = map(Base.identity, columns) + end + end + begin + if timestamp_millis !== nothing + desc["timestamp_millis"] = Base.Int(timestamp_millis) + end + end + begin + if test_end_point !== nothing + desc["test_end_point"] = Base.String(test_end_point) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) + if tf.in_eager_mode() + big_query_reader_eager(; name=name, container=container, 
shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + else + big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) + end end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) - end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - end - tf.Tensor(tf.Operation(desc)) - end - function big_query_reader_eager(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - desc = tf.EagerOp("BigQueryReader") - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - if project_id !== nothing - desc["project_id"] = Base.String(project_id) - end - if dataset_id !== nothing - desc["dataset_id"] = Base.String(dataset_id) - end - if table_id !== nothing - desc["table_id"] = Base.String(table_id) - end - if columns !== nothing - desc["columns"] = map(Base.identity, columns) - end - if timestamp_millis !== nothing - desc["timestamp_millis"] = Base.Int(timestamp_millis) - end - if test_end_point !== nothing - desc["test_end_point"] = Base.String(test_end_point) - end - res = tf.execute(desc) - node = tf.TapeNode(big_query_reader, [], name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function big_query_reader(; name=nothing, container=nothing, shared_name=nothing, project_id=nothing, dataset_id=nothing, table_id=nothing, columns=nothing, timestamp_millis=nothing, test_end_point=nothing) - if tf.in_eager_mode() - big_query_reader_eager(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) - else - big_query_reader_graph(; name=name, container=container, shared_name=shared_name, project_id=project_id, dataset_id=dataset_id, table_id=table_id, columns=columns, timestamp_millis=timestamp_millis, test_end_point=test_end_point) - end - end end @@ -57277,42 +104596,76 @@ end """ begin - function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) - local desc - tf.with_op_name(name, "ReaderReadV2") do - desc = tf.NodeDescription("ReaderReadV2") - reader_handle_ = convert(Tensor{Any}, reader_handle_) - queue_handle_ = convert(Tensor{Any}, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function reader_read_v2_graph(reader_handle_, queue_handle_; name=nothing) + local desc + 
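                # The generated graph wrappers all share this shape: open a
                # with_op_name scope, build a NodeDescription, convert and
                # register the inputs, then materialize the outputs.
                # ReaderReadV2 is a two-output op, so the loop further below
                # collects both outputs (key, value) into a Vector{tf.Tensor}.
                # A hypothetical call (handles are illustrative):
                #   key, value = reader_read_v2(reader_handle, queue_handle)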
tf.with_op_name(name, "ReaderReadV2") do + desc = tf.NodeDescription("ReaderReadV2") + begin + begin + reader_handle_ = convert(Tensor{Any}, reader_handle_) + begin + end + end + begin + queue_handle_ = convert(Tensor{Any}, queue_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) - desc = tf.EagerOp("ReaderReadV2") - reader_handle_ = convert(tf.EagerTensor, reader_handle_) - queue_handle_ = convert(tf.EagerTensor, queue_handle_) - tf.add_input(desc, reader_handle_) - tf.add_input(desc, queue_handle_) - res = tf.execute(desc) - node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function reader_read_v2_eager(reader_handle_, queue_handle_; name=nothing) + desc = tf.EagerOp("ReaderReadV2") + reader_handle_ = convert(tf.EagerTensor, reader_handle_) + queue_handle_ = convert(tf.EagerTensor, queue_handle_) + begin + begin + tf.add_input(desc, reader_handle_) + end + begin + tf.add_input(desc, queue_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(reader_read_v2, [reader_handle_, queue_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing) - if tf.in_eager_mode() - reader_read_v2_eager(reader_handle_, queue_handle_; name=name) - else - reader_read_v2_graph(reader_handle_, queue_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function reader_read_v2(reader_handle_, queue_handle_; name=nothing) + if tf.in_eager_mode() + reader_read_v2_eager(reader_handle_, queue_handle_; name=name) + else + reader_read_v2_graph(reader_handle_, queue_handle_; name=name) + end end - end + end end @@ -57322,40 +104675,78 @@ end """ begin - function mod_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "Mod") do - desc = tf.NodeDescription("Mod") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function mod_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "Mod") do + desc = tf.NodeDescription("Mod") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function mod_eager(x_, y_; name=nothing) - desc = tf.EagerOp("Mod") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(mod, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function mod_eager(x_, y_; name=nothing) + desc = tf.EagerOp("Mod") + x_ = convert(tf.EagerTensor, 
x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(mod, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mod(x_, y_; name=nothing) - if tf.in_eager_mode() - mod_eager(x_, y_; name=name) - else - mod_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function mod(x_, y_; name=nothing) + if tf.in_eager_mode() + mod_eager(x_, y_; name=name) + else + mod_graph(x_, y_; name=name) + end end - end + end end @@ -57365,40 +104756,78 @@ end """ begin - function add_v2_graph(x_, y_; name=nothing) - local desc - tf.with_op_name(name, "AddV2") do - desc = tf.NodeDescription("AddV2") - x_ = convert(Tensor{Any}, x_) - y_ = convert(Tensor{Any}, y_) - (x_, y_) = tf.tf_promote(x_, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) + begin + function add_v2_graph(x_, y_; name=nothing) + local desc + tf.with_op_name(name, "AddV2") do + desc = tf.NodeDescription("AddV2") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + (x_, y_) = tf.tf_promote(x_, y_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function add_v2_eager(x_, y_; name=nothing) - desc = tf.EagerOp("AddV2") - x_ = convert(tf.EagerTensor, x_) - y_ = convert(tf.EagerTensor, y_) - tf.add_input(desc, x_) - tf.add_input(desc, y_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(y_) - res = tf.execute(desc) - node = tf.TapeNode(add_v2, [x_, y_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function add_v2_eager(x_, y_; name=nothing) + desc = tf.EagerOp("AddV2") + x_ = convert(tf.EagerTensor, x_) + y_ = convert(tf.EagerTensor, y_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, y_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(y_) + end + res = tf.execute(desc) + node = tf.TapeNode(add_v2, [x_, y_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_v2(x_, y_; name=nothing) - if tf.in_eager_mode() - add_v2_eager(x_, y_; name=name) - else - add_v2_graph(x_, y_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function add_v2(x_, y_; name=nothing) + if tf.in_eager_mode() + add_v2_eager(x_, y_; name=name) + else + add_v2_graph(x_, y_; name=name) + end end - end + end end @@ -57408,47 +104837,91 @@ end """ begin - function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "StatelessRandomNormal") do - desc = tf.NodeDescription("StatelessRandomNormal") - shape_ = convert(Tensor{Int32}, shape_) - seed_ = convert(Tensor{Int64}, seed_) - (shape_,) = tf.tf_promote(shape_) - (seed_,) = tf.tf_promote(seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = 
Base.identity(dtype) + begin + function stateless_random_normal_graph(shape_, seed_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "StatelessRandomNormal") do + desc = tf.NodeDescription("StatelessRandomNormal") + begin + begin + shape_ = convert(Tensor{Int32}, shape_) + begin + end + end + begin + seed_ = convert(Tensor{Int64}, seed_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + begin + (seed_,) = tf.tf_promote(seed_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) + desc = tf.EagerOp("StatelessRandomNormal") + shape_ = convert(tf.EagerTensor, shape_) + seed_ = convert(tf.EagerTensor, seed_) + begin + begin + tf.add_input(desc, shape_) + end + begin + tf.add_input(desc, seed_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + begin + desc["Tseed"] = tf.data_type(seed_) + end + res = tf.execute(desc) + node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) + else + stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function stateless_random_normal_eager(shape_, seed_; name=nothing, dtype=nothing) - desc = tf.EagerOp("StatelessRandomNormal") - shape_ = convert(tf.EagerTensor, shape_) - seed_ = convert(tf.EagerTensor, seed_) - tf.add_input(desc, shape_) - tf.add_input(desc, seed_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - desc["Tseed"] = tf.data_type(seed_) - res = tf.execute(desc) - node = tf.TapeNode(stateless_random_normal, [shape_, seed_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function stateless_random_normal(shape_, seed_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - stateless_random_normal_eager(shape_, seed_; name=name, dtype=dtype) - else - stateless_random_normal_graph(shape_, seed_; name=name, dtype=dtype) - end - end end @@ -57458,125 +104931,239 @@ end """ begin - function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "StridedSliceAssign") do - desc = tf.NodeDescription("StridedSliceAssign") - ref_ = convert(Tensor{Any}, ref_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - value_ = convert(Tensor{Any}, value_) - (ref_, 
value_) = tf.tf_promote(ref_, value_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) + begin + function strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "StridedSliceAssign") do + desc = tf.NodeDescription("StridedSliceAssign") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + begin_ = convert(Tensor{Any}, begin_) + begin + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end + end + begin + end_ = convert(Tensor{Any}, end_) + begin + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + end + end + begin + strides_ = convert(Tensor{Any}, strides_) + begin + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (ref_, value_) = tf.tf_promote(ref_, value_) + end + begin + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("StridedSliceAssign") + ref_ = convert(tf.EagerTensor, ref_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== 
nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Index"] = tf.data_type(begin_) + end + begin + desc["Index"] = tf.data_type(end_) + end + begin + desc["Index"] = tf.data_type(strides_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) - end - function strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - desc = tf.EagerOp("StridedSliceAssign") - ref_ = convert(tf.EagerTensor, ref_) - begin_ = convert(tf.EagerTensor, begin_) - end_ = convert(tf.EagerTensor, end_) - strides_ = convert(tf.EagerTensor, strides_) - value_ = convert(tf.EagerTensor, 
value_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - desc["T"] = tf.data_type(ref_) - desc["Index"] = tf.data_type(begin_) - desc["Index"] = tf.data_type(end_) - desc["Index"] = tf.data_type(strides_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - end - end end @@ -57586,53 +105173,107 @@ end """ begin - function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ScatterMin") do - desc = tf.NodeDescription("ScatterMin") - ref_ = convert(Tensor{Any}, ref_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (ref_, updates_) = tf.tf_promote(ref_, updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function scatter_min_graph(ref_, indices_, updates_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ScatterMin") do + desc = tf.NodeDescription("ScatterMin") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (ref_, updates_) = tf.tf_promote(ref_, updates_) + end + 
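                        # tf.tf_promote unifies element types so a single "T"
                        # attr covers ref/updates; indices_ was shifted by -1
                        # above, mapping Julia's 1-based indices onto the
                        # kernel's 0-based ScatterMin semantics.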
begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ScatterMin") + ref_ = convert(tf.EagerTensor, ref_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(ref_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) + else + scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function scatter_min_eager(ref_, indices_, updates_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ScatterMin") - ref_ = convert(tf.EagerTensor, ref_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, ref_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(ref_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(scatter_min, [ref_, indices_, updates_], name=nothing, use_locking=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function scatter_min(ref_, indices_, updates_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - scatter_min_eager(ref_, indices_, updates_; name=name, use_locking=use_locking) - else - scatter_min_graph(ref_, indices_, updates_; name=name, use_locking=use_locking) - end - end end @@ -57642,124 +105283,236 @@ end """ begin - function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - local desc - tf.with_op_name(name, "ResourceStridedSliceAssign") do - desc = tf.NodeDescription("ResourceStridedSliceAssign") - ref_ = convert(Tensor{Any}, ref_) - begin_ = convert(Tensor{Any}, begin_) - begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) - end_ = convert(Tensor{Any}, end_) - end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) - strides_ = convert(Tensor{Any}, strides_) - strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) - value_ = convert(Tensor{Any}, value_) - 
(value_,) = tf.tf_promote(value_) - (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) + begin + function resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + local desc + tf.with_op_name(name, "ResourceStridedSliceAssign") do + desc = tf.NodeDescription("ResourceStridedSliceAssign") + begin + begin + ref_ = convert(Tensor{Any}, ref_) + begin + end + end + begin + begin_ = convert(Tensor{Any}, begin_) + begin + begin_ = begin_ - convert(tf.Tensor{eltype(begin_)}, 1) + end + end + begin + end_ = convert(Tensor{Any}, end_) + begin + end_ = end_ - convert(tf.Tensor{eltype(end_)}, 1) + end + end + begin + strides_ = convert(Tensor{Any}, strides_) + begin + strides_ = strides_ - convert(tf.Tensor{eltype(strides_)}, 1) + end + end + begin + value_ = convert(Tensor{Any}, value_) + begin + end + end + begin + (value_,) = tf.tf_promote(value_) + end + begin + (begin_, end_, strides_) = tf.tf_promote(begin_, end_, strides_) + end + end + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + desc = tf.EagerOp("ResourceStridedSliceAssign") + ref_ = convert(tf.EagerTensor, ref_) + begin_ = convert(tf.EagerTensor, begin_) + end_ = convert(tf.EagerTensor, end_) + strides_ = convert(tf.EagerTensor, strides_) + value_ = convert(tf.EagerTensor, value_) + begin + begin + tf.add_input(desc, ref_) + end + begin + tf.add_input(desc, begin_) + end + begin + tf.add_input(desc, end_) + end + begin + tf.add_input(desc, strides_) + end + begin + tf.add_input(desc, value_) + end + end + begin + begin + if Index !== nothing + desc["Index"] = Base.identity(Index) + end + end + begin + if begin_mask !== nothing + begin_mask = Base.Int(begin_mask) - 1 + end + end + 
begin + if begin_mask !== nothing + desc["begin_mask"] = Base.Int(begin_mask) + end + end + begin + if end_mask !== nothing + end_mask = Base.Int(end_mask) - 1 + end + end + begin + if end_mask !== nothing + desc["end_mask"] = Base.Int(end_mask) + end + end + begin + if ellipsis_mask !== nothing + ellipsis_mask = Base.Int(ellipsis_mask) - 1 + end + end + begin + if ellipsis_mask !== nothing + desc["ellipsis_mask"] = Base.Int(ellipsis_mask) + end + end + begin + if new_axis_mask !== nothing + new_axis_mask = Base.Int(new_axis_mask) - 1 + end + end + begin + if new_axis_mask !== nothing + desc["new_axis_mask"] = Base.Int(new_axis_mask) + end + end + begin + if shrink_axis_mask !== nothing + shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 + end + end + begin + if shrink_axis_mask !== nothing + desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) + end + end + end + begin + desc["Index"] = tf.data_type(begin_) + end + begin + desc["Index"] = tf.data_type(end_) + end + begin + desc["Index"] = tf.data_type(strides_) + end + begin + desc["T"] = tf.data_type(value_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) + if tf.in_eager_mode() + resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + else + resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) + end end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - desc = tf.EagerOp("ResourceStridedSliceAssign") - ref_ = convert(tf.EagerTensor, ref_) - begin_ = convert(tf.EagerTensor, begin_) - end_ = convert(tf.EagerTensor, end_) - strides_ = convert(tf.EagerTensor, strides_) - 
value_ = convert(tf.EagerTensor, value_) - tf.add_input(desc, ref_) - tf.add_input(desc, begin_) - tf.add_input(desc, end_) - tf.add_input(desc, strides_) - tf.add_input(desc, value_) - if Index !== nothing - desc["Index"] = Base.identity(Index) - end - if begin_mask !== nothing - begin_mask = Base.Int(begin_mask) - 1 - end - if begin_mask !== nothing - desc["begin_mask"] = Base.Int(begin_mask) - end - if end_mask !== nothing - end_mask = Base.Int(end_mask) - 1 - end - if end_mask !== nothing - desc["end_mask"] = Base.Int(end_mask) - end - if ellipsis_mask !== nothing - ellipsis_mask = Base.Int(ellipsis_mask) - 1 - end - if ellipsis_mask !== nothing - desc["ellipsis_mask"] = Base.Int(ellipsis_mask) - end - if new_axis_mask !== nothing - new_axis_mask = Base.Int(new_axis_mask) - 1 - end - if new_axis_mask !== nothing - desc["new_axis_mask"] = Base.Int(new_axis_mask) - end - if shrink_axis_mask !== nothing - shrink_axis_mask = Base.Int(shrink_axis_mask) - 1 - end - if shrink_axis_mask !== nothing - desc["shrink_axis_mask"] = Base.Int(shrink_axis_mask) - end - desc["Index"] = tf.data_type(begin_) - desc["Index"] = tf.data_type(end_) - desc["Index"] = tf.data_type(strides_) - desc["T"] = tf.data_type(value_) - res = tf.execute(desc) - node = tf.TapeNode(resource_strided_slice_assign, [ref_, begin_, end_, strides_, value_], name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_strided_slice_assign(ref_, begin_, end_, strides_, value_; name=nothing, Index=nothing, begin_mask=nothing, end_mask=nothing, ellipsis_mask=nothing, new_axis_mask=nothing, shrink_axis_mask=nothing) - if tf.in_eager_mode() - resource_strided_slice_assign_eager(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - else - resource_strided_slice_assign_graph(ref_, begin_, end_, strides_, value_; name=name, Index=Index, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask) - end - end end @@ -57769,40 +105522,78 @@ end """ begin - function random_gamma_grad_graph(alpha_, sample_; name=nothing) - local desc - tf.with_op_name(name, "RandomGammaGrad") do - desc = tf.NodeDescription("RandomGammaGrad") - alpha_ = convert(Tensor{Any}, alpha_) - sample_ = convert(Tensor{Any}, sample_) - (alpha_, sample_) = tf.tf_promote(alpha_, sample_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sample_) + begin + function random_gamma_grad_graph(alpha_, sample_; name=nothing) + local desc + tf.with_op_name(name, "RandomGammaGrad") do + desc = tf.NodeDescription("RandomGammaGrad") + begin + begin + alpha_ = convert(Tensor{Any}, alpha_) + begin + end + end + begin + sample_ = convert(Tensor{Any}, sample_) + begin + end + end + begin + (alpha_, sample_) = tf.tf_promote(alpha_, sample_) + end + end + begin + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sample_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function random_gamma_grad_eager(alpha_, sample_; name=nothing) - desc = tf.EagerOp("RandomGammaGrad") - alpha_ = convert(tf.EagerTensor, alpha_) - sample_ = 
convert(tf.EagerTensor, sample_) - tf.add_input(desc, alpha_) - tf.add_input(desc, sample_) - desc["T"] = tf.data_type(alpha_) - desc["T"] = tf.data_type(sample_) - res = tf.execute(desc) - node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function random_gamma_grad_eager(alpha_, sample_; name=nothing) + desc = tf.EagerOp("RandomGammaGrad") + alpha_ = convert(tf.EagerTensor, alpha_) + sample_ = convert(tf.EagerTensor, sample_) + begin + begin + tf.add_input(desc, alpha_) + end + begin + tf.add_input(desc, sample_) + end + end + begin + end + begin + desc["T"] = tf.data_type(alpha_) + end + begin + desc["T"] = tf.data_type(sample_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_gamma_grad, [alpha_, sample_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing) - if tf.in_eager_mode() - random_gamma_grad_eager(alpha_, sample_; name=name) - else - random_gamma_grad_graph(alpha_, sample_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_gamma_grad(alpha_, sample_; name=nothing) + if tf.in_eager_mode() + random_gamma_grad_eager(alpha_, sample_; name=name) + else + random_gamma_grad_graph(alpha_, sample_; name=name) + end end - end + end end @@ -57812,72 +105603,156 @@ end """ begin - function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - local desc - tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do - desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - momentum_ = convert(Tensor{Any}, momentum_) - (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) + begin + function resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + local desc + tf.with_op_name(name, "ResourceSparseApplyKerasMomentum") do + desc = tf.NodeDescription("ResourceSparseApplyKerasMomentum") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + (lr_, grad_, momentum_) = tf.tf_promote(lr_, grad_, momentum_) + end + begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + 
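                        # Inputs go in the order the kernel expects: var,
                        # accum, lr, grad, indices, momentum. Assuming the
                        # standard TF Keras-style momentum update, this op
                        # computes, at the given (0-based) indices:
                        #   accum = momentum * accum - lr * grad
                        #   var  += use_nesterov ? momentum * accum - lr * grad : accum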
tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + indices_ = convert(tf.EagerTensor, indices_) + momentum_ = convert(tf.EagerTensor, momentum_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, momentum_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if use_nesterov !== nothing + desc["use_nesterov"] = Base.Bool(use_nesterov) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) + if tf.in_eager_mode() + resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + else + resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) + end end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - desc = tf.EagerOp("ResourceSparseApplyKerasMomentum") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - indices_ = convert(tf.EagerTensor, indices_) - momentum_ = convert(tf.EagerTensor, momentum_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - tf.add_input(desc, indices_) - tf.add_input(desc, momentum_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if use_nesterov !== nothing - desc["use_nesterov"] = Base.Bool(use_nesterov) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - desc["Tindices"] = tf.data_type(indices_) - desc["T"] = tf.data_type(momentum_) - res = 
tf.execute(desc) - node = tf.TapeNode(resource_sparse_apply_keras_momentum, [var_, accum_, lr_, grad_, indices_, momentum_], name=nothing, use_locking=nothing, use_nesterov=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_sparse_apply_keras_momentum(var_, accum_, lr_, grad_, indices_, momentum_; name=nothing, use_locking=nothing, use_nesterov=nothing) - if tf.in_eager_mode() - resource_sparse_apply_keras_momentum_eager(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - else - resource_sparse_apply_keras_momentum_graph(var_, accum_, lr_, grad_, indices_, momentum_; name=name, use_locking=use_locking, use_nesterov=use_nesterov) - end - end end @@ -57887,47 +105762,91 @@ end """ begin - function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - local desc - tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do - desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") - quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) - epsilon_ = convert(Tensor{Float32}, epsilon_) - num_streams_ = convert(Tensor{Int64}, num_streams_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, num_streams_) - if max_elements !== nothing - desc["max_elements"] = Base.Int(max_elements) + begin + function boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + local desc + tf.with_op_name(name, "BoostedTreesCreateQuantileStreamResource") do + desc = tf.NodeDescription("BoostedTreesCreateQuantileStreamResource") + begin + begin + quantile_stream_resource_handle_ = convert(Tensor{Any}, quantile_stream_resource_handle_) + begin + end + end + begin + epsilon_ = convert(Tensor{Float32}, epsilon_) + begin + end + end + begin + num_streams_ = convert(Tensor{Int64}, num_streams_) + begin + end + end + end + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, num_streams_) + end + end + begin + begin + if max_elements !== nothing + desc["max_elements"] = Base.Int(max_elements) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") + quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + num_streams_ = convert(tf.EagerTensor, num_streams_) + begin + begin + tf.add_input(desc, quantile_stream_resource_handle_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, num_streams_) + end + end + begin + begin + if max_elements !== nothing + desc["max_elements"] = Base.Int(max_elements) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) + if tf.in_eager_mode() + boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + else + boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - desc = tf.EagerOp("BoostedTreesCreateQuantileStreamResource") - quantile_stream_resource_handle_ = convert(tf.EagerTensor, quantile_stream_resource_handle_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - num_streams_ = convert(tf.EagerTensor, num_streams_) - tf.add_input(desc, quantile_stream_resource_handle_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, num_streams_) - if max_elements !== nothing - desc["max_elements"] = Base.Int(max_elements) - end - res = tf.execute(desc) - node = tf.TapeNode(boosted_trees_create_quantile_stream_resource, [quantile_stream_resource_handle_, epsilon_, num_streams_], name=nothing, max_elements=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function boosted_trees_create_quantile_stream_resource(quantile_stream_resource_handle_, epsilon_, num_streams_; name=nothing, max_elements=nothing) - if tf.in_eager_mode() - boosted_trees_create_quantile_stream_resource_eager(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) - else - boosted_trees_create_quantile_stream_resource_graph(quantile_stream_resource_handle_, epsilon_, num_streams_; name=name, max_elements=max_elements) - end - end end @@ -57937,54 +105856,104 @@ end """ begin - function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "QuantizedRelu6") do - desc = tf.NodeDescription("QuantizedRelu6") - features_ = convert(Tensor{Any}, features_) - min_features_ = convert(Tensor{Float32}, min_features_) - max_features_ = convert(Tensor{Float32}, max_features_) - (features_,) = tf.tf_promote(features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) + begin + function quantized_relu6_graph(features_, min_features_, max_features_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "QuantizedRelu6") do + desc = tf.NodeDescription("QuantizedRelu6") + begin + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + min_features_ = convert(Tensor{Float32}, min_features_) + begin + end + end + begin + max_features_ = convert(Tensor{Float32}, max_features_) + begin + end + end + begin + (features_,) = tf.tf_promote(features_) + end + end + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = 
tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) + desc = tf.EagerOp("QuantizedRelu6") + features_ = convert(tf.EagerTensor, features_) + min_features_ = convert(tf.EagerTensor, min_features_) + max_features_ = convert(tf.EagerTensor, max_features_) + begin + begin + tf.add_input(desc, features_) + end + begin + tf.add_input(desc, min_features_) + end + begin + tf.add_input(desc, max_features_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["Tinput"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) + else + quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function quantized_relu6_eager(features_, min_features_, max_features_; name=nothing, out_type=nothing) - desc = tf.EagerOp("QuantizedRelu6") - features_ = convert(tf.EagerTensor, features_) - min_features_ = convert(tf.EagerTensor, min_features_) - max_features_ = convert(tf.EagerTensor, max_features_) - tf.add_input(desc, features_) - tf.add_input(desc, min_features_) - tf.add_input(desc, max_features_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["Tinput"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(quantized_relu6, [features_, min_features_, max_features_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function quantized_relu6(features_, min_features_, max_features_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - quantized_relu6_eager(features_, min_features_, max_features_; name=name, out_type=out_type) - else - quantized_relu6_graph(features_, min_features_, max_features_; name=name, out_type=out_type) - end - end end @@ -57994,61 +105963,133 @@ end """ begin - function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSparseMaximum") do - desc = tf.NodeDescription("SparseSparseMaximum") - a_indices_ = convert(Tensor{Int64}, a_indices_) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Int64}, a_shape_) - b_indices_ = convert(Tensor{Int64}, b_indices_) - b_values_ = convert(Tensor{Any}, b_values_) - b_shape_ = convert(Tensor{Int64}, b_shape_) - (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 
1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - desc = tf.EagerOp("SparseSparseMaximum") - a_indices_ = convert(tf.EagerTensor, a_indices_) - a_values_ = convert(tf.EagerTensor, a_values_) - a_shape_ = convert(tf.EagerTensor, a_shape_) - b_indices_ = convert(tf.EagerTensor, b_indices_) - b_values_ = convert(tf.EagerTensor, b_values_) - b_shape_ = convert(tf.EagerTensor, b_shape_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_indices_) - tf.add_input(desc, b_values_) - tf.add_input(desc, b_shape_) - desc["T"] = tf.data_type(a_values_) - desc["T"] = tf.data_type(b_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) - if tf.in_eager_mode() - sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) - else - sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + begin + function sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSparseMaximum") do + desc = tf.NodeDescription("SparseSparseMaximum") + begin + begin + a_indices_ = convert(Tensor{Int64}, a_indices_) + begin + end + end + begin + a_values_ = convert(Tensor{Any}, a_values_) + begin + end + end + begin + a_shape_ = convert(Tensor{Int64}, a_shape_) + begin + end + end + begin + b_indices_ = convert(Tensor{Int64}, b_indices_) + begin + end + end + begin + b_values_ = convert(Tensor{Any}, b_values_) + begin + end + end + begin + b_shape_ = convert(Tensor{Int64}, b_shape_) + begin + end + end + begin + (a_values_, b_values_) = tf.tf_promote(a_values_, b_values_) + end + end + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + desc = tf.EagerOp("SparseSparseMaximum") + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_indices_ = convert(tf.EagerTensor, b_indices_) + b_values_ = convert(tf.EagerTensor, b_values_) + b_shape_ = convert(tf.EagerTensor, b_shape_) + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_indices_) + end + begin + tf.add_input(desc, b_values_) + end + begin + tf.add_input(desc, b_shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_values_) + end + begin + desc["T"] = tf.data_type(b_values_) + 
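                # The second "T" assignment overwrites the first, so a_values_
                # and b_values_ are expected to share a dtype (the graph path
                # enforces this via tf.tf_promote). A hypothetical call on two
                # (indices, values, shape) COO triples, returning the output
                # indices and values:
                #   out_indices, out_values = sparse_sparse_maximum(ai, av, ash, bi, bv, bsh)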
end + res = tf.execute(desc) + node = tf.TapeNode(sparse_sparse_maximum, [a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_sparse_maximum(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=nothing) + if tf.in_eager_mode() + sparse_sparse_maximum_eager(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + else + sparse_sparse_maximum_graph(a_indices_, a_values_, a_shape_, b_indices_, b_values_, b_shape_; name=name) + end end - end + end end @@ -58058,67 +106099,143 @@ end """ begin - function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - local desc - tf.with_op_name(name, "BatchNormWithGlobalNormalization") do - desc = tf.NodeDescription("BatchNormWithGlobalNormalization") - t_ = convert(Tensor{Any}, t_) - m_ = convert(Tensor{Any}, m_) - v_ = convert(Tensor{Any}, v_) - beta_ = convert(Tensor{Any}, beta_) - gamma_ = convert(Tensor{Any}, gamma_) - (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta_) - tf.add_input(desc, gamma_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) + begin + function batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + local desc + tf.with_op_name(name, "BatchNormWithGlobalNormalization") do + desc = tf.NodeDescription("BatchNormWithGlobalNormalization") + begin + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + m_ = convert(Tensor{Any}, m_) + begin + end + end + begin + v_ = convert(Tensor{Any}, v_) + begin + end + end + begin + beta_ = convert(Tensor{Any}, beta_) + begin + end + end + begin + gamma_ = convert(Tensor{Any}, gamma_) + begin + end + end + begin + (t_, m_, v_, beta_, gamma_) = tf.tf_promote(t_, m_, v_, beta_, gamma_) + end + end + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, gamma_) + end + end + begin + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + desc = tf.EagerOp("BatchNormWithGlobalNormalization") + t_ = convert(tf.EagerTensor, t_) + m_ = convert(tf.EagerTensor, m_) + v_ = convert(tf.EagerTensor, v_) + beta_ = convert(tf.EagerTensor, beta_) + gamma_ = convert(tf.EagerTensor, gamma_) + begin + begin + tf.add_input(desc, t_) + end + begin + tf.add_input(desc, m_) + end + begin + tf.add_input(desc, v_) + end + begin + tf.add_input(desc, beta_) + end + begin + tf.add_input(desc, gamma_) + end + end + begin + begin + if variance_epsilon !== nothing + desc["variance_epsilon"] = Base.identity(variance_epsilon) + end + end + begin + if 
scale_after_normalization !== nothing + desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) + end + end + end + begin + desc["T"] = tf.data_type(t_) + end + begin + desc["T"] = tf.data_type(m_) + end + begin + desc["T"] = tf.data_type(v_) + end + begin + desc["T"] = tf.data_type(beta_) + end + begin + desc["T"] = tf.data_type(gamma_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) + if tf.in_eager_mode() + batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + else + batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) + end end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - end - tf.Tensor(tf.Operation(desc)) end - function batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - desc = tf.EagerOp("BatchNormWithGlobalNormalization") - t_ = convert(tf.EagerTensor, t_) - m_ = convert(tf.EagerTensor, m_) - v_ = convert(tf.EagerTensor, v_) - beta_ = convert(tf.EagerTensor, beta_) - gamma_ = convert(tf.EagerTensor, gamma_) - tf.add_input(desc, t_) - tf.add_input(desc, m_) - tf.add_input(desc, v_) - tf.add_input(desc, beta_) - tf.add_input(desc, gamma_) - if variance_epsilon !== nothing - desc["variance_epsilon"] = Base.identity(variance_epsilon) - end - if scale_after_normalization !== nothing - desc["scale_after_normalization"] = Base.Bool(scale_after_normalization) - end - desc["T"] = tf.data_type(t_) - desc["T"] = tf.data_type(m_) - desc["T"] = tf.data_type(v_) - desc["T"] = tf.data_type(beta_) - desc["T"] = tf.data_type(gamma_) - res = tf.execute(desc) - node = tf.TapeNode(batch_norm_with_global_normalization, [t_, m_, v_, beta_, gamma_], name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_norm_with_global_normalization(t_, m_, v_, beta_, gamma_; name=nothing, variance_epsilon=nothing, scale_after_normalization=nothing) - if tf.in_eager_mode() - batch_norm_with_global_normalization_eager(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - else - batch_norm_with_global_normalization_graph(t_, m_, v_, beta_, gamma_; name=name, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization) - end - end end @@ -58128,44 +106245,90 @@ end """ begin - function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) - local desc - tf.with_op_name(name, "InTopKV2") do - desc = tf.NodeDescription("InTopKV2") - predictions_ = convert(Tensor{Float32}, predictions_) - targets_ = convert(Tensor{Int32}, targets_) - k_ = 
convert(Tensor{Int32}, k_) - (targets_, k_) = tf.tf_promote(targets_, k_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - tf.add_input(desc, k_) - end - tf.Tensor(tf.Operation(desc)) - end - function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) - desc = tf.EagerOp("InTopKV2") - predictions_ = convert(tf.EagerTensor, predictions_) - targets_ = convert(tf.EagerTensor, targets_) - k_ = convert(tf.EagerTensor, k_) - tf.add_input(desc, predictions_) - tf.add_input(desc, targets_) - tf.add_input(desc, k_) - desc["T"] = tf.data_type(targets_) - desc["T"] = tf.data_type(k_) - res = tf.execute(desc) - node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing) - if tf.in_eager_mode() - in_top_kv2_eager(predictions_, targets_, k_; name=name) - else - in_top_kv2_graph(predictions_, targets_, k_; name=name) + begin + function in_top_kv2_graph(predictions_, targets_, k_; name=nothing) + local desc + tf.with_op_name(name, "InTopKV2") do + desc = tf.NodeDescription("InTopKV2") + begin + begin + predictions_ = convert(Tensor{Float32}, predictions_) + begin + end + end + begin + targets_ = convert(Tensor{Int32}, targets_) + begin + end + end + begin + k_ = convert(Tensor{Int32}, k_) + begin + end + end + begin + (targets_, k_) = tf.tf_promote(targets_, k_) + end + end + begin + begin + tf.add_input(desc, predictions_) + end + begin + tf.add_input(desc, targets_) + end + begin + tf.add_input(desc, k_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function in_top_kv2_eager(predictions_, targets_, k_; name=nothing) + desc = tf.EagerOp("InTopKV2") + predictions_ = convert(tf.EagerTensor, predictions_) + targets_ = convert(tf.EagerTensor, targets_) + k_ = convert(tf.EagerTensor, k_) + begin + begin + tf.add_input(desc, predictions_) + end + begin + tf.add_input(desc, targets_) + end + begin + tf.add_input(desc, k_) + end + end + begin + end + begin + desc["T"] = tf.data_type(targets_) + end + begin + desc["T"] = tf.data_type(k_) + end + res = tf.execute(desc) + node = tf.TapeNode(in_top_kv2, [predictions_, targets_, k_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function in_top_kv2(predictions_, targets_, k_; name=nothing) + if tf.in_eager_mode() + in_top_kv2_eager(predictions_, targets_, k_; name=name) + else + in_top_kv2_graph(predictions_, targets_, k_; name=name) + end end - end + end end @@ -58175,35 +106338,63 @@ end """ begin - function cholesky_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "Cholesky") do - desc = tf.NodeDescription("Cholesky") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function cholesky_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "Cholesky") do + desc = tf.NodeDescription("Cholesky") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function cholesky_eager(input_; name=nothing) - desc = 
tf.EagerOp("Cholesky") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(cholesky, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function cholesky_eager(input_; name=nothing) + desc = tf.EagerOp("Cholesky") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(cholesky, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky(input_; name=nothing) - if tf.in_eager_mode() - cholesky_eager(input_; name=name) - else - cholesky_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function cholesky(input_; name=nothing) + if tf.in_eager_mode() + cholesky_eager(input_; name=name) + else + cholesky_graph(input_; name=name) + end end - end + end end @@ -58213,77 +106404,181 @@ end """ begin - function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - local desc - tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do - desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") - var_ = convert(Tensor{Any}, var_) - mg_ = convert(Tensor{Any}, mg_) - ms_ = convert(Tensor{Any}, ms_) - mom_ = convert(Tensor{Any}, mom_) - lr_ = convert(Tensor{Any}, lr_) - rho_ = convert(Tensor{Any}, rho_) - momentum_ = convert(Tensor{Any}, momentum_) - epsilon_ = convert(Tensor{Any}, epsilon_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - desc = tf.EagerOp("ResourceApplyCenteredRMSProp") - var_ = convert(tf.EagerTensor, var_) - mg_ = convert(tf.EagerTensor, mg_) - ms_ = convert(tf.EagerTensor, ms_) - mom_ = convert(tf.EagerTensor, mom_) - lr_ = convert(tf.EagerTensor, lr_) - rho_ = convert(tf.EagerTensor, rho_) - momentum_ = convert(tf.EagerTensor, momentum_) - epsilon_ = convert(tf.EagerTensor, epsilon_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, mg_) - tf.add_input(desc, ms_) - tf.add_input(desc, mom_) - tf.add_input(desc, lr_) - tf.add_input(desc, rho_) - tf.add_input(desc, momentum_) - tf.add_input(desc, epsilon_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(rho_) - desc["T"] = tf.data_type(momentum_) - desc["T"] = tf.data_type(epsilon_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_centered_rms_prop, [var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, 
res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) - if tf.in_eager_mode() - resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) - else - resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + begin + function resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + local desc + tf.with_op_name(name, "ResourceApplyCenteredRMSProp") do + desc = tf.NodeDescription("ResourceApplyCenteredRMSProp") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + mg_ = convert(Tensor{Any}, mg_) + begin + end + end + begin + ms_ = convert(Tensor{Any}, ms_) + begin + end + end + begin + mom_ = convert(Tensor{Any}, mom_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + rho_ = convert(Tensor{Any}, rho_) + begin + end + end + begin + momentum_ = convert(Tensor{Any}, momentum_) + begin + end + end + begin + epsilon_ = convert(Tensor{Any}, epsilon_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, rho_, momentum_, epsilon_, grad_) = tf.tf_promote(lr_, rho_, momentum_, epsilon_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + desc = tf.EagerOp("ResourceApplyCenteredRMSProp") + var_ = convert(tf.EagerTensor, var_) + mg_ = convert(tf.EagerTensor, mg_) + ms_ = convert(tf.EagerTensor, ms_) + mom_ = convert(tf.EagerTensor, mom_) + lr_ = convert(tf.EagerTensor, lr_) + rho_ = convert(tf.EagerTensor, rho_) + momentum_ = convert(tf.EagerTensor, momentum_) + epsilon_ = convert(tf.EagerTensor, epsilon_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, mg_) + end + begin + tf.add_input(desc, ms_) + end + begin + tf.add_input(desc, mom_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, rho_) + end + begin + tf.add_input(desc, momentum_) + end + begin + tf.add_input(desc, epsilon_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(rho_) + end + begin + desc["T"] = tf.data_type(momentum_) + end + begin + desc["T"] = tf.data_type(epsilon_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_centered_rms_prop, 
[var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_], name=nothing, use_locking=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_centered_rms_prop(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=nothing, use_locking=nothing) + if tf.in_eager_mode() + resource_apply_centered_rms_prop_eager(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + else + resource_apply_centered_rms_prop_graph(var_, mg_, ms_, mom_, lr_, rho_, momentum_, epsilon_, grad_; name=name, use_locking=use_locking) + end end - end + end end @@ -58293,60 +106588,122 @@ end """ begin - function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - local desc - tf.with_op_name(name, "ResourceApplyAdagrad") do - desc = tf.NodeDescription("ResourceApplyAdagrad") - var_ = convert(Tensor{Any}, var_) - accum_ = convert(Tensor{Any}, accum_) - lr_ = convert(Tensor{Any}, lr_) - grad_ = convert(Tensor{Any}, grad_) - (lr_, grad_) = tf.tf_promote(lr_, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) + begin + function resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + local desc + tf.with_op_name(name, "ResourceApplyAdagrad") do + desc = tf.NodeDescription("ResourceApplyAdagrad") + begin + begin + var_ = convert(Tensor{Any}, var_) + begin + end + end + begin + accum_ = convert(Tensor{Any}, accum_) + begin + end + end + begin + lr_ = convert(Tensor{Any}, lr_) + begin + end + end + begin + grad_ = convert(Tensor{Any}, grad_) + begin + end + end + begin + (lr_, grad_) = tf.tf_promote(lr_, grad_) + end + end + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + desc = tf.EagerOp("ResourceApplyAdagrad") + var_ = convert(tf.EagerTensor, var_) + accum_ = convert(tf.EagerTensor, accum_) + lr_ = convert(tf.EagerTensor, lr_) + grad_ = convert(tf.EagerTensor, grad_) + begin + begin + tf.add_input(desc, var_) + end + begin + tf.add_input(desc, accum_) + end + begin + tf.add_input(desc, lr_) + end + begin + tf.add_input(desc, grad_) + end + end + begin + begin + if use_locking !== nothing + desc["use_locking"] = Base.Bool(use_locking) + end + end + begin + if update_slots !== nothing + desc["update_slots"] = Base.Bool(update_slots) + end + end + end + begin + desc["T"] = tf.data_type(lr_) + end + begin + desc["T"] = tf.data_type(grad_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + 
end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) + if tf.in_eager_mode() + resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + else + resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - desc = tf.EagerOp("ResourceApplyAdagrad") - var_ = convert(tf.EagerTensor, var_) - accum_ = convert(tf.EagerTensor, accum_) - lr_ = convert(tf.EagerTensor, lr_) - grad_ = convert(tf.EagerTensor, grad_) - tf.add_input(desc, var_) - tf.add_input(desc, accum_) - tf.add_input(desc, lr_) - tf.add_input(desc, grad_) - if use_locking !== nothing - desc["use_locking"] = Base.Bool(use_locking) - end - if update_slots !== nothing - desc["update_slots"] = Base.Bool(update_slots) - end - desc["T"] = tf.data_type(lr_) - desc["T"] = tf.data_type(grad_) - res = tf.execute(desc) - node = tf.TapeNode(resource_apply_adagrad, [var_, accum_, lr_, grad_], name=nothing, use_locking=nothing, update_slots=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_apply_adagrad(var_, accum_, lr_, grad_; name=nothing, use_locking=nothing, update_slots=nothing) - if tf.in_eager_mode() - resource_apply_adagrad_eager(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - else - resource_apply_adagrad_graph(var_, accum_, lr_, grad_; name=name, use_locking=use_locking, update_slots=update_slots) - end - end end @@ -58356,81 +106713,169 @@ end """ begin - function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do - desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") - input_dataset_ = convert(Tensor{Any}, input_dataset_) - other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] - cycle_length_ = convert(Tensor{Int64}, cycle_length_) - block_length_ = convert(Tensor{Int64}, block_length_) - sloppy_ = convert(Tensor{Bool}, sloppy_) - buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_) - prefetch_input_elements_ = convert(Tensor{Int64}, prefetch_input_elements_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, sloppy_) - tf.add_input(desc, buffer_output_elements_) - tf.add_input(desc, prefetch_input_elements_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, 
block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "ExperimentalParallelInterleaveDataset") do + desc = tf.NodeDescription("ExperimentalParallelInterleaveDataset") + begin + begin + input_dataset_ = convert(Tensor{Any}, input_dataset_) + begin + end + end + begin + other_arguments_ = [convert(Tensor{Any}, x) for x = other_arguments_] + begin + end + end + begin + cycle_length_ = convert(Tensor{Int64}, cycle_length_) + begin + end + end + begin + block_length_ = convert(Tensor{Int64}, block_length_) + begin + end + end + begin + sloppy_ = convert(Tensor{Bool}, sloppy_) + begin + end + end + begin + buffer_output_elements_ = convert(Tensor{Int64}, buffer_output_elements_) + begin + end + end + begin + prefetch_input_elements_ = convert(Tensor{Int64}, prefetch_input_elements_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + begin + tf.add_input(desc, sloppy_) + end + begin + tf.add_input(desc, buffer_output_elements_) + end + begin + tf.add_input(desc, prefetch_input_elements_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") + input_dataset_ = convert(tf.EagerTensor, input_dataset_) + other_arguments_ = convert(tf.EagerTensor, other_arguments_) + cycle_length_ = convert(tf.EagerTensor, cycle_length_) + block_length_ = convert(tf.EagerTensor, block_length_) + sloppy_ = convert(tf.EagerTensor, sloppy_) + buffer_output_elements_ = convert(tf.EagerTensor, buffer_output_elements_) + prefetch_input_elements_ = convert(tf.EagerTensor, prefetch_input_elements_) + begin + begin + tf.add_input(desc, input_dataset_) + end + begin + tf.add_input(desc, other_arguments_) + end + begin + tf.add_input(desc, cycle_length_) + end + begin + tf.add_input(desc, block_length_) + end + begin + tf.add_input(desc, sloppy_) + end + begin + tf.add_input(desc, buffer_output_elements_) + end + begin + tf.add_input(desc, prefetch_input_elements_) + end + end + begin + begin + if f !== nothing + desc["f"] = Base.identity(f) + end + end + begin + if Targuments !== nothing + desc["Targuments"] = map(Base.identity, Targuments) + end + end + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, 
prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + else + experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("ExperimentalParallelInterleaveDataset") - input_dataset_ = convert(tf.EagerTensor, input_dataset_) - other_arguments_ = convert(tf.EagerTensor, other_arguments_) - cycle_length_ = convert(tf.EagerTensor, cycle_length_) - block_length_ = convert(tf.EagerTensor, block_length_) - sloppy_ = convert(tf.EagerTensor, sloppy_) - buffer_output_elements_ = convert(tf.EagerTensor, buffer_output_elements_) - prefetch_input_elements_ = convert(tf.EagerTensor, prefetch_input_elements_) - tf.add_input(desc, input_dataset_) - tf.add_input(desc, other_arguments_) - tf.add_input(desc, cycle_length_) - tf.add_input(desc, block_length_) - tf.add_input(desc, sloppy_) - tf.add_input(desc, buffer_output_elements_) - tf.add_input(desc, prefetch_input_elements_) - if f !== nothing - desc["f"] = Base.identity(f) - end - if Targuments !== nothing - desc["Targuments"] = map(Base.identity, Targuments) - end - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(experimental_parallel_interleave_dataset, [input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_], name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function experimental_parallel_interleave_dataset(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=nothing, f=nothing, Targuments=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - experimental_parallel_interleave_dataset_eager(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, 
output_shapes=output_shapes) - else - experimental_parallel_interleave_dataset_graph(input_dataset_, other_arguments_, cycle_length_, block_length_, sloppy_, buffer_output_elements_, prefetch_input_elements_; name=name, f=f, Targuments=Targuments, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -58440,45 +106885,85 @@ end """ begin - function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeBicubicGrad") do - desc = tf.NodeDescription("ResizeBicubicGrad") - grads_ = convert(Tensor{Float32}, grads_) - original_image_ = convert(Tensor{Any}, original_image_) - (original_image_,) = tf.tf_promote(original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_bicubic_grad_graph(grads_, original_image_; name=nothing, align_corners=nothing) + local desc + tf.with_op_name(name, "ResizeBicubicGrad") do + desc = tf.NodeDescription("ResizeBicubicGrad") + begin + begin + grads_ = convert(Tensor{Float32}, grads_) + begin + end + end + begin + original_image_ = convert(Tensor{Any}, original_image_) + begin + end + end + begin + (original_image_,) = tf.tf_promote(original_image_) + end + end + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, original_image_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeBicubicGrad") + grads_ = convert(tf.EagerTensor, grads_) + original_image_ = convert(tf.EagerTensor, original_image_) + begin + begin + tf.add_input(desc, grads_) + end + begin + tf.add_input(desc, original_image_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(original_image_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) + else + resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resize_bicubic_grad_eager(grads_, original_image_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeBicubicGrad") - grads_ = convert(tf.EagerTensor, grads_) - original_image_ = convert(tf.EagerTensor, original_image_) - tf.add_input(desc, grads_) - tf.add_input(desc, original_image_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(original_image_) - res = tf.execute(desc) - node = tf.TapeNode(resize_bicubic_grad, [grads_, original_image_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op 
function resize_bicubic_grad(grads_, original_image_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_bicubic_grad_eager(grads_, original_image_; name=name, align_corners=align_corners) - else - resize_bicubic_grad_graph(grads_, original_image_; name=name, align_corners=align_corners) - end - end end @@ -58488,35 +106973,63 @@ end """ begin - function batch_self_adjoint_eig_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchSelfAdjointEig") do - desc = tf.NodeDescription("BatchSelfAdjointEig") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function batch_self_adjoint_eig_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchSelfAdjointEig") do + desc = tf.NodeDescription("BatchSelfAdjointEig") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_self_adjoint_eig_eager(input_; name=nothing) - desc = tf.EagerOp("BatchSelfAdjointEig") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_self_adjoint_eig_eager(input_; name=nothing) + desc = tf.EagerOp("BatchSelfAdjointEig") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(batch_self_adjoint_eig, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing) - if tf.in_eager_mode() - batch_self_adjoint_eig_eager(input_; name=name) - else - batch_self_adjoint_eig_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_self_adjoint_eig(input_; name=nothing) + if tf.in_eager_mode() + batch_self_adjoint_eig_eager(input_; name=name) + else + batch_self_adjoint_eig_graph(input_; name=name) + end end - end + end end @@ -58526,43 +107039,87 @@ end """ begin - function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) - local desc - tf.with_op_name(name, "SparseSoftmax") do - desc = tf.NodeDescription("SparseSoftmax") - sp_indices_ = convert(Tensor{Int64}, sp_indices_) - sp_values_ = convert(Tensor{Any}, sp_values_) - sp_shape_ = convert(Tensor{Int64}, sp_shape_) - (sp_values_,) = tf.tf_promote(sp_values_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) - desc = tf.EagerOp("SparseSoftmax") - sp_indices_ = convert(tf.EagerTensor, sp_indices_) - sp_values_ = convert(tf.EagerTensor, sp_values_) - sp_shape_ = convert(tf.EagerTensor, sp_shape_) - tf.add_input(desc, sp_indices_) - tf.add_input(desc, sp_values_) - tf.add_input(desc, sp_shape_) - desc["T"] = tf.data_type(sp_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_softmax, [sp_indices_, 
sp_values_, sp_shape_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) - if tf.in_eager_mode() - sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) - else - sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name) + begin + function sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=nothing) + local desc + tf.with_op_name(name, "SparseSoftmax") do + desc = tf.NodeDescription("SparseSoftmax") + begin + begin + sp_indices_ = convert(Tensor{Int64}, sp_indices_) + begin + end + end + begin + sp_values_ = convert(Tensor{Any}, sp_values_) + begin + end + end + begin + sp_shape_ = convert(Tensor{Int64}, sp_shape_) + begin + end + end + begin + (sp_values_,) = tf.tf_promote(sp_values_) + end + end + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=nothing) + desc = tf.EagerOp("SparseSoftmax") + sp_indices_ = convert(tf.EagerTensor, sp_indices_) + sp_values_ = convert(tf.EagerTensor, sp_values_) + sp_shape_ = convert(tf.EagerTensor, sp_shape_) + begin + begin + tf.add_input(desc, sp_indices_) + end + begin + tf.add_input(desc, sp_values_) + end + begin + tf.add_input(desc, sp_shape_) + end + end + begin + end + begin + desc["T"] = tf.data_type(sp_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_softmax, [sp_indices_, sp_values_, sp_shape_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_softmax(sp_indices_, sp_values_, sp_shape_; name=nothing) + if tf.in_eager_mode() + sparse_softmax_eager(sp_indices_, sp_values_, sp_shape_; name=name) + else + sparse_softmax_graph(sp_indices_, sp_values_, sp_shape_; name=name) + end end - end + end end @@ -58572,35 +107129,63 @@ end """ begin - function asinh_graph(x_; name=nothing) - local desc - tf.with_op_name(name, "Asinh") do - desc = tf.NodeDescription("Asinh") - x_ = convert(Tensor{Any}, x_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) + begin + function asinh_graph(x_; name=nothing) + local desc + tf.with_op_name(name, "Asinh") do + desc = tf.NodeDescription("Asinh") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function asinh_eager(x_; name=nothing) - desc = tf.EagerOp("Asinh") - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(asinh, [x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function asinh_eager(x_; name=nothing) + desc = tf.EagerOp("Asinh") + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(asinh, [x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end 
end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asinh(x_; name=nothing) - if tf.in_eager_mode() - asinh_eager(x_; name=name) - else - asinh_graph(x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function asinh(x_; name=nothing) + if tf.in_eager_mode() + asinh_eager(x_; name=name) + else + asinh_graph(x_; name=name) + end end - end + end end @@ -58610,41 +107195,73 @@ end """ begin - function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) - local desc - tf.with_op_name(name, "MatrixInverse") do - desc = tf.NodeDescription("MatrixInverse") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) + begin + function matrix_inverse_graph(input_; name=nothing, adjoint=nothing) + local desc + tf.with_op_name(name, "MatrixInverse") do + desc = tf.NodeDescription("MatrixInverse") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) - desc = tf.EagerOp("MatrixInverse") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if adjoint !== nothing - desc["adjoint"] = Base.Bool(adjoint) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function matrix_inverse_eager(input_; name=nothing, adjoint=nothing) + desc = tf.EagerOp("MatrixInverse") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if adjoint !== nothing + desc["adjoint"] = Base.Bool(adjoint) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(matrix_inverse, [input_], name=nothing, adjoint=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) - if tf.in_eager_mode() - matrix_inverse_eager(input_; name=name, adjoint=adjoint) - else - matrix_inverse_graph(input_; name=name, adjoint=adjoint) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function matrix_inverse(input_; name=nothing, adjoint=nothing) + if tf.in_eager_mode() + matrix_inverse_eager(input_; name=name, adjoint=adjoint) + else + matrix_inverse_graph(input_; name=name, adjoint=adjoint) + end end - end + end end @@ -58654,43 +107271,79 @@ end """ begin - function tensor_list_concat_lists_graph(input_a_, input_b_; name=nothing, element_dtype=nothing) - local desc - tf.with_op_name(name, "TensorListConcatLists") do - desc = tf.NodeDescription("TensorListConcatLists") - input_a_ = convert(Tensor{Any}, input_a_) - input_b_ = convert(Tensor{Any}, input_b_) - tf.add_input(desc, input_a_) - tf.add_input(desc, input_b_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) + begin + function tensor_list_concat_lists_graph(input_a_, 
input_b_; name=nothing, element_dtype=nothing) + local desc + tf.with_op_name(name, "TensorListConcatLists") do + desc = tf.NodeDescription("TensorListConcatLists") + begin + begin + input_a_ = convert(Tensor{Any}, input_a_) + begin + end + end + begin + input_b_ = convert(Tensor{Any}, input_b_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_a_) + end + begin + tf.add_input(desc, input_b_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) + desc = tf.EagerOp("TensorListConcatLists") + input_a_ = convert(tf.EagerTensor, input_a_) + input_b_ = convert(tf.EagerTensor, input_b_) + begin + begin + tf.add_input(desc, input_a_) + end + begin + tf.add_input(desc, input_b_) + end + end + begin + begin + if element_dtype !== nothing + desc["element_dtype"] = Base.identity(element_dtype) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) + if tf.in_eager_mode() + tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) + else + tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function tensor_list_concat_lists_eager(input_a_, input_b_; name=nothing, element_dtype=nothing) - desc = tf.EagerOp("TensorListConcatLists") - input_a_ = convert(tf.EagerTensor, input_a_) - input_b_ = convert(tf.EagerTensor, input_b_) - tf.add_input(desc, input_a_) - tf.add_input(desc, input_b_) - if element_dtype !== nothing - desc["element_dtype"] = Base.identity(element_dtype) - end - res = tf.execute(desc) - node = tf.TapeNode(tensor_list_concat_lists, [input_a_, input_b_], name=nothing, element_dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function tensor_list_concat_lists(input_a_, input_b_; name=nothing, element_dtype=nothing) - if tf.in_eager_mode() - tensor_list_concat_lists_eager(input_a_, input_b_; name=name, element_dtype=element_dtype) - else - tensor_list_concat_lists_graph(input_a_, input_b_; name=name, element_dtype=element_dtype) - end - end end @@ -58700,62 +107353,128 @@ end """ begin - function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - local desc - tf.with_op_name(name, "Requantize") do - desc = tf.NodeDescription("Requantize") - input_ = convert(Tensor{Any}, input_) - input_min_ = convert(Tensor{Float32}, input_min_) - input_max_ = convert(Tensor{Float32}, input_max_) - requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) - requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, requested_output_min_) - tf.add_input(desc, requested_output_max_) - if out_type !== 
nothing - desc["out_type"] = Base.identity(out_type) + begin + function requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + local desc + tf.with_op_name(name, "Requantize") do + desc = tf.NodeDescription("Requantize") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + input_min_ = convert(Tensor{Float32}, input_min_) + begin + end + end + begin + input_max_ = convert(Tensor{Float32}, input_max_) + begin + end + end + begin + requested_output_min_ = convert(Tensor{Float32}, requested_output_min_) + begin + end + end + begin + requested_output_max_ = convert(Tensor{Float32}, requested_output_max_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + begin + tf.add_input(desc, requested_output_min_) + end + begin + tf.add_input(desc, requested_output_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + desc = tf.EagerOp("Requantize") + input_ = convert(tf.EagerTensor, input_) + input_min_ = convert(tf.EagerTensor, input_min_) + input_max_ = convert(tf.EagerTensor, input_max_) + requested_output_min_ = convert(tf.EagerTensor, requested_output_min_) + requested_output_max_ = convert(tf.EagerTensor, requested_output_max_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, input_min_) + end + begin + tf.add_input(desc, input_max_) + end + begin + tf.add_input(desc, requested_output_min_) + end + begin + tf.add_input(desc, requested_output_max_) + end + end + begin + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + end + begin + desc["Tinput"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) + if tf.in_eager_mode() + requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + else + requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) + end end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - desc = tf.EagerOp("Requantize") - input_ = convert(tf.EagerTensor, input_) - input_min_ = convert(tf.EagerTensor, input_min_) - input_max_ = convert(tf.EagerTensor, input_max_) - requested_output_min_ = convert(tf.EagerTensor, requested_output_min_) - requested_output_max_ = convert(tf.EagerTensor, 
requested_output_max_) - tf.add_input(desc, input_) - tf.add_input(desc, input_min_) - tf.add_input(desc, input_max_) - tf.add_input(desc, requested_output_min_) - tf.add_input(desc, requested_output_max_) - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - desc["Tinput"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(requantize, [input_, input_min_, input_max_, requested_output_min_, requested_output_max_], name=nothing, out_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function requantize(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=nothing, out_type=nothing) - if tf.in_eager_mode() - requantize_eager(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) - else - requantize_graph(input_, input_min_, input_max_, requested_output_min_, requested_output_max_; name=name, out_type=out_type) - end - end end @@ -58765,35 +107484,63 @@ end """ begin - function fft_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "FFT") do - desc = tf.NodeDescription("FFT") - input_ = convert(Tensor{Complex{Float32}}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) + begin + function fft_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "FFT") do + desc = tf.NodeDescription("FFT") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function fft_eager(input_; name=nothing) - desc = tf.EagerOp("FFT") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - desc["Tcomplex"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(fft, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function fft_eager(input_; name=nothing) + desc = tf.EagerOp("FFT") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + begin + desc["Tcomplex"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(fft, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft(input_; name=nothing) - if tf.in_eager_mode() - fft_eager(input_; name=name) - else - fft_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function fft(input_; name=nothing) + if tf.in_eager_mode() + fft_eager(input_; name=name) + else + fft_graph(input_; name=name) + end end - end + end end @@ -58803,41 +107550,81 @@ end """ begin - function conjugate_transpose_graph(x_, perm_; name=nothing) - local desc - tf.with_op_name(name, "ConjugateTranspose") do - desc = tf.NodeDescription("ConjugateTranspose") - x_ = convert(Tensor{Any}, x_) - perm_ = convert(Tensor{Int32}, perm_) - (perm_,) = tf.tf_promote(perm_) - (x_,) = tf.tf_promote(x_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) + begin + function conjugate_transpose_graph(x_, perm_; name=nothing) + local desc + tf.with_op_name(name, "ConjugateTranspose") do + desc = 
tf.NodeDescription("ConjugateTranspose") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + perm_ = convert(Tensor{Int32}, perm_) + begin + end + end + begin + (perm_,) = tf.tf_promote(perm_) + end + begin + (x_,) = tf.tf_promote(x_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, perm_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function conjugate_transpose_eager(x_, perm_; name=nothing) - desc = tf.EagerOp("ConjugateTranspose") - x_ = convert(tf.EagerTensor, x_) - perm_ = convert(tf.EagerTensor, perm_) - tf.add_input(desc, x_) - tf.add_input(desc, perm_) - desc["T"] = tf.data_type(x_) - desc["Tperm"] = tf.data_type(perm_) - res = tf.execute(desc) - node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function conjugate_transpose_eager(x_, perm_; name=nothing) + desc = tf.EagerOp("ConjugateTranspose") + x_ = convert(tf.EagerTensor, x_) + perm_ = convert(tf.EagerTensor, perm_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, perm_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["Tperm"] = tf.data_type(perm_) + end + res = tf.execute(desc) + node = tf.TapeNode(conjugate_transpose, [x_, perm_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing) - if tf.in_eager_mode() - conjugate_transpose_eager(x_, perm_; name=name) - else - conjugate_transpose_graph(x_, perm_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function conjugate_transpose(x_, perm_; name=nothing) + if tf.in_eager_mode() + conjugate_transpose_eager(x_, perm_; name=name) + else + conjugate_transpose_graph(x_, perm_; name=name) + end end - end + end end @@ -58847,59 +107634,95 @@ end """ begin - function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - local desc - tf.with_op_name(name, "Unstage") do - desc = tf.NodeDescription("Unstage") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) + begin + function unstage_graph(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + local desc + tf.with_op_name(name, "Unstage") do + desc = tf.NodeDescription("Unstage") + begin + end + begin + end + begin + begin + if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function unstage_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + desc = tf.EagerOp("Unstage") + begin + end + begin + begin 
+ if capacity !== nothing + desc["capacity"] = Base.Int(capacity) + end + end + begin + if memory_limit !== nothing + desc["memory_limit"] = Base.Int(memory_limit) + end + end + begin + if dtypes !== nothing + desc["dtypes"] = map(Base.identity, dtypes) + end + end + begin + if container !== nothing + desc["container"] = Base.String(container) + end + end + begin + if shared_name !== nothing + desc["shared_name"] = Base.String(shared_name) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) + if tf.in_eager_mode() + unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + else + unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) + end end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - end - tf.Tensor(tf.Operation(desc)) - end - function unstage_eager(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - desc = tf.EagerOp("Unstage") - if capacity !== nothing - desc["capacity"] = Base.Int(capacity) - end - if memory_limit !== nothing - desc["memory_limit"] = Base.Int(memory_limit) - end - if dtypes !== nothing - desc["dtypes"] = map(Base.identity, dtypes) - end - if container !== nothing - desc["container"] = Base.String(container) - end - if shared_name !== nothing - desc["shared_name"] = Base.String(shared_name) - end - res = tf.execute(desc) - node = tf.TapeNode(unstage, [], name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function unstage(; name=nothing, capacity=nothing, memory_limit=nothing, dtypes=nothing, container=nothing, shared_name=nothing) - if tf.in_eager_mode() - unstage_eager(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - else - unstage_graph(; name=name, capacity=capacity, memory_limit=memory_limit, dtypes=dtypes, container=container, shared_name=shared_name) - end - end end @@ -58909,40 +107732,78 @@ end """ begin - function relu6grad_graph(gradients_, features_; name=nothing) - local desc - tf.with_op_name(name, "Relu6Grad") do - desc = tf.NodeDescription("Relu6Grad") - gradients_ = convert(Tensor{Any}, gradients_) - features_ = convert(Tensor{Any}, features_) - (gradients_, features_) = tf.tf_promote(gradients_, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) + begin + function relu6grad_graph(gradients_, features_; name=nothing) + local desc + tf.with_op_name(name, "Relu6Grad") do + desc = tf.NodeDescription("Relu6Grad") + begin + begin + gradients_ = convert(Tensor{Any}, gradients_) + begin + end + end + begin + features_ = convert(Tensor{Any}, features_) + begin + end + end + begin + 
(gradients_, features_) = tf.tf_promote(gradients_, features_) + end + end + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function relu6grad_eager(gradients_, features_; name=nothing) - desc = tf.EagerOp("Relu6Grad") - gradients_ = convert(tf.EagerTensor, gradients_) - features_ = convert(tf.EagerTensor, features_) - tf.add_input(desc, gradients_) - tf.add_input(desc, features_) - desc["T"] = tf.data_type(gradients_) - desc["T"] = tf.data_type(features_) - res = tf.execute(desc) - node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function relu6grad_eager(gradients_, features_; name=nothing) + desc = tf.EagerOp("Relu6Grad") + gradients_ = convert(tf.EagerTensor, gradients_) + features_ = convert(tf.EagerTensor, features_) + begin + begin + tf.add_input(desc, gradients_) + end + begin + tf.add_input(desc, features_) + end + end + begin + end + begin + desc["T"] = tf.data_type(gradients_) + end + begin + desc["T"] = tf.data_type(features_) + end + res = tf.execute(desc) + node = tf.TapeNode(relu6grad, [gradients_, features_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6grad(gradients_, features_; name=nothing) - if tf.in_eager_mode() - relu6grad_eager(gradients_, features_; name=name) - else - relu6grad_graph(gradients_, features_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function relu6grad(gradients_, features_; name=nothing) + if tf.in_eager_mode() + relu6grad_eager(gradients_, features_; name=name) + else + relu6grad_graph(gradients_, features_; name=name) + end end - end + end end @@ -58952,47 +107813,83 @@ end Converts an array of tensors to a list of tensors. 
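A minimal usage sketch (hypothetical call, not part of this patch; assumes eager
mode is enabled, that `N` matches the number of input tensors, and that
`out_types` lists the element types of the resulting tensors):

    xs = [constant([1, 2]), constant([3, 4])]        # two Int64 tensors
    ys = _array_to_list(xs; N=2, out_types=[Int64, Int64])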
""" begin - function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) - local desc - tf.with_op_name(name, "_ArrayToList") do - desc = tf.NodeDescription("_ArrayToList") - input_ = [convert(Tensor{Any}, x) for x = input_] - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) + begin + function _array_to_list_graph(input_; name=nothing, N=nothing, out_types=nothing) + local desc + tf.with_op_name(name, "_ArrayToList") do + desc = tf.NodeDescription("_ArrayToList") + begin + begin + input_ = [convert(Tensor{Any}, x) for x = input_] + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if out_types !== nothing + desc["out_types"] = map(Base.identity, out_types) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) + desc = tf.EagerOp("_ArrayToList") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if out_types !== nothing + desc["out_types"] = map(Base.identity, out_types) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) + if tf.in_eager_mode() + _array_to_list_eager(input_; name=name, N=N, out_types=out_types) + else + _array_to_list_graph(input_; name=name, N=N, out_types=out_types) + end end - if out_types !== nothing - desc["out_types"] = map(Base.identity, out_types) - end - end - tf.Tensor(tf.Operation(desc)) - end - function _array_to_list_eager(input_; name=nothing, N=nothing, out_types=nothing) - desc = tf.EagerOp("_ArrayToList") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if out_types !== nothing - desc["out_types"] = map(Base.identity, out_types) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(_array_to_list, [input_], name=nothing, N=nothing, out_types=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function _array_to_list(input_; name=nothing, N=nothing, out_types=nothing) - if tf.in_eager_mode() - _array_to_list_eager(input_; name=name, N=N, out_types=out_types) - else - _array_to_list_graph(input_; name=name, N=N, out_types=out_types) - end - end end @@ -59002,42 +107899,82 @@ end """ begin - function expand_dims_graph(input_, dim_; name=nothing) - local desc - tf.with_op_name(name, "ExpandDims") do - desc = tf.NodeDescription("ExpandDims") - input_ = convert(Tensor{Any}, input_) - dim_ = convert(Tensor{Int32}, dim_) - dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) - (input_,) = tf.tf_promote(input_) - (dim_,) = tf.tf_promote(dim_) - tf.add_input(desc, input_) - tf.add_input(desc, dim_) - end - tf.Tensor(tf.Operation(desc)) - end - function expand_dims_eager(input_, dim_; name=nothing) 
- desc = tf.EagerOp("ExpandDims") - input_ = convert(tf.EagerTensor, input_) - dim_ = convert(tf.EagerTensor, dim_) - tf.add_input(desc, input_) - tf.add_input(desc, dim_) - desc["T"] = tf.data_type(input_) - desc["Tdim"] = tf.data_type(dim_) - res = tf.execute(desc) - node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expand_dims(input_, dim_; name=nothing) - if tf.in_eager_mode() - expand_dims_eager(input_, dim_; name=name) - else - expand_dims_graph(input_, dim_; name=name) + begin + function expand_dims_graph(input_, dim_; name=nothing) + local desc + tf.with_op_name(name, "ExpandDims") do + desc = tf.NodeDescription("ExpandDims") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + dim_ = convert(Tensor{Int32}, dim_) + begin + dim_ = dim_ - convert(tf.Tensor{eltype(dim_)}, 1) + end + end + begin + (input_,) = tf.tf_promote(input_) + end + begin + (dim_,) = tf.tf_promote(dim_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dim_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function expand_dims_eager(input_, dim_; name=nothing) + desc = tf.EagerOp("ExpandDims") + input_ = convert(tf.EagerTensor, input_) + dim_ = convert(tf.EagerTensor, dim_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, dim_) + end + end + begin + end + begin + desc["T"] = tf.data_type(input_) + end + begin + desc["Tdim"] = tf.data_type(dim_) + end + res = tf.execute(desc) + node = tf.TapeNode(expand_dims, [input_, dim_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function expand_dims(input_, dim_; name=nothing) + if tf.in_eager_mode() + expand_dims_eager(input_, dim_; name=name) + else + expand_dims_graph(input_, dim_; name=name) + end end - end + end end @@ -59047,40 +107984,78 @@ end """ begin - function inv_grad_graph(y_, dy_; name=nothing) - local desc - tf.with_op_name(name, "InvGrad") do - desc = tf.NodeDescription("InvGrad") - y_ = convert(Tensor{Any}, y_) - dy_ = convert(Tensor{Any}, dy_) - (y_, dy_) = tf.tf_promote(y_, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) + begin + function inv_grad_graph(y_, dy_; name=nothing) + local desc + tf.with_op_name(name, "InvGrad") do + desc = tf.NodeDescription("InvGrad") + begin + begin + y_ = convert(Tensor{Any}, y_) + begin + end + end + begin + dy_ = convert(Tensor{Any}, dy_) + begin + end + end + begin + (y_, dy_) = tf.tf_promote(y_, dy_) + end + end + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function inv_grad_eager(y_, dy_; name=nothing) - desc = tf.EagerOp("InvGrad") - y_ = convert(tf.EagerTensor, y_) - dy_ = convert(tf.EagerTensor, dy_) - tf.add_input(desc, y_) - tf.add_input(desc, dy_) - desc["T"] = tf.data_type(y_) - desc["T"] = tf.data_type(dy_) - res = tf.execute(desc) - node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function inv_grad_eager(y_, dy_; name=nothing) + desc = tf.EagerOp("InvGrad") + y_ = convert(tf.EagerTensor, y_) + dy_ = 
convert(tf.EagerTensor, dy_) + begin + begin + tf.add_input(desc, y_) + end + begin + tf.add_input(desc, dy_) + end + end + begin + end + begin + desc["T"] = tf.data_type(y_) + end + begin + desc["T"] = tf.data_type(dy_) + end + res = tf.execute(desc) + node = tf.TapeNode(inv_grad, [y_, dy_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv_grad(y_, dy_; name=nothing) - if tf.in_eager_mode() - inv_grad_eager(y_, dy_; name=name) - else - inv_grad_graph(y_, dy_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function inv_grad(y_, dy_; name=nothing) + if tf.in_eager_mode() + inv_grad_eager(y_, dy_; name=name) + else + inv_grad_graph(y_, dy_; name=name) + end end - end + end end @@ -59090,47 +108065,91 @@ end """ begin - function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - local desc - tf.with_op_name(name, "NonMaxSuppression") do - desc = tf.NodeDescription("NonMaxSuppression") - boxes_ = convert(Tensor{Float32}, boxes_) - scores_ = convert(Tensor{Float32}, scores_) - max_output_size_ = convert(Tensor{Int32}, max_output_size_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - if iou_threshold !== nothing - desc["iou_threshold"] = Base.identity(iou_threshold) + begin + function non_max_suppression_graph(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + local desc + tf.with_op_name(name, "NonMaxSuppression") do + desc = tf.NodeDescription("NonMaxSuppression") + begin + begin + boxes_ = convert(Tensor{Float32}, boxes_) + begin + end + end + begin + scores_ = convert(Tensor{Float32}, scores_) + begin + end + end + begin + max_output_size_ = convert(Tensor{Int32}, max_output_size_) + begin + end + end + end + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + end + begin + begin + if iou_threshold !== nothing + desc["iou_threshold"] = Base.identity(iou_threshold) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + desc = tf.EagerOp("NonMaxSuppression") + boxes_ = convert(tf.EagerTensor, boxes_) + scores_ = convert(tf.EagerTensor, scores_) + max_output_size_ = convert(tf.EagerTensor, max_output_size_) + begin + begin + tf.add_input(desc, boxes_) + end + begin + tf.add_input(desc, scores_) + end + begin + tf.add_input(desc, max_output_size_) + end + end + begin + begin + if iou_threshold !== nothing + desc["iou_threshold"] = Base.identity(iou_threshold) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) + if tf.in_eager_mode() + non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + else + non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) + end end - end - 
tf.Tensor(tf.Operation(desc)) - end - function non_max_suppression_eager(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - desc = tf.EagerOp("NonMaxSuppression") - boxes_ = convert(tf.EagerTensor, boxes_) - scores_ = convert(tf.EagerTensor, scores_) - max_output_size_ = convert(tf.EagerTensor, max_output_size_) - tf.add_input(desc, boxes_) - tf.add_input(desc, scores_) - tf.add_input(desc, max_output_size_) - if iou_threshold !== nothing - desc["iou_threshold"] = Base.identity(iou_threshold) - end - res = tf.execute(desc) - node = tf.TapeNode(non_max_suppression, [boxes_, scores_, max_output_size_], name=nothing, iou_threshold=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function non_max_suppression(boxes_, scores_, max_output_size_; name=nothing, iou_threshold=nothing) - if tf.in_eager_mode() - non_max_suppression_eager(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) - else - non_max_suppression_graph(boxes_, scores_, max_output_size_; name=name, iou_threshold=iou_threshold) - end - end end @@ -59140,35 +108159,63 @@ end """ begin - function l2loss_graph(t_; name=nothing) - local desc - tf.with_op_name(name, "L2Loss") do - desc = tf.NodeDescription("L2Loss") - t_ = convert(Tensor{Any}, t_) - (t_,) = tf.tf_promote(t_) - tf.add_input(desc, t_) + begin + function l2loss_graph(t_; name=nothing) + local desc + tf.with_op_name(name, "L2Loss") do + desc = tf.NodeDescription("L2Loss") + begin + begin + t_ = convert(Tensor{Any}, t_) + begin + end + end + begin + (t_,) = tf.tf_promote(t_) + end + end + begin + begin + tf.add_input(desc, t_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function l2loss_eager(t_; name=nothing) - desc = tf.EagerOp("L2Loss") - t_ = convert(tf.EagerTensor, t_) - tf.add_input(desc, t_) - desc["T"] = tf.data_type(t_) - res = tf.execute(desc) - node = tf.TapeNode(l2loss, [t_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function l2loss_eager(t_; name=nothing) + desc = tf.EagerOp("L2Loss") + t_ = convert(tf.EagerTensor, t_) + begin + begin + tf.add_input(desc, t_) + end + end + begin + end + begin + desc["T"] = tf.data_type(t_) + end + res = tf.execute(desc) + node = tf.TapeNode(l2loss, [t_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function l2loss(t_; name=nothing) - if tf.in_eager_mode() - l2loss_eager(t_; name=name) - else - l2loss_graph(t_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function l2loss(t_; name=nothing) + if tf.in_eager_mode() + l2loss_eager(t_; name=name) + else + l2loss_graph(t_; name=name) + end end - end + end end @@ -59178,45 +108225,85 @@ end """ begin - function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) - local desc - tf.with_op_name(name, "ResizeArea") do - desc = tf.NodeDescription("ResizeArea") - images_ = convert(Tensor{Any}, images_) - size_ = convert(Tensor{Int32}, size_) - (images_,) = tf.tf_promote(images_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) + begin + function resize_area_graph(images_, size_; name=nothing, align_corners=nothing) + local 
desc + tf.with_op_name(name, "ResizeArea") do + desc = tf.NodeDescription("ResizeArea") + begin + begin + images_ = convert(Tensor{Any}, images_) + begin + end + end + begin + size_ = convert(Tensor{Int32}, size_) + begin + end + end + begin + (images_,) = tf.tf_promote(images_) + end + end + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) + desc = tf.EagerOp("ResizeArea") + images_ = convert(tf.EagerTensor, images_) + size_ = convert(tf.EagerTensor, size_) + begin + begin + tf.add_input(desc, images_) + end + begin + tf.add_input(desc, size_) + end + end + begin + begin + if align_corners !== nothing + desc["align_corners"] = Base.Bool(align_corners) + end + end + end + begin + desc["T"] = tf.data_type(images_) + end + res = tf.execute(desc) + node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) + if tf.in_eager_mode() + resize_area_eager(images_, size_; name=name, align_corners=align_corners) + else + resize_area_graph(images_, size_; name=name, align_corners=align_corners) + end end - end - tf.Tensor(tf.Operation(desc)) end - function resize_area_eager(images_, size_; name=nothing, align_corners=nothing) - desc = tf.EagerOp("ResizeArea") - images_ = convert(tf.EagerTensor, images_) - size_ = convert(tf.EagerTensor, size_) - tf.add_input(desc, images_) - tf.add_input(desc, size_) - if align_corners !== nothing - desc["align_corners"] = Base.Bool(align_corners) - end - desc["T"] = tf.data_type(images_) - res = tf.execute(desc) - node = tf.TapeNode(resize_area, [images_, size_], name=nothing, align_corners=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resize_area(images_, size_; name=nothing, align_corners=nothing) - if tf.in_eager_mode() - resize_area_eager(images_, size_; name=name, align_corners=align_corners) - else - resize_area_graph(images_, size_; name=name, align_corners=align_corners) - end - end end @@ -59226,98 +108313,180 @@ end """ begin - function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - local desc - tf.with_op_name(name, "SparseCross") do - desc = tf.NodeDescription("SparseCross") - indices_ = [convert(Tensor{Int64}, x) for x = indices_] - values_ = [convert(Tensor{Any}, x) for x = values_] - shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] - dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_] - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - tf.add_input(desc, dense_inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if hashed_output !== nothing - desc["hashed_output"] = Base.Bool(hashed_output) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if hash_key !== 
nothing - desc["hash_key"] = Base.Int(hash_key) + begin + function sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + local desc + tf.with_op_name(name, "SparseCross") do + desc = tf.NodeDescription("SparseCross") + begin + begin + indices_ = [convert(Tensor{Int64}, x) for x = indices_] + begin + end + end + begin + values_ = [convert(Tensor{Any}, x) for x = values_] + begin + end + end + begin + shapes_ = [convert(Tensor{Int64}, x) for x = shapes_] + begin + end + end + begin + dense_inputs_ = [convert(Tensor{Any}, x) for x = dense_inputs_] + begin + end + end + end + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shapes_) + end + begin + tf.add_input(desc, dense_inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if hashed_output !== nothing + desc["hashed_output"] = Base.Bool(hashed_output) + end + end + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if hash_key !== nothing + desc["hash_key"] = Base.Int(hash_key) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if dense_types !== nothing + desc["dense_types"] = map(Base.identity, dense_types) + end + end + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if internal_type !== nothing + desc["internal_type"] = Base.identity(internal_type) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + desc = tf.EagerOp("SparseCross") + indices_ = convert(tf.EagerTensor, indices_) + values_ = convert(tf.EagerTensor, values_) + shapes_ = convert(tf.EagerTensor, shapes_) + dense_inputs_ = convert(tf.EagerTensor, dense_inputs_) + begin + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, values_) + end + begin + tf.add_input(desc, shapes_) + end + begin + tf.add_input(desc, dense_inputs_) + end + end + begin + begin + if N !== nothing + desc["N"] = Base.Int(N) + end + end + begin + if hashed_output !== nothing + desc["hashed_output"] = Base.Bool(hashed_output) + end + end + begin + if num_buckets !== nothing + desc["num_buckets"] = Base.Int(num_buckets) + end + end + begin + if hash_key !== nothing + desc["hash_key"] = Base.Int(hash_key) + end + end + begin + if sparse_types !== nothing + desc["sparse_types"] = map(Base.identity, sparse_types) + end + end + begin + if dense_types !== nothing + desc["dense_types"] = map(Base.identity, dense_types) + end + end + begin + if out_type !== nothing + desc["out_type"] = Base.identity(out_type) + end + end + begin + if internal_type !== nothing + desc["internal_type"] = Base.identity(internal_type) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, 
dense_types=nothing, out_type=nothing, internal_type=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) + if tf.in_eager_mode() + sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + else + sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) + end end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if dense_types !== nothing - desc["dense_types"] = map(Base.identity, dense_types) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if internal_type !== nothing - desc["internal_type"] = Base.identity(internal_type) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - desc = tf.EagerOp("SparseCross") - indices_ = convert(tf.EagerTensor, indices_) - values_ = convert(tf.EagerTensor, values_) - shapes_ = convert(tf.EagerTensor, shapes_) - dense_inputs_ = convert(tf.EagerTensor, dense_inputs_) - tf.add_input(desc, indices_) - tf.add_input(desc, values_) - tf.add_input(desc, shapes_) - tf.add_input(desc, dense_inputs_) - if N !== nothing - desc["N"] = Base.Int(N) - end - if hashed_output !== nothing - desc["hashed_output"] = Base.Bool(hashed_output) - end - if num_buckets !== nothing - desc["num_buckets"] = Base.Int(num_buckets) - end - if hash_key !== nothing - desc["hash_key"] = Base.Int(hash_key) - end - if sparse_types !== nothing - desc["sparse_types"] = map(Base.identity, sparse_types) - end - if dense_types !== nothing - desc["dense_types"] = map(Base.identity, dense_types) - end - if out_type !== nothing - desc["out_type"] = Base.identity(out_type) - end - if internal_type !== nothing - desc["internal_type"] = Base.identity(internal_type) - end - res = tf.execute(desc) - node = tf.TapeNode(sparse_cross, [indices_, values_, shapes_, dense_inputs_], name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_cross(indices_, values_, shapes_, dense_inputs_; name=nothing, N=nothing, hashed_output=nothing, num_buckets=nothing, hash_key=nothing, sparse_types=nothing, dense_types=nothing, out_type=nothing, internal_type=nothing) - if tf.in_eager_mode() - sparse_cross_eager(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, 
num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) - else - sparse_cross_graph(indices_, values_, shapes_, dense_inputs_; name=name, N=N, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key, sparse_types=sparse_types, dense_types=dense_types, out_type=out_type, internal_type=internal_type) - end - end end @@ -59327,33 +108496,57 @@ end """ begin - function batch_fft3d_graph(input_; name=nothing) - local desc - tf.with_op_name(name, "BatchFFT3D") do - desc = tf.NodeDescription("BatchFFT3D") - input_ = convert(Tensor{Complex{Float32}}, input_) - tf.add_input(desc, input_) + begin + function batch_fft3d_graph(input_; name=nothing) + local desc + tf.with_op_name(name, "BatchFFT3D") do + desc = tf.NodeDescription("BatchFFT3D") + begin + begin + input_ = convert(Tensor{Complex{Float32}}, input_) + begin + end + end + end + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function batch_fft3d_eager(input_; name=nothing) - desc = tf.EagerOp("BatchFFT3D") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - res = tf.execute(desc) - node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function batch_fft3d_eager(input_; name=nothing) + desc = tf.EagerOp("BatchFFT3D") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(batch_fft3d, [input_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft3d(input_; name=nothing) - if tf.in_eager_mode() - batch_fft3d_eager(input_; name=name) - else - batch_fft3d_graph(input_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function batch_fft3d(input_; name=nothing) + if tf.in_eager_mode() + batch_fft3d_eager(input_; name=name) + else + batch_fft3d_graph(input_; name=name) + end end - end + end end @@ -59363,53 +108556,93 @@ end """ begin - function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "RandomStandardNormal") do - desc = tf.NodeDescription("RandomStandardNormal") - shape_ = convert(Tensor{Any}, shape_) - (shape_,) = tf.tf_promote(shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) + begin + function random_standard_normal_graph(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "RandomStandardNormal") do + desc = tf.NodeDescription("RandomStandardNormal") + begin + begin + shape_ = convert(Tensor{Any}, shape_) + begin + end + end + begin + (shape_,) = tf.tf_promote(shape_) + end + end + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + desc = 
tf.EagerOp("RandomStandardNormal") + shape_ = convert(tf.EagerTensor, shape_) + begin + begin + tf.add_input(desc, shape_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["T"] = tf.data_type(shape_) + end + res = tf.execute(desc) + node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) + if tf.in_eager_mode() + random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + else + random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) + end end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - end - tf.Tensor(tf.Operation(desc)) - end - function random_standard_normal_eager(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - desc = tf.EagerOp("RandomStandardNormal") - shape_ = convert(tf.EagerTensor, shape_) - tf.add_input(desc, shape_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["T"] = tf.data_type(shape_) - res = tf.execute(desc) - node = tf.TapeNode(random_standard_normal, [shape_], name=nothing, seed=nothing, seed2=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function random_standard_normal(shape_; name=nothing, seed=nothing, seed2=nothing, dtype=nothing) - if tf.in_eager_mode() - random_standard_normal_eager(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - else - random_standard_normal_graph(shape_; name=name, seed=seed, seed2=seed2, dtype=dtype) - end - end end @@ -59419,52 +108652,104 @@ end """ begin - function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) - local desc - tf.with_op_name(name, "ResourceScatterMul") do - desc = tf.NodeDescription("ResourceScatterMul") - resource_ = convert(Tensor{Any}, resource_) - indices_ = convert(Tensor{Any}, indices_) - indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) - updates_ = convert(Tensor{Any}, updates_) - (updates_,) = tf.tf_promote(updates_) - (indices_,) = tf.tf_promote(indices_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) + begin + function resource_scatter_mul_graph(resource_, indices_, updates_; name=nothing, dtype=nothing) + local desc + tf.with_op_name(name, "ResourceScatterMul") do + desc = tf.NodeDescription("ResourceScatterMul") + begin + begin + resource_ = convert(Tensor{Any}, resource_) + begin + end + end + begin + indices_ = convert(Tensor{Any}, indices_) + begin + indices_ = indices_ - convert(tf.Tensor{eltype(indices_)}, 1) + end + end + begin + updates_ = convert(Tensor{Any}, updates_) + begin + end + end + begin + (updates_,) = tf.tf_promote(updates_) + end + 
begin + (indices_,) = tf.tf_promote(indices_) + end + end + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) + desc = tf.EagerOp("ResourceScatterMul") + resource_ = convert(tf.EagerTensor, resource_) + indices_ = convert(tf.EagerTensor, indices_) + updates_ = convert(tf.EagerTensor, updates_) + begin + begin + tf.add_input(desc, resource_) + end + begin + tf.add_input(desc, indices_) + end + begin + tf.add_input(desc, updates_) + end + end + begin + begin + if dtype !== nothing + desc["dtype"] = Base.identity(dtype) + end + end + end + begin + desc["Tindices"] = tf.data_type(indices_) + end + begin + desc["dtype"] = tf.data_type(updates_) + end + res = tf.execute(desc) + node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) + if tf.in_eager_mode() + resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) + else + resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function resource_scatter_mul_eager(resource_, indices_, updates_; name=nothing, dtype=nothing) - desc = tf.EagerOp("ResourceScatterMul") - resource_ = convert(tf.EagerTensor, resource_) - indices_ = convert(tf.EagerTensor, indices_) - updates_ = convert(tf.EagerTensor, updates_) - tf.add_input(desc, resource_) - tf.add_input(desc, indices_) - tf.add_input(desc, updates_) - if dtype !== nothing - desc["dtype"] = Base.identity(dtype) - end - desc["Tindices"] = tf.data_type(indices_) - desc["dtype"] = tf.data_type(updates_) - res = tf.execute(desc) - node = tf.TapeNode(resource_scatter_mul, [resource_, indices_, updates_], name=nothing, dtype=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function resource_scatter_mul(resource_, indices_, updates_; name=nothing, dtype=nothing) - if tf.in_eager_mode() - resource_scatter_mul_eager(resource_, indices_, updates_; name=name, dtype=dtype) - else - resource_scatter_mul_graph(resource_, indices_, updates_; name=name, dtype=dtype) - end - end end @@ -59474,128 +108759,262 @@ end """ begin - function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - local desc - tf.with_op_name(name, "SdcaOptimizer") do - desc = tf.NodeDescription("SdcaOptimizer") - sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] - sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = 
sparse_feature_indices_] - sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] - dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] - example_weights_ = convert(Tensor{Float32}, example_weights_) - example_labels_ = convert(Tensor{Float32}, example_labels_) - sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] - sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] - dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] - example_state_data_ = convert(Tensor{Float32}, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) + begin + function sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + local desc + tf.with_op_name(name, "SdcaOptimizer") do + desc = tf.NodeDescription("SdcaOptimizer") + begin + begin + sparse_example_indices_ = [convert(Tensor{Int64}, x) for x = sparse_example_indices_] + begin + end + end + begin + sparse_feature_indices_ = [convert(Tensor{Int64}, x) for x = sparse_feature_indices_] + begin + end + end + begin + sparse_feature_values_ = [convert(Tensor{Float32}, x) for x = sparse_feature_values_] + begin + end + end + begin + dense_features_ = [convert(Tensor{Float32}, x) for x = dense_features_] + begin + end + end + begin + example_weights_ = convert(Tensor{Float32}, example_weights_) + begin + end + end + begin + example_labels_ = convert(Tensor{Float32}, example_labels_) + begin + end + end + begin + sparse_indices_ = [convert(Tensor{Int64}, x) for x = sparse_indices_] + begin + end + end + begin + sparse_weights_ = [convert(Tensor{Float32}, x) for x = sparse_weights_] + begin + end + end + begin + dense_weights_ = [convert(Tensor{Float32}, x) for x = dense_weights_] + begin + end + end + begin + example_state_data_ = convert(Tensor{Float32}, example_state_data_) + begin + end + end + end + begin + begin + tf.add_input(desc, sparse_example_indices_) + end + begin + tf.add_input(desc, sparse_feature_indices_) + end + begin + tf.add_input(desc, sparse_feature_values_) + end + begin + tf.add_input(desc, dense_features_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, example_labels_) + end + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_weights_) + end + begin + tf.add_input(desc, dense_weights_) + end + begin + tf.add_input(desc, example_state_data_) + end + end + begin + begin + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + end + begin + if adaptative !== nothing + desc["adaptative"] = Base.Bool(adaptative) + end + end + begin + if num_sparse_features !== nothing + desc["num_sparse_features"] = 
Base.Int(num_sparse_features) + end + end + begin + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + end + begin + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + begin + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + end + begin + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + desc = tf.EagerOp("SdcaOptimizer") + sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) + sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_) + sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_) + dense_features_ = convert(tf.EagerTensor, dense_features_) + example_weights_ = convert(tf.EagerTensor, example_weights_) + example_labels_ = convert(tf.EagerTensor, example_labels_) + sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) + sparse_weights_ = convert(tf.EagerTensor, sparse_weights_) + dense_weights_ = convert(tf.EagerTensor, dense_weights_) + example_state_data_ = convert(tf.EagerTensor, example_state_data_) + begin + begin + tf.add_input(desc, sparse_example_indices_) + end + begin + tf.add_input(desc, sparse_feature_indices_) + end + begin + tf.add_input(desc, sparse_feature_values_) + end + begin + tf.add_input(desc, dense_features_) + end + begin + tf.add_input(desc, example_weights_) + end + begin + tf.add_input(desc, example_labels_) + end + begin + tf.add_input(desc, sparse_indices_) + end + begin + tf.add_input(desc, sparse_weights_) + end + begin + tf.add_input(desc, dense_weights_) + end + begin + tf.add_input(desc, example_state_data_) + end + end + begin + begin + if loss_type !== nothing + desc["loss_type"] = Base.String(loss_type) + end + end + begin + if adaptative !== nothing + desc["adaptative"] = Base.Bool(adaptative) + end + end + begin + if num_sparse_features !== nothing + desc["num_sparse_features"] = Base.Int(num_sparse_features) + end + end + begin + if num_sparse_features_with_values !== nothing + desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) + end + end + begin + if num_dense_features !== nothing + desc["num_dense_features"] = Base.Int(num_dense_features) + end + end + begin + if l1 !== nothing + desc["l1"] = Base.identity(l1) + end + end + begin + if l2 !== nothing + desc["l2"] = Base.identity(l2) + end + end + begin + if num_loss_partitions !== nothing + desc["num_loss_partitions"] = Base.Int(num_loss_partitions) + end + end + begin + if num_inner_iterations !== nothing + desc["num_inner_iterations"] = Base.Int(num_inner_iterations) + end + 
end + end + res = tf.execute(desc) + node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) + if tf.in_eager_mode() + sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + else + sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) + end end - if adaptative !== nothing - desc["adaptative"] = Base.Bool(adaptative) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, 
num_inner_iterations=nothing) - desc = tf.EagerOp("SdcaOptimizer") - sparse_example_indices_ = convert(tf.EagerTensor, sparse_example_indices_) - sparse_feature_indices_ = convert(tf.EagerTensor, sparse_feature_indices_) - sparse_feature_values_ = convert(tf.EagerTensor, sparse_feature_values_) - dense_features_ = convert(tf.EagerTensor, dense_features_) - example_weights_ = convert(tf.EagerTensor, example_weights_) - example_labels_ = convert(tf.EagerTensor, example_labels_) - sparse_indices_ = convert(tf.EagerTensor, sparse_indices_) - sparse_weights_ = convert(tf.EagerTensor, sparse_weights_) - dense_weights_ = convert(tf.EagerTensor, dense_weights_) - example_state_data_ = convert(tf.EagerTensor, example_state_data_) - tf.add_input(desc, sparse_example_indices_) - tf.add_input(desc, sparse_feature_indices_) - tf.add_input(desc, sparse_feature_values_) - tf.add_input(desc, dense_features_) - tf.add_input(desc, example_weights_) - tf.add_input(desc, example_labels_) - tf.add_input(desc, sparse_indices_) - tf.add_input(desc, sparse_weights_) - tf.add_input(desc, dense_weights_) - tf.add_input(desc, example_state_data_) - if loss_type !== nothing - desc["loss_type"] = Base.String(loss_type) - end - if adaptative !== nothing - desc["adaptative"] = Base.Bool(adaptative) - end - if num_sparse_features !== nothing - desc["num_sparse_features"] = Base.Int(num_sparse_features) - end - if num_sparse_features_with_values !== nothing - desc["num_sparse_features_with_values"] = Base.Int(num_sparse_features_with_values) - end - if num_dense_features !== nothing - desc["num_dense_features"] = Base.Int(num_dense_features) - end - if l1 !== nothing - desc["l1"] = Base.identity(l1) - end - if l2 !== nothing - desc["l2"] = Base.identity(l2) - end - if num_loss_partitions !== nothing - desc["num_loss_partitions"] = Base.Int(num_loss_partitions) - end - if num_inner_iterations !== nothing - desc["num_inner_iterations"] = Base.Int(num_inner_iterations) - end - res = tf.execute(desc) - node = tf.TapeNode(sdca_optimizer, [sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_], name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sdca_optimizer(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=nothing, loss_type=nothing, adaptative=nothing, num_sparse_features=nothing, num_sparse_features_with_values=nothing, num_dense_features=nothing, l1=nothing, l2=nothing, num_loss_partitions=nothing, num_inner_iterations=nothing) - if tf.in_eager_mode() - sdca_optimizer_eager(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, 
num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - else - sdca_optimizer_graph(sparse_example_indices_, sparse_feature_indices_, sparse_feature_values_, dense_features_, example_weights_, example_labels_, sparse_indices_, sparse_weights_, dense_weights_, example_state_data_; name=name, loss_type=loss_type, adaptative=adaptative, num_sparse_features=num_sparse_features, num_sparse_features_with_values=num_sparse_features_with_values, num_dense_features=num_dense_features, l1=l1, l2=l2, num_loss_partitions=num_loss_partitions, num_inner_iterations=num_inner_iterations) - end - end end @@ -59605,40 +109024,78 @@ end """ begin - function zeta_graph(x_, q_; name=nothing) - local desc - tf.with_op_name(name, "Zeta") do - desc = tf.NodeDescription("Zeta") - x_ = convert(Tensor{Any}, x_) - q_ = convert(Tensor{Any}, q_) - (x_, q_) = tf.tf_promote(x_, q_) - tf.add_input(desc, x_) - tf.add_input(desc, q_) + begin + function zeta_graph(x_, q_; name=nothing) + local desc + tf.with_op_name(name, "Zeta") do + desc = tf.NodeDescription("Zeta") + begin + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + q_ = convert(Tensor{Any}, q_) + begin + end + end + begin + (x_, q_) = tf.tf_promote(x_, q_) + end + end + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, q_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function zeta_eager(x_, q_; name=nothing) - desc = tf.EagerOp("Zeta") - x_ = convert(tf.EagerTensor, x_) - q_ = convert(tf.EagerTensor, q_) - tf.add_input(desc, x_) - tf.add_input(desc, q_) - desc["T"] = tf.data_type(x_) - desc["T"] = tf.data_type(q_) - res = tf.execute(desc) - node = tf.TapeNode(zeta, [x_, q_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function zeta_eager(x_, q_; name=nothing) + desc = tf.EagerOp("Zeta") + x_ = convert(tf.EagerTensor, x_) + q_ = convert(tf.EagerTensor, q_) + begin + begin + tf.add_input(desc, x_) + end + begin + tf.add_input(desc, q_) + end + end + begin + end + begin + desc["T"] = tf.data_type(x_) + end + begin + desc["T"] = tf.data_type(q_) + end + res = tf.execute(desc) + node = tf.TapeNode(zeta, [x_, q_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeta(x_, q_; name=nothing) - if tf.in_eager_mode() - zeta_eager(x_, q_; name=name) - else - zeta_graph(x_, q_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function zeta(x_, q_; name=nothing) + if tf.in_eager_mode() + zeta_eager(x_, q_; name=name) + else + zeta_graph(x_, q_; name=name) + end end - end + end end @@ -59648,86 +109105,152 @@ end """ begin - function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - local desc - tf.with_op_name(name, "SampleDistortedBoundingBox") do - desc = tf.NodeDescription("SampleDistortedBoundingBox") - image_size_ = convert(Tensor{Any}, image_size_) - bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) - (image_size_,) = tf.tf_promote(image_size_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - 
desc["seed2"] = Base.Int(seed2) - end - if min_object_covered !== nothing - desc["min_object_covered"] = Base.identity(min_object_covered) - end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) + begin + function sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + local desc + tf.with_op_name(name, "SampleDistortedBoundingBox") do + desc = tf.NodeDescription("SampleDistortedBoundingBox") + begin + begin + image_size_ = convert(Tensor{Any}, image_size_) + begin + end + end + begin + bounding_boxes_ = convert(Tensor{Float32}, bounding_boxes_) + begin + end + end + begin + (image_size_,) = tf.tf_promote(image_size_) + end + end + begin + begin + tf.add_input(desc, image_size_) + end + begin + tf.add_input(desc, bounding_boxes_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if min_object_covered !== nothing + desc["min_object_covered"] = Base.identity(min_object_covered) + end + end + begin + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + end + begin + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + end + begin + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + end + begin + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + desc = tf.EagerOp("SampleDistortedBoundingBox") + image_size_ = convert(tf.EagerTensor, image_size_) + bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_) + begin + begin + tf.add_input(desc, image_size_) + end + begin + tf.add_input(desc, bounding_boxes_) + end + end + begin + begin + if seed !== nothing + desc["seed"] = Base.Int(seed) + end + end + begin + if seed2 !== nothing + desc["seed2"] = Base.Int(seed2) + end + end + begin + if min_object_covered !== nothing + desc["min_object_covered"] = Base.identity(min_object_covered) + end + end + begin + if aspect_ratio_range !== nothing + desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) + end + end + begin + if area_range !== nothing + desc["area_range"] = map(Base.identity, area_range) + end + end + begin + if max_attempts !== nothing + desc["max_attempts"] = Base.Int(max_attempts) + end + end + begin + if use_image_if_no_bounding_boxes !== nothing + desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) + end + end + end + begin + desc["T"] = tf.data_type(image_size_) + end + res = tf.execute(desc) + node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, 
aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) + if tf.in_eager_mode() + sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + else + sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) + end end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - desc = tf.EagerOp("SampleDistortedBoundingBox") - image_size_ = convert(tf.EagerTensor, image_size_) - bounding_boxes_ = convert(tf.EagerTensor, bounding_boxes_) - tf.add_input(desc, image_size_) - tf.add_input(desc, bounding_boxes_) - if seed !== nothing - desc["seed"] = Base.Int(seed) - end - if seed2 !== nothing - desc["seed2"] = Base.Int(seed2) - end - if min_object_covered !== nothing - desc["min_object_covered"] = Base.identity(min_object_covered) - end - if aspect_ratio_range !== nothing - desc["aspect_ratio_range"] = map(Base.identity, aspect_ratio_range) - end - if area_range !== nothing - desc["area_range"] = map(Base.identity, area_range) - end - if max_attempts !== nothing - desc["max_attempts"] = Base.Int(max_attempts) - end - if use_image_if_no_bounding_boxes !== nothing - desc["use_image_if_no_bounding_boxes"] = Base.Bool(use_image_if_no_bounding_boxes) - end - desc["T"] = tf.data_type(image_size_) - res = tf.execute(desc) - node = tf.TapeNode(sample_distorted_bounding_box, [image_size_, bounding_boxes_], name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sample_distorted_bounding_box(image_size_, bounding_boxes_; name=nothing, seed=nothing, seed2=nothing, min_object_covered=nothing, aspect_ratio_range=nothing, area_range=nothing, max_attempts=nothing, use_image_if_no_bounding_boxes=nothing) - if tf.in_eager_mode() - sample_distorted_bounding_box_eager(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, 
min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) - else - sample_distorted_bounding_box_graph(image_size_, bounding_boxes_; name=name, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes) - end - end end @@ -59737,40 +109260,78 @@ end """ begin - function igamma_grad_a_graph(a_, x_; name=nothing) - local desc - tf.with_op_name(name, "IgammaGradA") do - desc = tf.NodeDescription("IgammaGradA") - a_ = convert(Tensor{Any}, a_) - x_ = convert(Tensor{Any}, x_) - (a_, x_) = tf.tf_promote(a_, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) + begin + function igamma_grad_a_graph(a_, x_; name=nothing) + local desc + tf.with_op_name(name, "IgammaGradA") do + desc = tf.NodeDescription("IgammaGradA") + begin + begin + a_ = convert(Tensor{Any}, a_) + begin + end + end + begin + x_ = convert(Tensor{Any}, x_) + begin + end + end + begin + (a_, x_) = tf.tf_promote(a_, x_) + end + end + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function igamma_grad_a_eager(a_, x_; name=nothing) - desc = tf.EagerOp("IgammaGradA") - a_ = convert(tf.EagerTensor, a_) - x_ = convert(tf.EagerTensor, x_) - tf.add_input(desc, a_) - tf.add_input(desc, x_) - desc["T"] = tf.data_type(a_) - desc["T"] = tf.data_type(x_) - res = tf.execute(desc) - node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function igamma_grad_a_eager(a_, x_; name=nothing) + desc = tf.EagerOp("IgammaGradA") + a_ = convert(tf.EagerTensor, a_) + x_ = convert(tf.EagerTensor, x_) + begin + begin + tf.add_input(desc, a_) + end + begin + tf.add_input(desc, x_) + end + end + begin + end + begin + desc["T"] = tf.data_type(a_) + end + begin + desc["T"] = tf.data_type(x_) + end + res = tf.execute(desc) + node = tf.TapeNode(igamma_grad_a, [a_, x_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma_grad_a(a_, x_; name=nothing) - if tf.in_eager_mode() - igamma_grad_a_eager(a_, x_; name=name) - else - igamma_grad_a_graph(a_, x_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function igamma_grad_a(a_, x_; name=nothing) + if tf.in_eager_mode() + igamma_grad_a_eager(a_, x_; name=name) + else + igamma_grad_a_graph(a_, x_; name=name) + end end - end + end end @@ -59780,42 +109341,82 @@ end """ begin - function segment_max_graph(data_, segment_ids_; name=nothing) - local desc - tf.with_op_name(name, "SegmentMax") do - desc = tf.NodeDescription("SegmentMax") - data_ = convert(Tensor{Any}, data_) - segment_ids_ = convert(Tensor{Any}, segment_ids_) - segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) - (data_,) = tf.tf_promote(data_) - (segment_ids_,) = tf.tf_promote(segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - end - tf.Tensor(tf.Operation(desc)) - end - function segment_max_eager(data_, segment_ids_; name=nothing) - desc = tf.EagerOp("SegmentMax") - data_ = convert(tf.EagerTensor, data_) - segment_ids_ = 
convert(tf.EagerTensor, segment_ids_) - tf.add_input(desc, data_) - tf.add_input(desc, segment_ids_) - desc["T"] = tf.data_type(data_) - desc["Tindices"] = tf.data_type(segment_ids_) - res = tf.execute(desc) - node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_max(data_, segment_ids_; name=nothing) - if tf.in_eager_mode() - segment_max_eager(data_, segment_ids_; name=name) - else - segment_max_graph(data_, segment_ids_; name=name) + begin + function segment_max_graph(data_, segment_ids_; name=nothing) + local desc + tf.with_op_name(name, "SegmentMax") do + desc = tf.NodeDescription("SegmentMax") + begin + begin + data_ = convert(Tensor{Any}, data_) + begin + end + end + begin + segment_ids_ = convert(Tensor{Any}, segment_ids_) + begin + segment_ids_ = segment_ids_ - convert(tf.Tensor{eltype(segment_ids_)}, 1) + end + end + begin + (data_,) = tf.tf_promote(data_) + end + begin + (segment_ids_,) = tf.tf_promote(segment_ids_) + end + end + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function segment_max_eager(data_, segment_ids_; name=nothing) + desc = tf.EagerOp("SegmentMax") + data_ = convert(tf.EagerTensor, data_) + segment_ids_ = convert(tf.EagerTensor, segment_ids_) + begin + begin + tf.add_input(desc, data_) + end + begin + tf.add_input(desc, segment_ids_) + end + end + begin + end + begin + desc["T"] = tf.data_type(data_) + end + begin + desc["Tindices"] = tf.data_type(segment_ids_) + end + res = tf.execute(desc) + node = tf.TapeNode(segment_max, [data_, segment_ids_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function segment_max(data_, segment_ids_; name=nothing) + if tf.in_eager_mode() + segment_max_eager(data_, segment_ids_; name=name) + else + segment_max_graph(data_, segment_ids_; name=name) + end end - end + end end @@ -59825,45 +109426,93 @@ end """ begin - function range_graph(start_, limit_, delta_; name=nothing) - local desc - tf.with_op_name(name, "Range") do - desc = tf.NodeDescription("Range") - start_ = convert(Tensor{Int32}, start_) - limit_ = convert(Tensor{Int32}, limit_) - delta_ = convert(Tensor{Int32}, delta_) - (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - end - tf.Tensor(tf.Operation(desc)) - end - function range_eager(start_, limit_, delta_; name=nothing) - desc = tf.EagerOp("Range") - start_ = convert(tf.EagerTensor, start_) - limit_ = convert(tf.EagerTensor, limit_) - delta_ = convert(tf.EagerTensor, delta_) - tf.add_input(desc, start_) - tf.add_input(desc, limit_) - tf.add_input(desc, delta_) - desc["Tidx"] = tf.data_type(start_) - desc["Tidx"] = tf.data_type(limit_) - desc["Tidx"] = tf.data_type(delta_) - res = tf.execute(desc) - node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range(start_, limit_, delta_; name=nothing) - if tf.in_eager_mode() - range_eager(start_, limit_, delta_; name=name) - 
else - range_graph(start_, limit_, delta_; name=name) + begin + function range_graph(start_, limit_, delta_; name=nothing) + local desc + tf.with_op_name(name, "Range") do + desc = tf.NodeDescription("Range") + begin + begin + start_ = convert(Tensor{Int32}, start_) + begin + end + end + begin + limit_ = convert(Tensor{Int32}, limit_) + begin + end + end + begin + delta_ = convert(Tensor{Int32}, delta_) + begin + end + end + begin + (start_, limit_, delta_) = tf.tf_promote(start_, limit_, delta_) + end + end + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, limit_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function range_eager(start_, limit_, delta_; name=nothing) + desc = tf.EagerOp("Range") + start_ = convert(tf.EagerTensor, start_) + limit_ = convert(tf.EagerTensor, limit_) + delta_ = convert(tf.EagerTensor, delta_) + begin + begin + tf.add_input(desc, start_) + end + begin + tf.add_input(desc, limit_) + end + begin + tf.add_input(desc, delta_) + end + end + begin + end + begin + desc["Tidx"] = tf.data_type(start_) + end + begin + desc["Tidx"] = tf.data_type(limit_) + end + begin + desc["Tidx"] = tf.data_type(delta_) + end + res = tf.execute(desc) + node = tf.TapeNode(range, [start_, limit_, delta_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function range(start_, limit_, delta_; name=nothing) + if tf.in_eager_mode() + range_eager(start_, limit_, delta_; name=name) + else + range_graph(start_, limit_, delta_; name=name) + end end - end + end end @@ -59873,58 +109522,92 @@ end Retrieve embedding parameters for a single table. 
""" begin - function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - local desc - tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do - desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if table_name !== nothing - desc["table_name"] = Base.String(table_name) + begin + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + local desc + tf.with_op_name(name, "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") do + desc = tf.NodeDescription("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") + begin + end + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:3 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") + begin + end + begin + begin + if table_id !== nothing + desc["table_id"] = Base.Int(table_id) + end + end + begin + if table_name !== nothing + desc["table_name"] = Base.String(table_name) + end + end + begin + if num_shards !== nothing + desc["num_shards"] = Base.Int(num_shards) + end + end + begin + if shard_id !== nothing + desc["shard_id"] = Base.Int(shard_id) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) + if tf.in_eager_mode() + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + else + retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) + end end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:3 - push!(out, tf.Tensor(op, out_idx)) - end - out end - function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - desc = tf.EagerOp("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug") - if table_id !== nothing - desc["table_id"] = Base.Int(table_id) - end - if 
table_name !== nothing - desc["table_name"] = Base.String(table_name) - end - if num_shards !== nothing - desc["num_shards"] = Base.Int(num_shards) - end - if shard_id !== nothing - desc["shard_id"] = Base.Int(shard_id) - end - res = tf.execute(desc) - node = tf.TapeNode(retrieve_tpu_embedding_momentum_parameters_grad_accum_debug, [], name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function retrieve_tpu_embedding_momentum_parameters_grad_accum_debug(; name=nothing, table_id=nothing, table_name=nothing, num_shards=nothing, shard_id=nothing) - if tf.in_eager_mode() - retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_eager(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - else - retrieve_tpu_embedding_momentum_parameters_grad_accum_debug_graph(; name=name, table_id=table_id, table_name=table_name, num_shards=num_shards, shard_id=shard_id) - end - end end @@ -59934,33 +109617,57 @@ end """ begin - function flush_summary_writer_graph(writer_; name=nothing) - local desc - tf.with_op_name(name, "FlushSummaryWriter") do - desc = tf.NodeDescription("FlushSummaryWriter") - writer_ = convert(Tensor{Any}, writer_) - tf.add_input(desc, writer_) + begin + function flush_summary_writer_graph(writer_; name=nothing) + local desc + tf.with_op_name(name, "FlushSummaryWriter") do + desc = tf.NodeDescription("FlushSummaryWriter") + begin + begin + writer_ = convert(Tensor{Any}, writer_) + begin + end + end + end + begin + begin + tf.add_input(desc, writer_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function flush_summary_writer_eager(writer_; name=nothing) - desc = tf.EagerOp("FlushSummaryWriter") - writer_ = convert(tf.EagerTensor, writer_) - tf.add_input(desc, writer_) - res = tf.execute(desc) - node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function flush_summary_writer_eager(writer_; name=nothing) + desc = tf.EagerOp("FlushSummaryWriter") + writer_ = convert(tf.EagerTensor, writer_) + begin + begin + tf.add_input(desc, writer_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(flush_summary_writer, [writer_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flush_summary_writer(writer_; name=nothing) - if tf.in_eager_mode() - flush_summary_writer_eager(writer_; name=name) - else - flush_summary_writer_graph(writer_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function flush_summary_writer(writer_; name=nothing) + if tf.in_eager_mode() + flush_summary_writer_eager(writer_; name=name) + else + flush_summary_writer_graph(writer_; name=name) + end end - end + end end @@ -59970,49 +109677,97 @@ end """ begin - function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) - local desc - tf.with_op_name(name, "Dequantize") do - desc = tf.NodeDescription("Dequantize") - input_ = convert(Tensor{Any}, input_) - min_range_ = convert(Tensor{Float32}, min_range_) - max_range_ = convert(Tensor{Float32}, max_range_) - (input_,) = tf.tf_promote(input_) - 
tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) + begin + function dequantize_graph(input_, min_range_, max_range_; name=nothing, mode=nothing) + local desc + tf.with_op_name(name, "Dequantize") do + desc = tf.NodeDescription("Dequantize") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + min_range_ = convert(Tensor{Float32}, min_range_) + begin + end + end + begin + max_range_ = convert(Tensor{Float32}, max_range_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + end + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_range_) + end + begin + tf.add_input(desc, max_range_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) + desc = tf.EagerOp("Dequantize") + input_ = convert(tf.EagerTensor, input_) + min_range_ = convert(tf.EagerTensor, min_range_) + max_range_ = convert(tf.EagerTensor, max_range_) + begin + begin + tf.add_input(desc, input_) + end + begin + tf.add_input(desc, min_range_) + end + begin + tf.add_input(desc, max_range_) + end + end + begin + begin + if mode !== nothing + desc["mode"] = Base.String(mode) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) + if tf.in_eager_mode() + dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) + else + dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function dequantize_eager(input_, min_range_, max_range_; name=nothing, mode=nothing) - desc = tf.EagerOp("Dequantize") - input_ = convert(tf.EagerTensor, input_) - min_range_ = convert(tf.EagerTensor, min_range_) - max_range_ = convert(tf.EagerTensor, max_range_) - tf.add_input(desc, input_) - tf.add_input(desc, min_range_) - tf.add_input(desc, max_range_) - if mode !== nothing - desc["mode"] = Base.String(mode) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(dequantize, [input_, min_range_, max_range_], name=nothing, mode=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function dequantize(input_, min_range_, max_range_; name=nothing, mode=nothing) - if tf.in_eager_mode() - dequantize_eager(input_, min_range_, max_range_; name=name, mode=mode) - else - dequantize_graph(input_, min_range_, max_range_; name=name, mode=mode) - end - end end @@ -60022,44 +109777,82 @@ end """ begin - function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) - local desc - tf.with_op_name(name, "SparseFillEmptyRowsGrad") do - desc = tf.NodeDescription("SparseFillEmptyRowsGrad") - reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_) - grad_values_ = convert(Tensor{Any}, grad_values_) - (grad_values_,) = tf.tf_promote(grad_values_) - 
tf.add_input(desc, reverse_index_map_) - tf.add_input(desc, grad_values_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) - end - out - end - function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) - desc = tf.EagerOp("SparseFillEmptyRowsGrad") - reverse_index_map_ = convert(tf.EagerTensor, reverse_index_map_) - grad_values_ = convert(tf.EagerTensor, grad_values_) - tf.add_input(desc, reverse_index_map_) - tf.add_input(desc, grad_values_) - desc["T"] = tf.data_type(grad_values_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res - end - end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) - if tf.in_eager_mode() - sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) - else - sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) + begin + function sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=nothing) + local desc + tf.with_op_name(name, "SparseFillEmptyRowsGrad") do + desc = tf.NodeDescription("SparseFillEmptyRowsGrad") + begin + begin + reverse_index_map_ = convert(Tensor{Int64}, reverse_index_map_) + begin + end + end + begin + grad_values_ = convert(Tensor{Any}, grad_values_) + begin + end + end + begin + (grad_values_,) = tf.tf_promote(grad_values_) + end + end + begin + begin + tf.add_input(desc, reverse_index_map_) + end + begin + tf.add_input(desc, grad_values_) + end + end + begin + end + end + begin + out = tf.Tensor[] + op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end + end + end + begin + function sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=nothing) + desc = tf.EagerOp("SparseFillEmptyRowsGrad") + reverse_index_map_ = convert(tf.EagerTensor, reverse_index_map_) + grad_values_ = convert(tf.EagerTensor, grad_values_) + begin + begin + tf.add_input(desc, reverse_index_map_) + end + begin + tf.add_input(desc, grad_values_) + end + end + begin + end + begin + desc["T"] = tf.data_type(grad_values_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_fill_empty_rows_grad, [reverse_index_map_, grad_values_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_fill_empty_rows_grad(reverse_index_map_, grad_values_; name=nothing) + if tf.in_eager_mode() + sparse_fill_empty_rows_grad_eager(reverse_index_map_, grad_values_; name=name) + else + sparse_fill_empty_rows_grad_graph(reverse_index_map_, grad_values_; name=name) + end end - end + end end @@ -60069,45 +109862,77 @@ end """ begin - function iterator_get_next_graph(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - local desc - tf.with_op_name(name, "IteratorGetNext") do - desc = tf.NodeDescription("IteratorGetNext") - iterator_ = convert(Tensor{Any}, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) + begin + function iterator_get_next_graph(iterator_; name=nothing, 
output_types=nothing, output_shapes=nothing) + local desc + tf.with_op_name(name, "IteratorGetNext") do + desc = tf.NodeDescription("IteratorGetNext") + begin + begin + iterator_ = convert(Tensor{Any}, iterator_) + begin + end + end + end + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + desc = tf.EagerOp("IteratorGetNext") + iterator_ = convert(tf.EagerTensor, iterator_) + begin + begin + tf.add_input(desc, iterator_) + end + end + begin + begin + if output_types !== nothing + desc["output_types"] = map(Base.identity, output_types) + end + end + begin + if output_shapes !== nothing + desc["output_shapes"] = map(Base.identity, output_shapes) + end + end + end + res = tf.execute(desc) + node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) + if tf.in_eager_mode() + iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + else + iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) + end end - end - tf.Tensor(tf.Operation(desc)) - end - function iterator_get_next_eager(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - desc = tf.EagerOp("IteratorGetNext") - iterator_ = convert(tf.EagerTensor, iterator_) - tf.add_input(desc, iterator_) - if output_types !== nothing - desc["output_types"] = map(Base.identity, output_types) - end - if output_shapes !== nothing - desc["output_shapes"] = map(Base.identity, output_shapes) - end - res = tf.execute(desc) - node = tf.TapeNode(iterator_get_next, [iterator_], name=nothing, output_types=nothing, output_shapes=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function iterator_get_next(iterator_; name=nothing, output_types=nothing, output_shapes=nothing) - if tf.in_eager_mode() - iterator_get_next_eager(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - else - iterator_get_next_graph(iterator_; name=name, output_types=output_types, output_shapes=output_shapes) - end - end end @@ -60117,41 +109942,73 @@ end """ begin - function prevent_gradient_graph(input_; name=nothing, message=nothing) - local desc - tf.with_op_name(name, "PreventGradient") do - desc = tf.NodeDescription("PreventGradient") - input_ = convert(Tensor{Any}, input_) - (input_,) = tf.tf_promote(input_) - tf.add_input(desc, input_) - if message !== nothing - desc["message"] = Base.String(message) + begin + function prevent_gradient_graph(input_; name=nothing, message=nothing) + local desc + tf.with_op_name(name, "PreventGradient") do + desc = tf.NodeDescription("PreventGradient") + begin + begin + input_ = convert(Tensor{Any}, input_) + begin + end + end + begin + (input_,) = tf.tf_promote(input_) + end + 
end + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + end end + tf.Tensor(tf.Operation(desc)) end - tf.Tensor(tf.Operation(desc)) end - function prevent_gradient_eager(input_; name=nothing, message=nothing) - desc = tf.EagerOp("PreventGradient") - input_ = convert(tf.EagerTensor, input_) - tf.add_input(desc, input_) - if message !== nothing - desc["message"] = Base.String(message) - end - desc["T"] = tf.data_type(input_) - res = tf.execute(desc) - node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] + begin + function prevent_gradient_eager(input_; name=nothing, message=nothing) + desc = tf.EagerOp("PreventGradient") + input_ = convert(tf.EagerTensor, input_) + begin + begin + tf.add_input(desc, input_) + end + end + begin + begin + if message !== nothing + desc["message"] = Base.String(message) + end + end + end + begin + desc["T"] = tf.data_type(input_) + end + res = tf.execute(desc) + node = tf.TapeNode(prevent_gradient, [input_], name=nothing, message=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing) - if tf.in_eager_mode() - prevent_gradient_eager(input_; name=name, message=message) - else - prevent_gradient_graph(input_; name=name, message=message) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function prevent_gradient(input_; name=nothing, message=nothing) + if tf.in_eager_mode() + prevent_gradient_eager(input_; name=name, message=message) + else + prevent_gradient_graph(input_; name=name, message=message) + end end - end + end end @@ -60161,53 +110018,113 @@ end """ begin - function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) - local desc - tf.with_op_name(name, "SparseTensorDenseAdd") do - desc = tf.NodeDescription("SparseTensorDenseAdd") - a_indices_ = convert(Tensor{Any}, a_indices_) - a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) - a_values_ = convert(Tensor{Any}, a_values_) - a_shape_ = convert(Tensor{Any}, a_shape_) - a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1) - b_ = convert(Tensor{Any}, b_) - (a_values_, b_) = tf.tf_promote(a_values_, b_) - (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_) - end - tf.Tensor(tf.Operation(desc)) - end - function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) - desc = tf.EagerOp("SparseTensorDenseAdd") - a_indices_ = convert(tf.EagerTensor, a_indices_) - a_values_ = convert(tf.EagerTensor, a_values_) - a_shape_ = convert(tf.EagerTensor, a_shape_) - b_ = convert(tf.EagerTensor, b_) - tf.add_input(desc, a_indices_) - tf.add_input(desc, a_values_) - tf.add_input(desc, a_shape_) - tf.add_input(desc, b_) - desc["Tindices"] = tf.data_type(a_indices_) - desc["T"] = tf.data_type(a_values_) - desc["Tindices"] = tf.data_type(a_shape_) - desc["T"] = tf.data_type(b_) - res = tf.execute(desc) - node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res[1] - end - end - #= 
/Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) - if tf.in_eager_mode() - sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) - else - sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) + begin + function sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=nothing) + local desc + tf.with_op_name(name, "SparseTensorDenseAdd") do + desc = tf.NodeDescription("SparseTensorDenseAdd") + begin + begin + a_indices_ = convert(Tensor{Any}, a_indices_) + begin + a_indices_ = a_indices_ - convert(tf.Tensor{eltype(a_indices_)}, 1) + end + end + begin + a_values_ = convert(Tensor{Any}, a_values_) + begin + end + end + begin + a_shape_ = convert(Tensor{Any}, a_shape_) + begin + a_shape_ = a_shape_ - convert(tf.Tensor{eltype(a_shape_)}, 1) + end + end + begin + b_ = convert(Tensor{Any}, b_) + begin + end + end + begin + (a_values_, b_) = tf.tf_promote(a_values_, b_) + end + begin + (a_indices_, a_shape_) = tf.tf_promote(a_indices_, a_shape_) + end + end + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_) + end + end + begin + end + end + tf.Tensor(tf.Operation(desc)) + end + end + begin + function sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=nothing) + desc = tf.EagerOp("SparseTensorDenseAdd") + a_indices_ = convert(tf.EagerTensor, a_indices_) + a_values_ = convert(tf.EagerTensor, a_values_) + a_shape_ = convert(tf.EagerTensor, a_shape_) + b_ = convert(tf.EagerTensor, b_) + begin + begin + tf.add_input(desc, a_indices_) + end + begin + tf.add_input(desc, a_values_) + end + begin + tf.add_input(desc, a_shape_) + end + begin + tf.add_input(desc, b_) + end + end + begin + end + begin + desc["Tindices"] = tf.data_type(a_indices_) + end + begin + desc["T"] = tf.data_type(a_values_) + end + begin + desc["Tindices"] = tf.data_type(a_shape_) + end + begin + desc["T"] = tf.data_type(b_) + end + res = tf.execute(desc) + node = tf.TapeNode(sparse_tensor_dense_add, [a_indices_, a_values_, a_shape_, b_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res[1] + end + end + end + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function sparse_tensor_dense_add(a_indices_, a_values_, a_shape_, b_; name=nothing) + if tf.in_eager_mode() + sparse_tensor_dense_add_eager(a_indices_, a_values_, a_shape_, b_; name=name) + else + sparse_tensor_dense_add_graph(a_indices_, a_values_, a_shape_, b_; name=name) + end end - end + end end @@ -60217,38 +110134,64 @@ end """ begin - function lookup_table_export_graph(table_handle_; name=nothing) - local desc - tf.with_op_name(name, "LookupTableExport") do - desc = tf.NodeDescription("LookupTableExport") - table_handle_ = convert(Tensor{String}, table_handle_) - tf.add_input(desc, table_handle_) - end - out = tf.Tensor[] - op = tf.Operation(desc) - for out_idx = 1:2 - push!(out, tf.Tensor(op, out_idx)) + begin + function lookup_table_export_graph(table_handle_; name=nothing) + local desc + tf.with_op_name(name, "LookupTableExport") do + desc = tf.NodeDescription("LookupTableExport") + begin + begin + table_handle_ = convert(Tensor{String}, table_handle_) + begin + end + end + end + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + end + begin + out = tf.Tensor[] + 
op = tf.Operation(desc) + for out_idx = 1:2 + push!(out, tf.Tensor(op, out_idx)) + end + out + end end - out end - function lookup_table_export_eager(table_handle_; name=nothing) - desc = tf.EagerOp("LookupTableExport") - table_handle_ = convert(tf.EagerTensor, table_handle_) - tf.add_input(desc, table_handle_) - res = tf.execute(desc) - node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res) - if length(res) >= 1 - tf.add_node(res[1], node) - return res + begin + function lookup_table_export_eager(table_handle_; name=nothing) + desc = tf.EagerOp("LookupTableExport") + table_handle_ = convert(tf.EagerTensor, table_handle_) + begin + begin + tf.add_input(desc, table_handle_) + end + end + begin + end + res = tf.execute(desc) + node = tf.TapeNode(lookup_table_export, [table_handle_], name=nothing, res) + if length(res) >= 1 + tf.add_node(res[1], node) + return res + end end end - #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export(table_handle_; name=nothing) - if tf.in_eager_mode() - lookup_table_export_eager(table_handle_; name=name) - else - lookup_table_export_graph(table_handle_; name=name) + begin + #= /Users/malmaud/Documents/code/TensorFlow/src/generate_ops.jl:262 =# tf.@op function lookup_table_export(table_handle_; name=nothing) + if tf.in_eager_mode() + lookup_table_export_eager(table_handle_; name=name) + else + lookup_table_export_graph(table_handle_; name=name) + end end - end + end end From f2037bd4d4c6c76ef39bc4879da62fa24e31af48 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Fri, 15 Mar 2019 21:13:44 -0400 Subject: [PATCH 48/49] Downgrade conda version --- deps/build.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/build.jl b/deps/build.jl index 50978a3a..57fb3683 100644 --- a/deps/build.jl +++ b/deps/build.jl @@ -2,7 +2,7 @@ using PyCall using Conda const cur_version = "1.13.1" -const cur_py_version = "1.13.1" +const cur_py_version = "1.12" # Temporarily downgrade Python version until 1.13.1 is released on Conda ############################ From 0492959d1b880b5429ae9c14d68ec761fad09164 Mon Sep 17 00:00:00 2001 From: Jon Malmaud Date: Mon, 18 Mar 2019 12:11:23 -0400 Subject: [PATCH 49/49] Change isfile to ispath in summary_writer --- src/summary_writer.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/summary_writer.jl b/src/summary_writer.jl index 57ef4d21..12f26475 100644 --- a/src/summary_writer.jl +++ b/src/summary_writer.jl @@ -37,7 +37,7 @@ function FileWriter(log_dir::AbstractString; graph=nothing) local path for i in Iterators.countfrom(1) path = joinpath(log_dir, "events.out.tfevents.$i") - isfile(path) || break + ispath(path) || break end writer = FileWriter(open(path, "w"), String(log_dir)) if graph !== nothing